File size: 1,557 Bytes
dcdaaec
f306d19
a134b00
557899a
8e5beee
b30b5d8
 
 
c4556a0
ebb0ce9
39f1739
f306d19
8e5beee
 
 
03c7df4
8e5beee
f9f0b73
f306d19
8e5beee
 
 
f306d19
8e5beee
b30b5d8
 
 
 
 
 
 
 
2f8466b
0f3065b
5826562
 
9f584e5
8e5beee
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr
from gradio import mix
import tensorflow as tf
import numpy as np
import torch
from keras.preprocessing.sequence import pad_sequences
import pickle

from huggingface_hub import from_pretrained_keras

# Download and load the pretrained miniature-GPT Keras model from the
# Hugging Face Hub. Runs at import time, so startup requires network access.
model = from_pretrained_keras("keras-io/text-generation-miniature-gpt")

def tokenize_data(text):
    """Tokenize ``text`` into model inputs using a HF-style tokenizer.

    NOTE(review): ``tokenizer`` is a global that is never defined anywhere in
    this file — calling this function raises NameError. This function is also
    never called by the rest of the script (dead code); it appears to expect a
    Hugging Face tokenizer (``return_tensors='pt'``), which is inconsistent
    with the Keras ``Tokenizer`` API that ``generate_answers`` uses. Either
    delete it or load the intended tokenizer — confirm with the original repo.

    Returns a dict with ``input_ids`` and ``attention_mask`` tensors.
    """
    # Tokenize the review body
    # ' </s>' is a T5-style end-of-sequence marker appended to the raw text.
    input_ =  str(text) + ' </s>'
    max_len = 80
    # tokenize inputs
    # Pads/truncates to exactly max_len and returns PyTorch tensors ('pt').
    tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')

    inputs={"input_ids": tokenized_inputs['input_ids'],
        "attention_mask": tokenized_inputs['attention_mask']}
    return inputs

def generate_answers(text):
    """Generate a continuation for ``text`` with the mini-GPT model.

    Used as the Gradio prediction function (text in, text out).

    NOTE(review): ``tokenizer`` is never defined in this file, so the first
    prediction raises NameError and the demo fails at runtime. The calls here
    (``texts_to_sequences`` / ``sequences_to_texts``) imply a Keras
    ``Tokenizer`` instance — presumably it was meant to be unpickled (``pickle``
    is imported but unused). Restore the tokenizer load to fix the app.
    """
    # Encode the prompt to integer ids and pad/truncate to length 80
    # (must match the model's training sequence length — TODO confirm).
    sequence_test = tokenizer.texts_to_sequences([text])
    padded_test = pad_sequences(sequence_test, maxlen= 80, padding='post')
    # Model returns (predictions, <second output>); only logits are used.
    predictions,_ = model.predict(padded_test)
    # Greedy decode: pick the highest-scoring id along axis 1 for sample 0.
    # NOTE(review): axis=1 over a (batch, seq, vocab) tensor would argmax over
    # positions, not vocabulary — verify the intended output shape.
    results = np.argmax(predictions, axis=1)[0]
    # Map ids back to tokens and join into a single space-separated string.
    answer = tokenizer.sequences_to_texts([results] )
    answertoString = ' '.join([str(elem) for elem in answer])
    return answertoString
    
# Demo metadata shown in the Gradio UI.
title = "Text Generation with Miniature GPT"
description = "Gradio Demo for a miniature with GPT. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
examples = [["Once upon a time, "], ["In a galaxy far far away"]]

# Wire the prediction function into a simple text-in / text-out interface.
iface = gr.Interface(
    fn=generate_answers,
    inputs=['text'],
    outputs=["text"],
    title=title,
    description=description,
    examples=examples,
)
iface.launch(inline=False, share=True)