"""Gradio demo: text generation with a miniature GPT (keras-io pretrained model)."""

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

import gradio as gr
import torch  # kept from original imports; not used directly here
from huggingface_hub import from_pretrained_keras

# Sequence length the demo tokenizes/pads to (matches the original hard-coded 80).
MAX_LEN = 80

model = from_pretrained_keras("keras-io/text-generation-miniature-gpt")

# Load the IMDB vocabulary, one token per line.
# BUGFIX: use a context manager so the file handle is closed.
vocabulary = []
with open("aclImdb/imdb.vocab") as vocab_file:
    for line in vocab_file:
        vocabulary.append(line.strip())
print(len(vocabulary))

# BUGFIX: the original stored word_to_index[index] = word (index -> word),
# which made word_to_index.get(word, default) in generate_answers always miss.
word_to_index = {word: index for index, word in enumerate(vocabulary)}

# BUGFIX: the tokenizer was created inside text_process_pipeline, leaving the
# name undefined in generate_answers. Build and fit it once at module level.
tokenizer = Tokenizer(num_words=MAX_LEN, split=' ')
tokenizer.fit_on_texts(word_to_index.keys())


def text_process_pipeline(text):
    """Tokenize ``text`` and pad the sequences to MAX_LEN (post-padding).

    BUGFIX: the original body referenced the global ``start_prompt`` instead
    of its own ``text`` parameter.
    """
    processed_text = tokenizer.texts_to_sequences(text)
    processed_text = pad_sequences(processed_text, maxlen=MAX_LEN, padding='post')
    return processed_text


def sample_from(logits):
    """Sample one token id from the top-10 logits, weighted by softmax."""
    top_logits, top_indices = tf.math.top_k(logits, k=10, sorted=True)
    indices = np.asarray(top_indices).astype("int32")
    preds = keras.activations.softmax(tf.expand_dims(top_logits, 0))[0]
    preds = np.asarray(preds).astype("float32")
    # BUGFIX: sample from the converted int32 numpy array, not the raw tensor.
    return np.random.choice(indices, p=preds)


def generate_answers(start_prompt):
    """Generate a continuation of ``start_prompt`` (up to ~40 sampled tokens)."""
    num_tokens_generated = 0
    sample_index = len(start_prompt) - 1
    # NOTE(review): iterating a string yields characters, so these lookups are
    # per-character — preserved from the original; verify against the model's
    # expected tokenization.
    start_tokens = [word_to_index.get(token, 1) for token in start_prompt]
    tokens_generated = []
    text_out = text_process_pipeline(start_prompt)
    # BUGFIX: the original called ``mode.predict`` (typo for ``model``).
    predictions, _ = model.predict(text_out)
    while num_tokens_generated <= 40:
        sample_token = sample_from(predictions[0][sample_index])
        tokens_generated.append(sample_token)
        start_tokens.append(sample_token)
        num_tokens_generated = len(tokens_generated)
    text_out = tokenizer.sequences_to_texts([tokens_generated])
    return text_out[0]


examples = [["The movie was nice, "], ["It was showing nothing special to "]]
title = "Text Generation with Miniature GPT"
# BUGFIX: the original string literal was split across a raw newline,
# which is a SyntaxError; joined into a single literal.
description = ("Gradio Demo for a miniature with GPT. To use it, simply add "
               "your text, or click one of the examples to load them. "
               "Read more at the links below.")

iface = gr.Interface(
    fn=generate_answers,
    title=title,
    description=description,
    inputs=['text'],
    outputs=["text"],
    examples=examples,
)
iface.launch(debug=True)