import gradio as gr
import tensorflow as tf
import numpy as np
from transformers import AutoTokenizer
from huggingface_hub import from_pretrained_keras

# The BERT tokenizer is only used to turn text into token ids and back again;
# the model itself is the Keras miniature GPT hosted on the Hub.
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = from_pretrained_keras("keras-io/text-generation-miniature-gpt")
def tokenize_data(text):
    # Tokenize the input prompt
    input_ = str(text) + " </s>"
    max_len = 80
    # Pad/truncate to the model's fixed sequence length and return NumPy arrays,
    # which the Keras model can consume directly.
    tokenized_inputs = tokenizer(input_, padding="max_length", truncation=True, max_length=max_len,
                                 return_attention_mask=True, return_tensors="np")
    inputs = {"input_ids": tokenized_inputs["input_ids"],
              "attention_mask": tokenized_inputs["attention_mask"]}
    return inputs
def generate_answers(text):
    inputs = tokenize_data(text)
    # Cast token ids to int32 to match the model's integer input layer.
    x = tf.cast(inputs["input_ids"], tf.int32)
    # The Keras model returns (per-position token logits, transformer hidden states);
    # only the logits are needed here.
    predictions, _ = model.predict(x)
    # Greedy decoding: take the most likely token id at every position.
    results = np.argmax(predictions, axis=-1)
    answer = tokenizer.decode(results[0].flatten(), skip_special_tokens=True)
    return answer
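# Note: generate_answers() runs a single forward pass and greedily decodes every
# position at once, so the output is not a true continuation of the prompt.
# Below is a minimal sketch of step-by-step (autoregressive) generation. It assumes
# the model returns per-position logits of shape (1, max_len, vocab_size), as the
# keras-io miniature GPT does, and that 0 is a safe padding id; the function name,
# the steps parameter, and the fixed max_len of 80 are illustrative choices.
def generate_autoregressively(text, steps=20, max_len=80):
    ids = tokenizer(str(text), return_tensors="np")["input_ids"][0].tolist()
    for _ in range(steps):
        # Keep the most recent max_len tokens and pad the window to the model's fixed input length.
        window = ids[-max_len:]
        padded = np.array([window + [0] * (max_len - len(window))], dtype="int32")
        logits, _ = model.predict(padded, verbose=0)
        # Append the most likely token predicted at the last real position.
        next_id = int(np.argmax(logits[0, len(window) - 1]))
        ids.append(next_id)
    return tokenizer.decode(ids, skip_special_tokens=True)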
examples = [["Once upon a time, "], ["In a galaxy far far away"]]
title = "Text Generation with Miniature GPT"
description = "Gradio demo for the keras-io miniature GPT text-generation model. Type some text, or click one of the examples to load it."
iface = gr.Interface(fn=generate_answers, title=title, description=description, inputs=["text"], outputs=["text"], examples=examples)
iface.launch(inline=False, share=True)