import os

import gradio as gr
import pandas as pd
from datasets import load_dataset
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = 'cpu'  # change to 'cuda' if you have a GPU

# SteamSHP is a preference model: given a post and two responses,
# it predicts which response readers would find more helpful.
tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large')
model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large').to(device)

HF_TOKEN = os.getenv("HF_TOKEN")
OUTPUTS_DATASET = "HuggingFaceH4/instruction-pilot-outputs-filtered"
ds = load_dataset(OUTPUTS_DATASET, split="train", use_auth_token=HF_TOKEN)
def process():
    # Draw one random prompt (and its candidate model outputs) from the dataset
    sample = ds.shuffle().select(range(1))[0]
    df = pd.DataFrame.from_records(sample["filtered_outputs"])
    # SteamSHP input format: the post plus two candidate responses (hard-coded here)
    input_text = "POST: " + sample["prompt"] + "\n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n Which response is better? RESPONSE"
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    y = model.generate(x, max_new_tokens=1)  # the model replies with a single token: 'A' or 'B'
    preferred = tokenizer.batch_decode(y, skip_special_tokens=True)[0]
    return sample["filtered_outputs"]
title = "Compare Instruction Models to see which one is more helpful"
interface = gr.Interface(
    fn=process,
    inputs=[],
    outputs=[gr.Textbox(label="Responses")],
    title=title,
)
interface.launch(debug=True)
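
# Note: `preferred` is computed inside process() but never surfaced in the UI.
# A minimal sketch of one way to show it, reusing the globals defined above
# (ds, tokenizer, model, device); the helper name `process_with_label` and the
# output format are illustrative additions, not part of the original app.
def process_with_label():
    sample = ds.shuffle().select(range(1))[0]
    input_text = "POST: " + sample["prompt"] + "\n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n Which response is better? RESPONSE"
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    y = model.generate(x, max_new_tokens=1)
    preferred = tokenizer.batch_decode(y, skip_special_tokens=True)[0]
    # Return the candidate outputs together with SteamSHP's 'A'/'B' verdict
    return f"SteamSHP prefers response {preferred}\n\n{sample['filtered_outputs']}"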