import os

import gradio as gr
import pandas as pd
from datasets import load_dataset
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = 'cpu'  # switch to 'cuda' if a GPU is available

tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large')
model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large').to(device)
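# SteamSHP is a preference model trained on the Stanford Human Preferences
# (SHP) data: given a POST and two candidate responses, it generates a
# single token, "A" or "B", naming the response raters would likely prefer.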

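# HF_TOKEN must be set in the environment so load_dataset can access the
# dataset (required if it is private or gated).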
HF_TOKEN = os.getenv("HF_TOKEN")

OUTPUTS_DATASET = "HuggingFaceH4/instruction-pilot-outputs-filtered"

ds = load_dataset(OUTPUTS_DATASET, split="train", use_auth_token=HF_TOKEN)

def process():
    # Draw one random example from the dataset
    sample = ds.shuffle().select(range(1))[0]

    df = pd.DataFrame.from_records(sample["filtered_outputs"])  # currently unused

    # SteamSHP input format: a Reddit-style POST followed by two candidate
    # responses. Responses A and B below are the example strings from the
    # SteamSHP model card.
    input_text = (
        "POST: " + sample["prompt"]
        + "\n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities."
        + "\n\n RESPONSE B: Lime marmalade lol"
        + "\n\n Which response is better? RESPONSE"
    )
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    y = model.generate(x, max_new_tokens=1)
    # The verdict is the single generated token, "A" or "B"
    preferred = tokenizer.batch_decode(y, skip_special_tokens=True)[0]
    return sample["filtered_outputs"]

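# Gradio UI: no inputs; pressing the button samples a random prompt and
# displays its candidate responses.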
title = "Compare Instruction Models to see which one is more helpful"
interface = gr.Interface(
    fn=process,
    inputs=[],
    outputs=[gr.Textbox(label="Responses")],
    title=title,
)

interface.launch(debug=True)
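
# A possible extension (hypothetical, not part of the original app): return
# the SteamSHP verdict alongside the responses so the UI shows which one the
# preference model picked, e.g.
#
#     return sample["filtered_outputs"], f"SteamSHP prefers response {preferred}"
#
# with a matching second component in `outputs`:
#
#     outputs=[gr.Textbox(label="Responses"), gr.Textbox(label="Preference")]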