RamAnanth1 committed on
Commit
2f5c740
·
1 Parent(s): b1d6e77

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -3
app.py CHANGED
@@ -11,28 +11,38 @@ device = 'cpu' # if you have a GPU
11
# SteamSHP preference model: given a post and two candidate responses,
# it generates a single token indicating which response is better.
tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large')
model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large').to(device)

# Access token read from the environment; required for the gated dataset below.
HF_TOKEN = os.getenv("HF_TOKEN")

# Dataset of prompts, each paired with filtered outputs from several models
# (the code below reads its "prompt" and "filtered_outputs" fields).
OUTPUTS_DATASET = "HuggingFaceH4/instruction-pilot-outputs-filtered"
ds = load_dataset(OUTPUTS_DATASET, split="train", use_auth_token=HF_TOKEN)
19
 
20
def process():
    """Draw one random prompt from the dataset, ask SteamSHP which of two
    (currently hard-coded) responses is better, and return the decoded
    preference token together with the prompt's filtered model outputs."""
    picked = ds.shuffle().select(range(1))[0]
    prompt = picked["prompt"]
    df = pd.DataFrame.from_records(picked["filtered_outputs"])

    # NOTE(review): the two candidate responses are hard-coded rather than
    # taken from `df` — presumably placeholder text; verify intent.
    input_text = (
        f"POST: {prompt}\n\n RESPONSE A: Lime juice, and zest, then freeze"
        " in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n"
        " Which response is better? RESPONSE"
    )
    token_ids = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    generation = model.generate(token_ids, max_new_tokens=1)
    preferred = tokenizer.batch_decode(generation, skip_special_tokens=True)[0]
    return preferred, df
32
 
33
  title = "Compare Instruction Models to see which one is more helpful"
34
  interface = gr.Interface(fn=process,
35
- inputs=[],
 
36
  outputs=[
37
  gr.Textbox(label = "Preferred Option"),
38
  gr.DataFrame(label = "Model Responses")
 
11
# SteamSHP preference model (the judge): reads a post plus two candidate
# responses and generates a single token naming the better one.
_SHP_CHECKPOINT = 'stanfordnlp/SteamSHP-flan-t5-large'
tokenizer = T5Tokenizer.from_pretrained(_SHP_CHECKPOINT)
model = T5ForConditionalGeneration.from_pretrained(_SHP_CHECKPOINT).to(device)
13
 
14
# Instruction-tuned models whose outputs the user can compare in the UI.
model_list = [
    'google/flan-t5-xxl',
    'bigscience/bloomz-7b1',
    'facebook/opt-iml-max-30b',
    'allenai/tk-instruct-11b-def-pos',
]
19
+
20
# Access token read from the environment; required for the gated dataset below.
HF_TOKEN = os.getenv("HF_TOKEN")

# Dataset of prompts, each paired with filtered outputs from several models
# (the code below reads its "prompt" and "filtered_outputs" fields).
OUTPUTS_DATASET = "HuggingFaceH4/instruction-pilot-outputs-filtered"
ds = load_dataset(OUTPUTS_DATASET, split="train", use_auth_token=HF_TOKEN)
25
 
26
def process(model_A, model_B):
    """Sample one prompt and have SteamSHP judge the two selected models.

    Args:
        model_A: model id whose output fills the "RESPONSE A" slot.
        model_B: model id whose output fills the "RESPONSE B" slot.

    Returns:
        (preferred, responses): the decoded preference token (expected to
        be "A" or "B") and a DataFrame holding both models' rows from the
        sampled record.
    """
    sample = ds.shuffle().select(range(1))[0]
    prompt = sample["prompt"]
    df = pd.DataFrame.from_records(sample["filtered_outputs"])

    response_A = df[df['model'] == model_A]
    response_B = df[df['model'] == model_B]

    # NOTE(review): assumes the first non-'model' column of the records
    # holds the generated text — confirm against the dataset schema.
    text_col = next((c for c in df.columns if c != 'model'), None)
    text_A = str(response_A[text_col].iloc[0]) if text_col is not None and not response_A.empty else ""
    text_B = str(response_B[text_col].iloc[0]) if text_col is not None and not response_B.empty else ""

    # Bug fix: the prompt previously embedded two hard-coded example
    # responses, so the dropdown selections never reached the judge.
    input_text = (
        f"POST: {prompt}\n\n RESPONSE A: {text_A}\n\n RESPONSE B: {text_B}"
        "\n\n Which response is better? RESPONSE"
    )
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    y = model.generate(x, max_new_tokens=1)
    preferred = tokenizer.batch_decode(y, skip_special_tokens=True)[0]

    # Bug fix: return both candidates (the UI labels this "Model Responses"),
    # not only model A's rows.
    return preferred, pd.concat([response_A, response_B])
41
 
42
  title = "Compare Instruction Models to see which one is more helpful"
43
  interface = gr.Interface(fn=process,
44
+ inputs=[gr.Dropdown(choices=model_list, value=model_list[0], label='Model A'),
45
+ gr.Dropdown(choices=model_list, value=model_list[1], label='Model B')],
46
  outputs=[
47
  gr.Textbox(label = "Preferred Option"),
48
  gr.DataFrame(label = "Model Responses")