RamAnanth1 committed on
Commit
b1d6e77
·
1 Parent(s): ce419f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
def process():
    """Sample one prompt from the loaded dataset, ask the reward model which
    of two fixed candidate responses it prefers, and return both the decoded
    verdict and a table of the sample's stored model outputs.

    Returns:
        tuple: (preferred, df) where `preferred` is the single decoded token
        produced by the reward model and `df` is a DataFrame built from the
        sample's "filtered_outputs" records.
    """
    # Draw a single random example from the dataset split loaded at module level.
    sample_ds = ds.shuffle().select(range(1))
    sample = sample_ds[0]

    # Tabulate the stored model outputs for display in the UI.
    df = pd.DataFrame.from_records(sample["filtered_outputs"])

    # NOTE(review): responses A and B are hard-coded demo text rather than
    # taken from the sample's outputs — confirm this is intentional.
    input_text = "POST: " + sample["prompt"] + "\n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n Which response is better? RESPONSE"
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    # One new token is enough to decode an "A"/"B"-style verdict.
    y = model.generate(x, max_new_tokens=1)
    preferred = tokenizer.batch_decode(y, skip_special_tokens=True)[0]
    # BUG FIX: the preference verdict was computed but discarded, and raw
    # records were returned into a lone Textbox. Return both values and
    # declare matching output components below.
    return preferred, df


# Page title shown above the Gradio interface.
title = "Compare Instruction Models to see which one is more helpful"
interface = gr.Interface(fn=process,
                         inputs=[],
                         outputs=[
                             gr.Textbox(label="Preferred Option"),
                             gr.DataFrame(label="Model Responses"),
                         ],
                         title=title,
                         )
 
def process():
    """Pick a random dataset sample and let the reward model judge which of
    two fixed candidate responses is better.

    Returns:
        tuple: (decoded preference token, DataFrame of the sample's
        "filtered_outputs" records).
    """
    # One random row from the dataset split loaded at module level.
    sample = ds.shuffle().select(range(1))[0]
    prompt = sample["prompt"]

    # Stored model outputs, tabulated for the UI.
    df = pd.DataFrame.from_records(sample["filtered_outputs"])

    # Build the comparison prompt; candidate responses are fixed demo text.
    input_text = (
        "POST: "
        + prompt
        + "\n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities."
        + "\n\n RESPONSE B: Lime marmalade lol"
        + "\n\n Which response is better? RESPONSE"
    )
    token_ids = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    # A single generated token carries the verdict.
    generated = model.generate(token_ids, max_new_tokens=1)
    verdict = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
    return verdict, df
32
 
# Page title shown above the interface.
title = "Compare Instruction Models to see which one is more helpful"

# No input components: each click draws a fresh random sample inside process().
# The two outputs mirror process()'s return value: (verdict text, outputs table).
interface = gr.Interface(
    fn=process,
    inputs=[],
    outputs=[
        gr.Textbox(label="Preferred Option"),
        gr.DataFrame(label="Model Responses"),
    ],
    title=title,
)