RamAnanth1 committed
Commit 730ef21 · 1 Parent(s): b463202

Update app.py

Files changed (1):
  1. app.py (+6 -3)
app.py CHANGED
@@ -29,16 +29,19 @@ def process(model_A, model_B):
     prompt = sample["prompt"]
 
     df = pd.DataFrame.from_records(sample["filtered_outputs"])
-    response_A = df[df['model']==model_A]["output"]
+    response_A_df = df[df['model']==model_A]["output"]
+    response_B_df= df[df['model']==model_B]["output"]
+
+    response_A = response_A_df.values
+    response_B = response_B_df.values
     print(response_A)
-    response_B = df[df['model']==model_B]["output"]
 
 
     input_text = "POST: "+ prompt+ "\n\n RESPONSE A: "+response_A+"\n\n RESPONSE B: "+response_B+"\n\n Which response is better? RESPONSE"
     x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
     y = model.generate(x, max_new_tokens=1)
     prefered = tokenizer.batch_decode(y, skip_special_tokens=True)[0]
-    return pd.concat([response_A,response_B]), prefered
+    return pd.concat([response_A_df,response_B_df]), prefered
 
 title = "Compare Instruction Models to see which one is more helpful"
 interface = gr.Interface(fn=process,
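
For context: the commit keeps the filtered outputs in two forms. The pandas Series (response_A_df, response_B_df) still feed the pd.concat(...) returned for display, while .values pulls out the underlying arrays whose elements are spliced into the prompt string. Below is a minimal, self-contained sketch of that Series-versus-values distinction; the two-row DataFrame and the model names are made-up stand-ins for the app's sample data, not code from this repository.

import pandas as pd

# Hypothetical stand-in for sample["filtered_outputs"]: one row per model.
df = pd.DataFrame.from_records([
    {"model": "model_A", "output": "Answer from A"},
    {"model": "model_B", "output": "Answer from B"},
])

# Filtering a column keeps it as a pandas Series; pd.concat on the two Series
# rebuilds a single Series, which is what the app returns for display.
response_A_df = df[df["model"] == "model_A"]["output"]
response_B_df = df[df["model"] == "model_B"]["output"]
combined = pd.concat([response_A_df, response_B_df])

# .values exposes the underlying object-dtype array; its elements are plain
# Python strings, so they can be concatenated into a prompt.
response_A = response_A_df.values
response_B = response_B_df.values
prompt_piece = "RESPONSE A: " + response_A[0] + "\n\n RESPONSE B: " + response_B[0]

print(combined)
print(prompt_piece)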