import json
import re
from argparse import ArgumentParser

import numpy as np
import torch
import tqdm
from torch import bfloat16
from transformers import AutoModelForCausalLM, AutoTokenizer
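# LLM-as-judge evaluation helpers: a judge model (a Mistral-instruct or NousResearch ChatML
# checkpoint) compares each predicted answer with the reference answer and replies with a
# dictionary string such as {'pred': 'yes', 'score': 4}, which is parsed and aggregated below.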
|
def creat_prompt(model_id, tokenizer=None, question=None, answer=None, pred=None):
    """Build the judge prompt for a single question / reference answer / prediction triple."""
    messages = [
        {
            "role": "system",
            "content":
                "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. "
                "Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:\n"
                "------\n"
                "##INSTRUCTIONS:\n"
                "- Focus on the meaningful match between the predicted answer and the correct answer.\n"
                "- Consider synonyms or paraphrases as valid matches.\n"
                "- Evaluate the correctness of the prediction compared to the answer."
        },
        {
            "role": "user",
            "content":
                "Please evaluate the following question-answer pair:\n\n"
                f"Question: {question.capitalize()}\n"
                f"Correct Answer: {answer.lower()}\n"
                f"Predicted Answer: {pred.lower()}\n\n"
                "Evaluate if the answer is correct with yes/no and assign a correctness score between 0 and 5, where 0 indicates incorrect answer, and 5 signifies the highest meaningful match. "
                "Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. "
                "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. "
                "For example, your response should look like this: {'pred': 'no', 'score': 0}."
        }
    ]

    if 'mistralai' in model_id:
        # Mistral instruction format: build the [INST] prompt manually instead of using a chat template.
        prompt = f'<s>[INST] {messages[0]["content"].strip()}\n\n{messages[1]["content"].strip()} [/INST]'
    elif 'NousResearch' in model_id:
        # ChatML-style models: apply the tokenizer's chat template and open the assistant turn.
        prompt = tokenizer.apply_chat_template(messages, tokenize=False)
        prompt = prompt + '<|im_start|>assistant'
    else:
        raise NotImplementedError
    return prompt
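# Illustrative call (a sketch; the checkpoint name is an assumption, any Hugging Face id
# containing 'mistralai' or 'NousResearch' selects the matching prompt format):
#   prompt = creat_prompt('mistralai/Mixtral-8x7B-Instruct-v0.1',
#                         question='What is the capital of France?',
#                         answer='Paris', pred='paris')
#   -> a single '<s>[INST] ... [/INST]' string ready for tokenization.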
|
|
def calculate_ins_level_score(results):
    """Average the per-category scores, weighted by the number of examples in each category."""
    acc = 0
    ins_num = 0
    for cat_results in results.values():
        acc += cat_results['avg_score'] * cat_results['num_example']
        ins_num += cat_results['num_example']
    if ins_num == 0:
        return 0
    return acc / ins_num
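# Worked example (hypothetical numbers):
#   results = {'art': {'avg_score': 4.0, 'num_example': 10},
#              'science': {'avg_score': 2.0, 'num_example': 30}}
#   -> (4.0 * 10 + 2.0 * 30) / 40 = 2.5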
|
|
def LLM_eval(model_id, model, tokenizer, batch_size, samples, cuda=True):
    """Judge each sample's prediction with the evaluator model and aggregate accuracy and score."""
    steps = int(np.ceil(len(samples) / batch_size))
    evals = []
    for step in tqdm.tqdm(range(steps)):
        prompts = []
        for item in samples[step * batch_size: (step + 1) * batch_size]:
            # Drop the image placeholder and any trailing "Context:" block from the question text.
            question = item['question'].replace('<image>', '').strip()
            question = question.split('Context:')[0].strip()
            answer = item['answer']
            pred = item['parsed_pred']
            prompt = creat_prompt(model_id, tokenizer, question, answer, pred)
            prompts.append(prompt)

        inputs = tokenizer(prompts, return_tensors="pt", padding=True)
        if cuda:
            inputs = {k: v.cuda() for k, v in inputs.items()}
        with torch.inference_mode():
            output_ids = model.generate(
                **inputs,
                use_cache=True,
                max_new_tokens=20,
                pad_token_id=tokenizer.eos_token_id,
            )
        # Decode only the newly generated tokens; the prompt occupies the first input_ids.shape[1] positions.
        outputs = tokenizer.batch_decode(output_ids[:, inputs['input_ids'].shape[1]:], skip_special_tokens=True)
        evals.extend(outputs)
|
    judge_dict = dict()
    pred_correct = 0
    score_sum = 0

    for sample_data, sample_eval in zip(samples, evals):
        try:
            # Pull the first {...} block out of the judge's reply and parse it as JSON.
            sample_eval = re.match(r".*(\{.*?\}).*", sample_eval, re.S).group(1)
            sample_eval = sample_eval.replace("'", '"')
            sample_eval = json.loads(sample_eval)
            pred = sample_eval['pred']
            sample_score = sample_eval['score']

            if pred == 'yes':
                judge_dict[sample_data['id']] = {'pred': 'Correct', 'score': sample_score}
                pred_correct += 1
            else:
                judge_dict[sample_data['id']] = {'pred': 'Wrong', 'score': sample_score}
            score_sum += sample_score

        except Exception:
            # Unparseable judge output counts as wrong with a score of 0.
            judge_dict[sample_data['id']] = {'pred': 'Wrong', 'score': 0}

    if len(samples) == 0:
        return judge_dict, {'acc': 0, 'avg_score': 0}
    return judge_dict, {'acc': pred_correct / len(samples), 'avg_score': score_sum / len(samples)}
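# Minimal driver sketch (the checkpoint name, batch size, and sample values below are
# illustrative assumptions; the dict keys mirror the ones LLM_eval reads above):
#   model_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
#   tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side='left')
#   model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=bfloat16, device_map='auto')
#   samples = [{'id': 'q0', 'question': 'What animal is shown? <image>', 'answer': 'a dog',
#               'parsed_pred': 'it looks like a dog'}]
#   judge_dict, metrics = LLM_eval(model_id, model, tokenizer, batch_size=8, samples=samples)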
|
|
def run_mixtral_eval():
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)
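    # Note (assumption about the intended setup): decoder-only judge models are usually
    # left-padded for batched generation; consider passing padding_side='left' here so the
    # decoded slice in LLM_eval only contains newly generated tokens.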