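"""Aggregate section-level prediction results into a per-language metric table.

Assumed layout, inferred from the loops below: each method directory under
./result contains msg.jsonl and nomsg.jsonl, where every record looks like
{"language": ..., "sections": [{"related": 0/1, "predict": 0/1}, ...]}.
Accuracy, F1, and MCC are written to result.csv as LaTeX-escaped percentages.
"""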
import os
from collections import defaultdict

import jsonlines
import pandas as pd
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    f1_score,
    matthews_corrcoef,
    precision_score,
    recall_score,
)

RESULT_ROOTS = "./result"
LANGUAGE_MAP = {
    "all": "All",
    "C": "C/C++",
    "C++": "C/C++",
    "Java": "Java",
    "Python": "Python",
}
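# NOTE: C and C++ are pooled into a single "C/C++" bucket, so the values of
# LANGUAGE_MAP contain a duplicate; the metric loop below deduplicates them.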
table_dict = {}
for method in os.listdir(RESULT_ROOTS):
    # Per-language pools of labels and predictions for each setting.
    msg_labels = defaultdict(list)
    msg_predicts = defaultdict(list)
    msg_metrics = {}
    nomsg_labels = defaultdict(list)
    nomsg_predicts = defaultdict(list)
    nomsg_metrics = {}
    mix_labels = defaultdict(list)
    mix_predicts = defaultdict(list)
    mix_metrics = {}
    msg_result_file = os.path.join(RESULT_ROOTS, method, "msg.jsonl")
    nomsg_result_file = os.path.join(RESULT_ROOTS, method, "nomsg.jsonl")
    # Skip methods that are missing either result file.
    if not os.path.exists(msg_result_file) or not os.path.exists(nomsg_result_file):
        continue
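    # "msg" and "nomsg" appear to name two evaluation settings (presumably with
    # and without message context); this script treats them as opaque labels.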
    # Pool section-level labels and predictions per language; the "mix" pools
    # accumulate both settings together.
    for result_file, labels, predicts in (
        (msg_result_file, msg_labels, msg_predicts),
        (nomsg_result_file, nomsg_labels, nomsg_predicts),
    ):
        with jsonlines.open(result_file) as reader:
            for item in reader:
                lang = LANGUAGE_MAP[item["language"]]
                for section in item["sections"]:
                    for key in ("All", lang):
                        labels[key].append(section["related"])
                        predicts[key].append(section["predict"])
                        mix_labels[key].append(section["related"])
                        mix_predicts[key].append(section["predict"])
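
    # Compute accuracy, F1, and MCC per setting and language, formatted as
    # LaTeX-escaped percentage strings; precision, recall, and FPR variants
    # are kept commented out below.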
    for labels, predicts, metrics in (
        (msg_labels, msg_predicts, msg_metrics),
        (nomsg_labels, nomsg_predicts, nomsg_metrics),
        (mix_labels, mix_predicts, mix_metrics),
    ):
        # dict.fromkeys() drops the duplicate "C/C++" entry among the values.
        for lang in dict.fromkeys(LANGUAGE_MAP.values()):
            accuracy = accuracy_score(labels[lang], predicts[lang])
            # precision = precision_score(labels[lang], predicts[lang])
            # recall = recall_score(labels[lang], predicts[lang])
            f1 = f1_score(labels[lang], predicts[lang])
            mcc = matthews_corrcoef(labels[lang], predicts[lang])
            # sklearn's binary confusion_matrix flattens to (tn, fp, fn, tp).
            tn, fp, fn, tp = confusion_matrix(labels[lang], predicts[lang]).ravel()
            fpr = fp / (fp + tn + 1e-6)  # epsilon guards against an empty negative class
            metrics.update({
                f"{lang}_Acc": f"{accuracy * 100:.2f}\\%",
                # f"{lang}_P": f"{precision * 100:.2f}\\%",
                # f"{lang}_R": f"{recall * 100:.2f}\\%",
                f"{lang}_F1": f"{f1 * 100:.2f}\\%",
                # f"{lang}_FPR": f"{fpr * 100:.2f}\\%",
                f"{lang}_MCC": f"{mcc * 100:.2f}\\%",
            })
    table_dict[method] = mix_metrics
    # Keep the per-setting breakdown only for the "patchouli" method.
    if method == "patchouli":
        table_dict[f"{method}_msg"] = msg_metrics
        table_dict[f"{method}_nomsg"] = nomsg_metrics

df = pd.DataFrame(table_dict).T
df.to_csv("result.csv")
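
# A minimal sketch of the assumed input format (method name and values are
# illustrative, not taken from real results):
#
#     with jsonlines.open("./result/some_method/msg.jsonl", mode="w") as writer:
#         writer.write({
#             "language": "Python",
#             "sections": [{"related": 1, "predict": 1},
#                          {"related": 0, "predict": 0}],
#         })
#
# result.csv then holds one row per method (plus "patchouli_msg" and
# "patchouli_nomsg" breakdown rows) with columns such as "All_Acc",
# "C/C++_F1", and "Java_MCC".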