import os, json

import gradio as gr
import pandas as pd
import jsonlines
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns, SearchColumns
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval


def restart_space():
    API.restart_space(repo_id=REPO_ID)


### Space initialisation
try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()
try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()


# LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)

# Load the committed leaderboard entries from the local JSONL file
json_list = []
with jsonlines.open('commit_results.jsonl') as reader:
    for obj in reader:
        # Append each JSON object to the list
        json_list.append(obj)

# _test_data = pd.DataFrame({"Score": [54, 46, 53], "Name": ["MageBench", "MageBench", "MageBench"], "BaseModel": ["GPT-4o", "GPT-4o", "LLaMA"], "Env.": ["Sokoban", "Sokoban", "Football"],
#                            "Target-research": ["Model-Eval-Global", "Model-Eval-Online", "Agent-Eval-Prompt"], "Subset": ["mini", "all", "mini"], "Link": ["xxx", "xxx", "xxx"]})

# Sort entries by score (descending) before building the leaderboard table
json_list = sorted(json_list, key=lambda x: x['Score'], reverse=True)
committed = pd.DataFrame(json_list)

(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)


def init_leaderboard(dataframe):
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    return Leaderboard(
        value=dataframe,
        select_columns=SelectColumns(
            default_selection=["Score", "Name", "BaseModel", "Env.", "Target-research", "Subset", "Link"],
            cant_deselect=["Score", "Name"],
            label="Select Columns to Display:",
        ),
        search_columns=SearchColumns(
            primary_column="Name",
            secondary_columns=["BaseModel", "Target-research"],
            placeholder="Search by work name or base model. To search by base model, type 'basemodel:'",
            label="Search",
        ),
        filter_columns=[
            ColumnFilter("Target-research", type="checkboxgroup", label="Comparison settings for target researches (Single Selection)"),
            # ColumnFilter("BaseModel", type="dropdown", label="Select the base LMM model that fulfills the task."),
            ColumnFilter("Env.", type="checkboxgroup", label="Environment (Single Selection)"),
            ColumnFilter("Subset", type="checkboxgroup", label="Subset (Single Selection)"),
            ColumnFilter("State", type="checkboxgroup", label="Result state (checked or under-review)"),
            # ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
            # ColumnFilter(
            #     AutoEvalColumn.params.name,
            #     type="slider",
            #     min=0.01,
            #     max=150,
            #     label="Select the number of parameters (B)",
            # ),
            # ColumnFilter(
            #     AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
            # ),
        ],
        interactive=False,
    )


all_submissions = []

demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Video('demo.mp4', elem_id="video-player", label="Introduction Video")
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = init_leaderboard(committed)  # LEADERBOARD_DF

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
            with gr.Column():
                with gr.Row():
                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

            with gr.Column():
                with gr.Row():
                    score_input = gr.Textbox(label="Score (float)", placeholder="Enter the score")
                    name_input = gr.Textbox(label="Name (str)", placeholder="Enter the work name")
                    base_model_input = gr.Textbox(label="BaseModel (str)", placeholder="Enter the base model name")
                with gr.Row():
                    env_dropdown = gr.Dropdown(
                        choices=["Sokoban", "Football", "WebUI"],
                        label="Env.",
                        value="Sokoban"
                    )
                    target_research_dropdown = gr.Dropdown(
                        choices=["Model-Eval-Online", "Model-Eval-Global", "Agent-Eval-Prompt", "Agent-Eval-Finetune"],
                        label="Target-research",
                        value="Model-Eval-Online"
                    )
                    subset_dropdown = gr.Dropdown(
                        choices=["mini", "all"],
                        label="Subset",
                        value="mini"
                    )
                link_input = gr.Textbox(label="Link (str)", placeholder="Enter the link")

                submit_button = gr.Button("Upload One Eval")
                with gr.Row():
                    clear_button = gr.Button("Clear Uploads")
                    submit_all_button = gr.Button("Submit All")
                submission_result = gr.Markdown("## Uploaded results")

            def submit_eval(score, name, base_model, env, target_research, subset, link):
                # Handle a single result submission
                result = {
                    "Score": float(score),
                    "Name": name,
                    "BaseModel": base_model,
                    "Env.": env,
                    "Target-research": target_research,
                    "Subset": subset,
                    "Link": link,
                    "State": "Checking",
                }
                # Append the result to the global submission buffer
                global all_submissions
                all_submissions.append(result)
                # Refresh the on-page display of pending uploads
                display_text = "\n".join([json.dumps(submission) for submission in all_submissions])
                return gr.Markdown("## Uploaded results\n\n```json\n" + display_text + "\n```")

            def submit_all():
                json_list = []
                with jsonlines.open('commit_results.jsonl') as reader:
                    for obj in reader:
                        json_list.append(obj)
                global all_submissions
                if len(all_submissions) > 0:
                    json_list.extend(all_submissions)
                    tmp_path = "tmp-output.json"
                    with jsonlines.open(tmp_path, mode='w') as writer:
                        writer.write_all(json_list)
                    print("Uploading eval file")
                    API.upload_file(
                        path_or_fileobj=tmp_path,
                        path_in_repo='commit_results.jsonl',
                        repo_id="microsoft/MageBench-Leaderboard",
                        repo_type="space",
                        commit_message="Add submissions to checking queue",
                    )
                    all_submissions = []
                    return gr.Markdown("## All submissions uploaded successfully! \nThis will re-start the space...")
                else:
                    return gr.Markdown("Please click Upload One Eval to upload some results before you submit.")

            def clear():
                global all_submissions
                all_submissions = []
                return gr.Markdown("## Uploaded results")

            # Click handler for uploading a single result
            submit_button.click(
                submit_eval,
                [score_input, name_input, base_model_input, env_dropdown, target_research_dropdown, subset_dropdown, link_input],
                submission_result,
            )
            # Click handler for committing all buffered results
            submit_all_button.click(
                submit_all,
                inputs=[],
                outputs=submission_result,
            )
            clear_button.click(
                clear,
                [],
                submission_result,
            )

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()