Update app.py
Browse files
Removing unclear pie chart
app.py
CHANGED
@@ -13,7 +13,7 @@ from src.about import (
     LLM_BENCHMARKS_TEXT_1,
     EVALUATION_EXAMPLE_IMG,
     LLM_BENCHMARKS_TEXT_2,
-    ENTITY_DISTRIBUTION_IMG,
+    # ENTITY_DISTRIBUTION_IMG,
     LLM_BENCHMARKS_TEXT_3,
     TITLE,
     LOGO
@@ -83,8 +83,6 @@ token_based_types_leaderboard_df = token_based_types_original_df.copy()
 
 
 def update_df(evaluation_metric, shown_columns, subset="datasets"):
-    print(evaluation_metric)
-
     if subset == "datasets":
         match evaluation_metric:
             case "Span Based":
@@ -506,7 +504,7 @@ with demo:
             gr.Markdown(LLM_BENCHMARKS_TEXT_1, elem_classes="markdown-text")
             gr.HTML(EVALUATION_EXAMPLE_IMG, elem_classes="logo")
             gr.Markdown(LLM_BENCHMARKS_TEXT_2, elem_classes="markdown-text")
-            gr.HTML(ENTITY_DISTRIBUTION_IMG, elem_classes="logo")
+            # gr.HTML(ENTITY_DISTRIBUTION_IMG, elem_classes="logo")
             gr.Markdown(LLM_BENCHMARKS_TEXT_3, elem_classes="markdown-text")
 
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
|
|
13 |
LLM_BENCHMARKS_TEXT_1,
|
14 |
EVALUATION_EXAMPLE_IMG,
|
15 |
LLM_BENCHMARKS_TEXT_2,
|
16 |
+
# ENTITY_DISTRIBUTION_IMG,
|
17 |
LLM_BENCHMARKS_TEXT_3,
|
18 |
TITLE,
|
19 |
LOGO
|
|
|
83 |
|
84 |
|
85 |
def update_df(evaluation_metric, shown_columns, subset="datasets"):
|
|
|
|
|
86 |
if subset == "datasets":
|
87 |
match evaluation_metric:
|
88 |
case "Span Based":
|
|
|
504 |
gr.Markdown(LLM_BENCHMARKS_TEXT_1, elem_classes="markdown-text")
|
505 |
gr.HTML(EVALUATION_EXAMPLE_IMG, elem_classes="logo")
|
506 |
gr.Markdown(LLM_BENCHMARKS_TEXT_2, elem_classes="markdown-text")
|
507 |
+
# gr.HTML(ENTITY_DISTRIBUTION_IMG, elem_classes="logo")
|
508 |
gr.Markdown(LLM_BENCHMARKS_TEXT_3, elem_classes="markdown-text")
|
509 |
|
510 |
with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
|