mgyigit committed on
Commit
6962b8e
·
verified ·
1 Parent(s): ffdf4c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -6
app.py CHANGED
@@ -64,7 +64,7 @@ with block:
64
 
65
  # Leaderboard section with method and metric selectors
66
  leaderboard_method_selector = gr.CheckboxGroup(
67
- choices=method_names, label="Select method_names for Leaderboard", value=method_names, interactive=True
68
  )
69
  leaderboard_metric_selector = gr.CheckboxGroup(
70
  choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
@@ -75,7 +75,7 @@ with block:
75
  baseline_header = ["method_name"] + metric_names
76
  baseline_datatype = ['markdown'] + ['number'] * len(metric_names)
77
 
78
- with gr.Row(show_progress=True):
79
  data_component = gr.components.Dataframe(
80
  value=baseline_value,
81
  headers=baseline_header,
@@ -84,7 +84,7 @@ with block:
84
  interactive=False,
85
  visible=True,
86
  )
87
-
88
  # Update leaderboard when method/metric selection changes
89
  leaderboard_method_selector.change(
90
  update_leaderboard,
@@ -97,7 +97,15 @@ with block:
97
  outputs=data_component
98
  )
99
 
100
-
 
 
 
 
 
 
 
 
101
  # Dropdown for benchmark type
102
  benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
103
 
@@ -114,8 +122,9 @@ with block:
114
  # Button to draw the plot for the selected benchmark
115
 
116
  plot_button = gr.Button("Plot")
117
-
118
- plot_output = gr.Image(label="Plot")
 
119
 
120
  # Update selectors when benchmark type changes
121
  benchmark_type_selector.change(
 
64
 
65
  # Leaderboard section with method and metric selectors
66
  leaderboard_method_selector = gr.CheckboxGroup(
67
+ choices=method_names, label="Select Methods for Leaderboard", value=method_names, interactive=True
68
  )
69
  leaderboard_metric_selector = gr.CheckboxGroup(
70
  choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
 
75
  baseline_header = ["method_name"] + metric_names
76
  baseline_datatype = ['markdown'] + ['number'] * len(metric_names)
77
 
78
+ with gr.Row(show_progress=True, variant='panel'):
79
  data_component = gr.components.Dataframe(
80
  value=baseline_value,
81
  headers=baseline_header,
 
84
  interactive=False,
85
  visible=True,
86
  )
87
+
88
  # Update leaderboard when method/metric selection changes
89
  leaderboard_method_selector.change(
90
  update_leaderboard,
 
97
  outputs=data_component
98
  )
99
 
100
+ with gr.Row():
101
+ gr.Markdown(
102
+ """
103
+ **Below, you can visualize the results displayed in the Leaderboard.**
104
+ Once you choose a benchmark type, the related options for metrics, datasets, and other parameters will become visible.
105
+ Select the methods and metrics of interest from the options above to generate visualizations.
106
+ """
107
+ )
108
+
109
  # Dropdown for benchmark type
110
  benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
111
 
 
122
  # Button to draw the plot for the selected benchmark
123
 
124
  plot_button = gr.Button("Plot")
125
+
126
+ with gr.Row(show_progress=True, variant='panel'):
127
+ plot_output = gr.Image(label="Plot")
128
 
129
  # Update selectors when benchmark type changes
130
  benchmark_type_selector.change(