Pendrokar committed on
Commit 1593f23 · 1 Parent(s): cc1e3ba

leaderboard medals & links

Files changed (2)
  1. app/leaderboard.py +75 -29
  2. app/ui_leaderboard.py +12 -6
app/leaderboard.py CHANGED
@@ -3,38 +3,22 @@ from .db import *
 from .models import *
 
 import pandas as pd
-def get_leaderboard(reveal_prelim = False, hide_battle_votes = False):
+def get_leaderboard(reveal_prelim = False):
+
     conn = get_db()
     cursor = conn.cursor()
-
-    if hide_battle_votes:
-        sql = '''
-            SELECT m.name,
-                SUM(CASE WHEN v.username NOT LIKE '%_battle' AND v.vote = 1 THEN 1 ELSE 0 END) as upvote,
-                SUM(CASE WHEN v.username NOT LIKE '%_battle' AND v.vote = -1 THEN 1 ELSE 0 END) as downvote
-            FROM model m
-            LEFT JOIN vote v ON m.name = v.model
-            GROUP BY m.name
-        '''
-    else:
-        sql = '''
-            SELECT name,
-                SUM(CASE WHEN vote = 1 THEN 1 ELSE 0 END) as upvote,
-                SUM(CASE WHEN vote = -1 THEN 1 ELSE 0 END) as downvote
-            FROM model
-            LEFT JOIN vote ON model.name = vote.model
-            GROUP BY name
-        '''
-
+    sql = 'SELECT name, upvote, downvote, name AS orig_name FROM model'
+    # if not reveal_prelim: sql += ' WHERE EXISTS (SELECT 1 FROM model WHERE (upvote + downvote) > 750)'
+    if not reveal_prelim: sql += ' WHERE (upvote + downvote) > 300'
     cursor.execute(sql)
     data = cursor.fetchall()
-    df = pd.DataFrame(data, columns=['name', 'upvote', 'downvote'])
+    df = pd.DataFrame(data, columns=['name', 'upvote', 'downvote', 'orig_name'])
+    # df['license'] = df['name'].map(model_license)
     df['name'] = df['name'].replace(model_names)
+    for i in range(len(df)):
+        df.loc[i, "name"] = make_link_to_space(df['name'][i], True)
     df['votes'] = df['upvote'] + df['downvote']
-
-    # Filter out rows with insufficient votes if not revealing preliminary results
-    if not reveal_prelim:
-        df = df[df['votes'] > 500]
+    # df['score'] = round((df['upvote'] / df['votes']) * 100, 2) # Percentage score
 
     ## ELO SCORE
     df['score'] = 1200
@@ -46,14 +30,76 @@ def get_leaderboard(reveal_prelim = False, hide_battle_votes = False):
                 expected_b = 1 / (1 + 10 ** ((df['score'].iloc[i] - df['score'].iloc[j]) / 400))
                 actual_a = df['upvote'].iloc[i] / df['votes'].iloc[i] if df['votes'].iloc[i] > 0 else 0.5
                 actual_b = df['upvote'].iloc[j] / df['votes'].iloc[j] if df['votes'].iloc[j] > 0 else 0.5
-                df.iloc[i, df.columns.get_loc('score')] += 32 * (actual_a - expected_a)
-                df.iloc[j, df.columns.get_loc('score')] += 32 * (actual_b - expected_b)
+                df.at[i, 'score'] += round(32 * (actual_a - expected_a))
+                df.at[j, 'score'] += round(32 * (actual_b - expected_b))
             except Exception as e:
                 print(f"Error in ELO calculation for rows {i} and {j}: {str(e)}")
                 continue
     df['score'] = round(df['score'])
     ## ELO SCORE
     df = df.sort_values(by='score', ascending=False)
-    df['order'] = ['#' + str(i + 1) for i in range(len(df))]
+    # medals
+    def assign_medal(rank, assign):
+        rank = str(rank + 1)
+        if assign:
+            if rank == '1':
+                rank += '🥇'
+            elif rank == '2':
+                rank += '🥈'
+            elif rank == '3':
+                rank += '🥉'
+
+        return '#'+ rank
+
+    df['order'] = [assign_medal(i, not reveal_prelim and len(df) > 2) for i in range(len(df))]
+    # fetch top_five
+    top_five = []
+    for orig_name in df['orig_name']:
+        if (
+            reveal_prelim
+            and len(top_five) < 5
+            and orig_name in AVAILABLE_MODELS.keys()
+        ):
+            top_five.append(orig_name)
+
     df = df[['order', 'name', 'score', 'votes']]
     return df
+
+def make_link_to_space(model_name, for_leaderboard=False):
+    # create a anchor link if a HF space
+    style = 'text-decoration: underline;text-decoration-style: dotted;'
+    title = ''
+
+    if model_name in AVAILABLE_MODELS:
+        style += 'color: var(--link-text-color);'
+        title = model_name
+    else:
+        style += 'font-style: italic;'
+        title = 'Disabled for Arena (See AVAILABLE_MODELS within code for why)'
+
+    model_basename = model_name
+    if model_name in HF_SPACES:
+        model_basename = HF_SPACES[model_name]['name']
+
+    try:
+        if(
+            for_leaderboard
+            and HF_SPACES[model_name]['is_proprietary']
+        ):
+            model_basename += ' 🔐'
+            title += '; 🔐 = online only or proprietary'
+    except:
+        pass
+
+    if '/' in model_name:
+        return '🤗 <a target="_blank" style="'+ style +'" title="'+ title +'" href="'+ 'https://huggingface.co/spaces/'+ model_name +'">'+ model_basename +'</a>'
+
+    # otherwise just return the model name
+    return '<span style="'+ style +'" title="'+ title +'" href="'+ 'https://huggingface.co/spaces/'+ model_name +'">'+ model_name +'</span>'
+
+def markdown_link_to_space(model_name):
+    # create a anchor link if a HF space using markdown syntax
+    if '/' in model_name:
+        return '🤗 [' + model_name + '](https://huggingface.co/spaces/' + model_name + ')'
+    # otherwise just return the model name
+    return model_name
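The score column above is an Elo-style rating rather than a raw vote percentage: for every pair of models the expected result comes from the logistic formula 1 / (1 + 10 ** ((R_b - R_a) / 400)), and each rating moves by a K-factor of 32 times the gap between the observed upvote ratio and that expectation. The standalone sketch below mirrors that update so it can be run outside the app. The sample rows are invented, and the expected_a line plus the surrounding pairwise loop are inferred from the visible expected_b, actual_a and df.at[i, 'score'] lines, since the loop header sits outside the changed hunk.

# Standalone sketch (not part of the commit): Elo-style pairwise update
import pandas as pd

# Illustrative stand-in data; in the app these rows come from the `model` table.
df = pd.DataFrame({
    'name': ['model_a', 'model_b', 'model_c'],
    'upvote': [120, 80, 40],
    'downvote': [30, 60, 70],
})
df['votes'] = df['upvote'] + df['downvote']

# Every model starts at 1200; each pair is compared once.
df['score'] = 1200
for i in range(len(df)):
    for j in range(i + 1, len(df)):
        # Expected score from the standard Elo logistic curve (400-point scale).
        expected_a = 1 / (1 + 10 ** ((df['score'].iloc[j] - df['score'].iloc[i]) / 400))
        expected_b = 1 / (1 + 10 ** ((df['score'].iloc[i] - df['score'].iloc[j]) / 400))
        # Observed result is the upvote ratio; 0.5 when a model has no votes yet.
        actual_a = df['upvote'].iloc[i] / df['votes'].iloc[i] if df['votes'].iloc[i] > 0 else 0.5
        actual_b = df['upvote'].iloc[j] / df['votes'].iloc[j] if df['votes'].iloc[j] > 0 else 0.5
        # K-factor 32, rounded per update as in the new code.
        df.at[i, 'score'] += round(32 * (actual_a - expected_a))
        df.at[j, 'score'] += round(32 * (actual_b - expected_b))

df['score'] = round(df['score'])
print(df.sort_values(by='score', ascending=False))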
app/ui_leaderboard.py CHANGED
@@ -5,13 +5,19 @@ from .messages import *
 
 with gr.Blocks() as leaderboard:
     gr.Markdown(LDESC)
-    df = gr.Dataframe(interactive=False, min_width=0, wrap=True, column_widths=[30, 200, 50, 50])
+    df = gr.Dataframe(
+        interactive=False,
+        min_width=0,
+        wrap=False,
+        column_widths=[30, 200, 50, 50],
+        datatype=["str", "html", "number", "number"]
+    )
     reloadbtn = gr.Button("Refresh")
     with gr.Row():
         reveal_prelim = gr.Checkbox(label="Reveal preliminary results", info="Show all models, including models with very few human ratings.", scale=1)
-        hide_battle_votes = gr.Checkbox(label="Hide Battle Mode votes", info="Exclude votes obtained through Battle Mode.", scale=1)
-    reveal_prelim.input(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
-    hide_battle_votes.input(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
-    leaderboard.load(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
-    reloadbtn.click(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
+        # hide_battle_votes = gr.Checkbox(label="Hide Battle Mode votes", info="Exclude votes obtained through Battle Mode.", scale=1)
+    reveal_prelim.input(get_leaderboard, inputs=[reveal_prelim], outputs=[df])
+    # hide_battle_votes.input(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
+    leaderboard.load(get_leaderboard, inputs=[reveal_prelim], outputs=[df])
+    reloadbtn.click(get_leaderboard, inputs=[reveal_prelim], outputs=[df])
     # gr.Markdown("DISCLAIMER: The licenses listed may not be accurate or up to date, you are responsible for checking the licenses before using the models. Also note that some models may have additional usage restrictions.")