TheoLvs committed on
Commit 103fbf9 Β· 1 Parent(s): 6476ab6

Update app.py

Files changed (1)
  1. app.py +199 -178
app.py CHANGED
@@ -1,129 +1,96 @@
  import gradio as gr
  from codecarbon import EmissionsTracker
- from datasets import load_dataset
- import numpy as np
- from sklearn.metrics import accuracy_score
- import random
  import os
  import json
  from datetime import datetime
  from huggingface_hub import HfApi
- from huggingface_hub import upload_file
  import tempfile
  from dotenv import load_dotenv
- import spaces

- # Use dotenv to load the environment variables
  load_dotenv()

  # Get environment variables
- HF_TOKEN = os.getenv("HF_TOKEN_TEXT")
- DATASET_NAME = os.getenv("DATASET_NAME", "QuotaClimat/frugalaichallenge-text-train") # Default to public dataset
- TEST_SIZE = float(os.getenv("TEST_SIZE", "0.2")) # Default to 20% test size
- TEST_SEED = int(os.getenv("TEST_SEED", "42")) # Default seed for reproducibility
-
- print(f"Using dataset: {DATASET_NAME}")
- print(f"Test split size: {TEST_SIZE}")
-
  if not HF_TOKEN:
      print("Warning: HF_TOKEN not found in environment variables. Submissions will not work.")

- # Initialize carbon emissions tracker with CodeCarbon
- tracker = EmissionsTracker(allow_multiple_runs=True)
-
- #--------------------------------------------------------------------------------------------
- # FUNCTION TO UPDATE WITH YOUR MODEL SUBMISSION
- #--------------------------------------------------------------------------------------------
-
- @spaces.GPU
- def evaluate(model_description):
-     # Get space info
-     username, space_url = get_space_info()
-
-     # Initialize tracker
-     tracker.start()
-     tracker.start_task("inference")
-
-     #--------------------------------------------------------------------------------------------
-     # YOUR MODEL INFERENCE CODE HERE
-     # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
-     #--------------------------------------------------------------------------------------------
-
-     # Make random predictions
-     true_labels = test_dataset["label"]
-     predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
-
-     #--------------------------------------------------------------------------------------------
-     # YOUR MODEL INFERENCE STOPS HERE
-     #--------------------------------------------------------------------------------------------
-
-     # Stop tracking emissions
-     emissions_data = tracker.stop_task()
-
-     # Calculate accuracy
-     accuracy = accuracy_score(true_labels, predictions)
-
-     # Prepare complete results
-     results = {
-         "username": username,
-         "space_url": space_url,
-         "submission_timestamp": datetime.now().isoformat(),
-         "model_description": model_description if model_description else "No description provided",
-         "accuracy": float(accuracy),
-         "energy_consumed_wh": emissions_data.energy_consumed * 1000,
-         "emissions_gco2eq": emissions_data.emissions * 1000,
-         "emissions_data": clean_emissions_data(emissions_data)
      }
-
-     # Return both summary and detailed results
-     return [
-         accuracy,
-         emissions_data.emissions * 1000,
-         emissions_data.energy_consumed * 1000,
-         json.dumps(results, indent=2)
-     ]


- #--------------------------------------------------------------------------------------------
- # HELPER FUNCTIONS
- #--------------------------------------------------------------------------------------------

- # Function to get space username and URL
- def get_space_info():
-     space_name = os.getenv("SPACE_ID", "")
-     if space_name:
-         try:
-             username = space_name.split("/")[0]
-             space_url = f"https://huggingface.co/spaces/{space_name}"
-             return username, space_url
-         except Exception as e:
-             print(f"Error getting space info: {e}")
-     return "local-user", "local-development"

- def clean_emissions_data(emissions_data):
-     """Remove unwanted fields from emissions data"""
-     data_dict = emissions_data.__dict__
-     fields_to_remove = ['timestamp', 'project_name', 'experiment_id', 'latitude', 'longitude']
-     return {k: v for k, v in data_dict.items() if k not in fields_to_remove}


- def submit_results(results_json):
      if not results_json:
          return gr.Warning("No results to submit")

-     # Check if we're in a Space or have admin dev rights
-     space_name = os.getenv("SPACE_ID")
-     is_admin_dev = os.getenv("ADMIN_DEV") == "true"
-
-     if not space_name and not is_admin_dev:
-         message = "You cannot submit your model locally, you need to deploy it as a Hugging Face Space first, and then submit it."
-         return gr.Warning(message)
-
      if not HF_TOKEN:
          return gr.Warning("HF_TOKEN not found. Please set up your Hugging Face token.")

      try:
-         # results_json is already a dict from gr.JSON
          results_str = json.dumps(results_json)

          # Create a temporary file with the results
@@ -131,13 +98,20 @@ def submit_results(results_json):
              f.write(results_str)
              temp_path = f.name

-         # Upload to the dataset
          api = HfApi(token=HF_TOKEN)
          path_in_repo = f"submissions/{results_json['username']}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
          api.upload_file(
              path_or_fileobj=temp_path,
              path_in_repo=path_in_repo,
-             repo_id="frugal-ai-challenge/public-leaderboard-text",
              repo_type="dataset",
              token=HF_TOKEN
          )
@@ -148,69 +122,43 @@ def submit_results(results_json):
          return gr.Info("Results submitted successfully to the leaderboard! πŸŽ‰")
      except Exception as e:
          return gr.Warning(f"Error submitting results: {str(e)}")
-
-
- #--------------------------------------------------------------------------------------------
- # DATASET PREPARATION
- #--------------------------------------------------------------------------------------------
-
- # Define the label mapping
- LABEL_MAPPING = {
-     "0_not_relevant": 0, # No relevant claim detected
-     "1_not_happening": 1, # Global warming is not happening
-     "2_not_human": 2, # Not caused by humans
-     "3_not_bad": 3, # Not bad or beneficial
-     "4_solutions_harmful_unnecessary": 4, # Solutions harmful/unnecessary
-     "5_science_unreliable": 5, # Science is unreliable
-     "6_proponents_biased": 6, # Proponents are biased
-     "7_fossil_fuels_needed": 7 # Fossil fuels are needed
- }
-
- # Load and prepare the dataset
- print("Loading dataset...")
- dataset = load_dataset(DATASET_NAME)
-
- # Convert string labels to integers
- dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})
-
- # Split dataset
- train_test = dataset["train"].train_test_split(test_size=TEST_SIZE, seed=TEST_SEED)
- train_dataset = train_test["train"]
- test_dataset = train_test["test"]
-
- #--------------------------------------------------------------------------------------------
- # GRADIO INTERFACE
- #--------------------------------------------------------------------------------------------

  # Create the demo interface
  with gr.Blocks() as demo:

      gr.Image("./logo.png", show_label=False, container=False)
-
      gr.Markdown("""
-     # πŸ“œ Frugal AI Challenge - Text task - Submission portal
-     ## Climate Disinformation Classification
-     """)
-
      with gr.Tabs():

          with gr.Tab("Instructions"):

              gr.Markdown("""
-             To submit your results, please follow the steps below:

              ## Prepare your model submission
-             1. Clone the space of this portal on your own Hugging Face account.
-             2. Modify the ``evaluate`` function to replace the baseline by your model loading and inference within the inference pass where the energy consumption and emissions are tracked.
              3. If needed, complete the requirements and/or any necessary dependencies in your space.
-             4. Write down your model card in the ``modelcard.md`` file.
-             5. Deploy your space and verify that it works.
              6. (Optional) You can change the Space hardware to use any GPU directly on Hugging Face.

              ## Submit your model to the leaderboard in the ``Model Submission`` tab
-             7. Step 1 - Evaluate model: Click on the button to evaluate your model. This will run you model, computes the accuracy on the test set (20% of the train set), and track the energy consumption and emissions.
-             8. Step 2 - Submit to leaderboard: Click on the button to submit your results to the leaderboard. This will upload the results to the leaderboard dataset and update the leaderboard.
-             9. You can see the leaderboard at https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-text

              ## About
              > You can find more information about the Frugal AI Challenge 2025 on the [Frugal AI Challenge website](https://frugal-ai-challenge.org/).
@@ -222,50 +170,123 @@ The challenge is organized by Hugging Face, Data For Good, and the French Minist
              The goal of the Frugal AI Challenge is to encourage both academic and industry actors to keep efficiency in mind when deploying AI models. By tracking both energy consumption and performance for different AI tasks, we can incentivize frugality in AI deployment while also addressing real-world challenges.
              """)

-         with gr.Tab("Model Submission"):
-
              with gr.Row():
-                 model_description = gr.Textbox(
-                     label="Model Description (one sentence)",
-                     placeholder="Describe your model in one sentence...",
-                     value="Random baseline",
-                     lines=2
                  )

              with gr.Row():
                  with gr.Column(scale=1):
-                     evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
                  with gr.Column(scale=1):
-                     submit_btn = gr.Button("2. Submit to leaderboard", variant="primary", size="lg")

              with gr.Row():
-                 accuracy_output = gr.Number(label="Accuracy", precision=4)
-                 energy_output = gr.Number(label="Energy Consumed (Wh)", precision=12)
-                 emissions_output = gr.Number(label="Emissions (gCO2eq)", precision=12)
              with gr.Row():
-                 results_json = gr.JSON(label="Detailed Results", visible=True)

-             evaluate_btn.click(
-                 evaluate,
-                 inputs=[model_description],
-                 outputs=[accuracy_output, emissions_output, energy_output, results_json]
-             )

-             submit_btn.click(
-                 submit_results,
-                 inputs=[results_json],
-                 outputs=None # No need for output component with popups
-             )

-         with gr.Tab("Model Card"):
-             with open("README.md", "r") as f:
-                 content = f.read()
-                 # Remove the YAML header (content between --- markers)
-                 if content.startswith("---"):
-                     second_marker = content.find("---", 3)
-                     if second_marker != -1:
-                         content = content[second_marker + 3:].strip()
-             gr.Markdown(content)

  if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
  from codecarbon import EmissionsTracker
  import os
  import json
  from datetime import datetime
+ import requests
  from huggingface_hub import HfApi
  import tempfile
  from dotenv import load_dotenv

+ # Load environment variables
  load_dotenv()

  # Get environment variables
+ HF_TOKEN = os.getenv("HF_TOKEN")
  if not HF_TOKEN:
      print("Warning: HF_TOKEN not found in environment variables. Submissions will not work.")

+ api = HfApi(token=HF_TOKEN)
+
+
+ DEFAULT_PARAMS = {
+     "text": {
+         "dataset_name": "QuotaClimat/frugalaichallenge-text-train",
+         "test_size": 0.2, # must be between 0 and 1
+         "test_seed": 42, # must be non-negative
+     },
+     "image": {
+         "dataset_name": "QuotaClimat/frugalaichallenge-image-train",
+         "test_size": 0.2, # must be between 0 and 1
+         "test_seed": 42, # must be non-negative
+     },
+     "audio": {
+         "dataset_name": "QuotaClimat/frugalaichallenge-audio-train",
+         "test_size": 0.2, # must be between 0 and 1
+         "test_seed": 42, # must be non-negative
      }
+ }


+ def evaluate_model(task: str, space_url: str):
+     """
+     Evaluate a model through its API endpoint
+     """
+     # username = space_url.split("/")[0]

+     if "localhost" in space_url:
+         api_url = f"{space_url}/{task}"
+     else:
+         api_url = f"https://{space_url.lower().replace('/', '-')}.hf.space/{task}"

+     # Check if the space exists, will raise an error if it doesn't
+     api.space_info(repo_id=space_url)
+
+     try:
+         # Make API call to the space
+         params = DEFAULT_PARAMS[task]
+         response = requests.post(api_url, json=params)
+         if response.status_code != 200:
+             return None, None, None, gr.Warning(f"API call failed with status {response.status_code}")
+
+         results = response.json()
+
+         # Check for required keys
+         required_keys = {
+             "username", "space_url", "submission_timestamp", "model_description",
+             "accuracy", "energy_consumed_wh", "emissions_gco2eq", "emissions_data",
+             "api_route", "dataset_config"
+         }
+
+         missing_keys = required_keys - set(results.keys())
+         if missing_keys:
+             return None, None, None, gr.Warning(f"API response missing required keys: {', '.join(missing_keys)}")
+
+         return (
+             results["accuracy"],
+             results["emissions_gco2eq"],
+             results["energy_consumed_wh"],
+             results
+         )
+
+     except Exception as e:
+         return None, None, None, gr.Warning(str(e))

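For reference, the contract the portal enforces is visible in the code above: it POSTs `DEFAULT_PARAMS[task]` as JSON to `https://<username>-<space-name>.hf.space/<task>` (or to `<space_url>/<task>` for a localhost URL) and rejects any response missing a key from `required_keys`. A minimal sketch of a compatible endpoint is shown below, assuming FastAPI as the instructions suggest; the class name, the random-baseline placeholder values, and the illustrative username are assumptions, not the official template (whose real entry point is the `evaluate_model` function in `tasks/text.py`).

```python
# Hypothetical sketch of a Space endpoint compatible with the portal's contract.
# Request fields mirror DEFAULT_PARAMS["text"]; response keys mirror required_keys.
# FastAPI is assumed (as the instructions mention); everything else is illustrative.
from datetime import datetime
import random

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class EvalRequest(BaseModel):
    dataset_name: str = "QuotaClimat/frugalaichallenge-text-train"
    test_size: float = 0.2
    test_seed: int = 42


@app.post("/text")
def evaluate_text(request: EvalRequest):
    # Replace this random baseline with real model loading and inference,
    # tracked with CodeCarbon as in the template's tasks/text.py.
    accuracy = random.random()   # placeholder metric
    energy_wh = 0.0              # placeholder energy reading
    emissions_gco2eq = 0.0       # placeholder emissions reading

    # The portal rejects responses missing any of these keys.
    return {
        "username": "your-username",             # illustrative
        "space_url": "your-username/your-space", # illustrative
        "submission_timestamp": datetime.now().isoformat(),
        "model_description": "Random baseline",
        "accuracy": accuracy,
        "energy_consumed_wh": energy_wh,
        "emissions_gco2eq": emissions_gco2eq,
        "emissions_data": {},
        "api_route": "/text",
        "dataset_config": {
            "dataset_name": request.dataset_name,
            "test_size": request.test_size,
            "test_seed": request.test_seed,
        },
    }
```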
+ def submit_results(task: str, results_json):
      if not results_json:
          return gr.Warning("No results to submit")

      if not HF_TOKEN:
          return gr.Warning("HF_TOKEN not found. Please set up your Hugging Face token.")

      try:
          results_str = json.dumps(results_json)

          # Create a temporary file with the results
              f.write(results_str)
              temp_path = f.name

+         # Upload to the appropriate dataset based on task
          api = HfApi(token=HF_TOKEN)
          path_in_repo = f"submissions/{results_json['username']}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+
+         dataset_mapping = {
+             "text": "frugal-ai-challenge/public-leaderboard-text",
+             "image": "frugal-ai-challenge/public-leaderboard-image",
+             "audio": "frugal-ai-challenge/public-leaderboard-audio"
+         }
+
          api.upload_file(
              path_or_fileobj=temp_path,
              path_in_repo=path_in_repo,
+             repo_id=dataset_mapping[task],
              repo_type="dataset",
              token=HF_TOKEN
          )

          return gr.Info("Results submitted successfully to the leaderboard! πŸŽ‰")
      except Exception as e:
          return gr.Warning(f"Error submitting results: {str(e)}")

  # Create the demo interface
  with gr.Blocks() as demo:

      gr.Image("./logo.png", show_label=False, container=False)
      gr.Markdown("""
+     # Frugal AI Challenge - Submission Portal
+     Submit your model results for any of the three tasks: Text, Image, or Audio classification.
+     """)
+
      with gr.Tabs():

+
          with gr.Tab("Instructions"):

              gr.Markdown("""
+             To submit your results for one of the three tasks, please follow the steps below:

              ## Prepare your model submission
+             1. Duplicate the submission API template by duplicating this space on your own Hugging Face account.
+             2. In ``tasks/text.py``, ``tasks/image.py``, or ``tasks/audio.py``, modify the ``evaluate_model`` function to replace the baseline with your model loading and inference, inside the inference pass where the energy consumption and emissions are tracked.
              3. If needed, complete the requirements and/or any necessary dependencies in your space.
+             4. Write down your model card in the ``README.md`` file.
+             5. Deploy your space (FastAPI) and verify that it works.
              6. (Optional) You can change the Space hardware to use any GPU directly on Hugging Face.

              ## Submit your model to the leaderboard in the ``Model Submission`` tab
+             When your API is deployed:
+             1. Select the task you want to submit your model to
+             2. Enter the Space URL of your API
+             3. (Optional) Specify the API route (default is ``/text``, ``/image``, or ``/audio``)
+             4. Step 1 - Evaluate model: Click on the button to evaluate your model. This will run your model through your API, compute the accuracy on the test set (20% of the train set), and track the energy consumption and emissions.
+             5. Step 2 - Submit to leaderboard: Click on the button to submit your results to the leaderboard. This will upload the results to the leaderboard dataset and update the leaderboard.
+             6. You can see the leaderboards at:
+             - Text - https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-text
+             - Image - https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-image
+             - Audio - https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-audio

              ## About
              > You can find more information about the Frugal AI Challenge 2025 on the [Frugal AI Challenge website](https://frugal-ai-challenge.org/).

              The goal of the Frugal AI Challenge is to encourage both academic and industry actors to keep efficiency in mind when deploying AI models. By tracking both energy consumption and performance for different AI tasks, we can incentivize frugality in AI deployment while also addressing real-world challenges.
              """)

+         # Text Classification Task
+         with gr.Tab("πŸ“œ Text Classification"):
              with gr.Row():
+                 text_space_url = gr.Textbox(
+                     label="Space URL",
+                     placeholder="username/your-space",
+                     lines=1
+                 )
+                 text_route = gr.Textbox(
+                     label="API route (Advanced)",
+                     value="/text",
+                     lines=1
                  )

              with gr.Row():
                  with gr.Column(scale=1):
+                     text_evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
                  with gr.Column(scale=1):
+                     text_submit_btn = gr.Button("2. Submit to leaderboard", variant="primary")

              with gr.Row():
+                 text_accuracy = gr.Number(label="Accuracy", precision=4)
+                 text_energy = gr.Number(label="Energy Consumed (Wh)", precision=12)
+                 text_emissions = gr.Number(label="Emissions (gCO2eq)", precision=12)
              with gr.Row():
+                 text_results_json = gr.JSON(label="Detailed Results", visible=True)

+         # Image Classification Task
+         with gr.Tab("πŸŽ₯ Image Classification"):
+             with gr.Row():
+                 image_space_url = gr.Textbox(
+                     label="Space URL",
+                     placeholder="username/your-space",
+                     lines=1
+                 )
+                 image_route = gr.Textbox(
+                     label="API route (Advanced)",
+                     value="/image",
+                     lines=1
+                 )

+             with gr.Row():
+                 with gr.Column(scale=1):
+                     image_evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
+                 with gr.Column(scale=1):
+                     image_submit_btn = gr.Button("2. Submit to leaderboard", variant="primary")
+
+             with gr.Row():
+                 image_accuracy = gr.Number(label="Accuracy", precision=4)
+                 image_energy = gr.Number(label="Energy Consumed (Wh)", precision=12)
+                 image_emissions = gr.Number(label="Emissions (gCO2eq)", precision=12)
+             with gr.Row():
+                 image_results_json = gr.JSON(label="Detailed Results", visible=True)

+         # Audio Classification Task
+         with gr.Tab("πŸ”Š Audio Classification"):
+             with gr.Row():
+                 audio_space_url = gr.Textbox(
+                     label="Space URL",
+                     placeholder="username/your-space",
+                     lines=1
+                 )
+                 audio_route = gr.Textbox(
+                     label="API route (Advanced)",
+                     value="/audio",
+                     lines=1
+                 )
+
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     audio_evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
+                 with gr.Column(scale=1):
+                     audio_submit_btn = gr.Button("2. Submit to leaderboard", variant="primary")
+
+             with gr.Row():
+                 audio_accuracy = gr.Number(label="Accuracy", precision=4)
+                 audio_energy = gr.Number(label="Energy Consumed (Wh)", precision=12)
+                 audio_emissions = gr.Number(label="Emissions (gCO2eq)", precision=12)
+             with gr.Row():
+                 audio_results_json = gr.JSON(label="Detailed Results", visible=True)
+
+     # Set up event handlers
+     text_evaluate_btn.click(
+         lambda url, route: evaluate_model(route.strip("/"), url),
+         inputs=[text_space_url, text_route],
+         outputs=[text_accuracy, text_emissions, text_energy, text_results_json]
+     )
+
+     image_evaluate_btn.click(
+         lambda url, route: evaluate_model(route.strip("/"), url),
+         inputs=[image_space_url, image_route],
+         outputs=[image_accuracy, image_emissions, image_energy, image_results_json]
+     )
+
+     audio_evaluate_btn.click(
+         lambda url, route: evaluate_model(route.strip("/"), url),
+         inputs=[audio_space_url, audio_route],
+         outputs=[audio_accuracy, audio_emissions, audio_energy, audio_results_json]
+     )
+
+     text_submit_btn.click(
+         lambda results: submit_results("text", results),
+         inputs=[text_results_json],
+         outputs=None
+     )
+
+     image_submit_btn.click(
+         lambda results: submit_results("image", results),
+         inputs=[image_results_json],
+         outputs=None
+     )
+
+     audio_submit_btn.click(
+         lambda results: submit_results("audio", results),
+         inputs=[audio_results_json],
+         outputs=None
+     )

  if __name__ == "__main__":
+     demo.launch()
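To sanity-check a deployed (or locally running) API before going through the portal, it can help to reproduce the exact call `evaluate_model` makes. The snippet below is a rough illustration under that assumption: the URL mapping and the payload are copied from the code in this commit, while the Space ID `username/your-space` and the localhost address are placeholders.

```python
# Reproduce the portal's evaluation call against your own API.
# URL construction and payload mirror evaluate_model() and DEFAULT_PARAMS above;
# "username/your-space" and the localhost address are placeholder assumptions.
import json

import requests

task = "text"
space_url = "username/your-space"  # or e.g. "http://localhost:7860" for a local run

if "localhost" in space_url:
    api_url = f"{space_url}/{task}"
else:
    # "username/your-space" becomes "https://username-your-space.hf.space/text"
    api_url = f"https://{space_url.lower().replace('/', '-')}.hf.space/{task}"

payload = {
    "dataset_name": "QuotaClimat/frugalaichallenge-text-train",
    "test_size": 0.2,  # must be between 0 and 1
    "test_seed": 42,   # must be non-negative
}

response = requests.post(api_url, json=payload)
response.raise_for_status()
results = response.json()

# These three values are what the portal shows in the Accuracy,
# Emissions, and Energy fields before submission.
print(results["accuracy"], results["emissions_gco2eq"], results["energy_consumed_wh"])
print(json.dumps(results, indent=2))
```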