import gradio as gr
import pandas as pd
from pathlib import Path
import logging
from datetime import datetime
import sys
import uuid
from typing import Dict, Any
# Add parent directory to path to import main
sys.path.append(str(Path(__file__).parent))
from main import (
    StorageManager,
    EvaluationRequest,
    evaluate_model,
    PATHS
)
logging.basicConfig(level=logging.INFO)
# Initialize storage manager
storage_manager = StorageManager(PATHS)
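
# Storage keys used by this UI (the schema itself lives in main.py):
#   'leaderboard' - entries with model, average_per, average_pwed,
#                   github_url and submission_date fields
#   'tasks'       - queued/running submissions created by submit_evaluation below
#   'results'     - completed evaluations, keyed by task_id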

def load_leaderboard_data():
    try:
        return pd.DataFrame(storage_manager.load('leaderboard'))
    except Exception as e:
        logging.error(f"Error loading leaderboard: {e}")
        return pd.DataFrame()

def format_leaderboard_df(df):
    if df.empty:
        return df
    # Sort on the numeric column before formatting so ordering is numeric, not lexicographic
    df = df.sort_values("average_per")
    display_df = pd.DataFrame({
        "Model": df["model"],
        "Average PER ⬇️": df["average_per"].apply(lambda x: f"{x:.4f}"),
        "Average PWED ⬇️": df["average_pwed"].apply(lambda x: f"{x:.4f}"),
        "Link": df["github_url"].apply(lambda x: f'<a href="{x}" target="_blank">Repository</a>' if x else "N/A"),
        "Submission Date": pd.to_datetime(df["submission_date"]).dt.strftime("%Y-%m-%d")
    })
    return display_df

def create_html_table(df):
    return df.to_html(escape=False, index=False, classes="styled-table")

def submit_evaluation(model_name: str, submission_name: str, github_url: str) -> str:
    if not model_name or not submission_name:
        return "⚠️ Please provide both model name and submission name."
    try:
        # Generate a task ID
        task_id = str(uuid.uuid4())
        # Create evaluation request
        request = EvaluationRequest(
            transcription_model=model_name,
            submission_name=submission_name,
            github_url=github_url if github_url else None,
            subset="test"
        )
        # Create task entry
        task = {
            "id": task_id,
            "model": model_name,
            "subset": "test",
            "submission_name": submission_name,
            "github_url": github_url,
            "status": "queued",
            "submitted_at": datetime.now().isoformat()
        }
        # Save task
        tasks = storage_manager.load('tasks')
        tasks.append(task)
        storage_manager.save('tasks', tasks)
        # Run the evaluation. Note that asyncio.run blocks until the coroutine
        # finishes, so this handler returns only after evaluation completes.
        import asyncio
        asyncio.run(evaluate_model(task_id, request))
        return f"✅ Evaluation submitted successfully! Task ID: {task_id}"
    except Exception as e:
        return f"❌ Error: {str(e)}"

def check_status(query: str) -> Dict[str, Any]:
    if not query:
        return {"error": "Please enter a model name or task ID"}
    try:
        results = storage_manager.load('results')
        tasks = storage_manager.load('tasks')
        # First try to find by task ID
        result = next((r for r in results if r["task_id"] == query), None)
        task = next((t for t in tasks if t["id"] == query), None)
        # If not found, try to find by model name
        if not result:
            result = next((r for r in results if r["model"] == query), None)
        if not task:
            task = next((t for t in tasks if t["model"] == query), None)
        if result:
            # If we found results, return them
            return {
                "status": "completed",
                "model": result["model"],
                "subset": result["subset"],
                "num_files": result["num_files"],
                "average_per": result["average_per"],
                "average_pwed": result["average_pwed"],
                "detailed_results": result["detailed_results"],
                "timestamp": result["timestamp"]
            }
        elif task:
            # If we only found task status, return that
            return task
        else:
            return {"error": f"No results found for '{query}'"}
    except Exception as e:
        logging.error(f"Error checking status: {e}")
        return {"error": f"Error checking status: {str(e)}"}

with gr.Blocks(css="""
    .styled-table {
        width: 100%;
        border-collapse: collapse;
        margin: 25px 0;
        font-size: 0.9em;
        font-family: sans-serif;
        box-shadow: 0 0 20px rgba(0, 0, 0, 0.15);
    }
    .styled-table thead tr {
        background-color: #004999;
        color: #ffffff;
        text-align: left;
    }
    .styled-table th,
    .styled-table td {
        padding: 12px 15px;
    }
    .styled-table tbody tr {
        border-bottom: 1px solid #dddddd;
    }
""") as demo:
gr.Markdown("# π― Phonemic Transcription Leaderboard")
gr.Markdown("#### Developed By: Koel Labs")
gr.Markdown("""
## Explanation of Metrics
- **PER (Phoneme Error Rate)**: The Levenshtein distance calculated between phoneme sequences of the predicted and actual transcriptions.
- **PWED (Phoneme Weighted Edit Distance)**: Edit distance between the predicted and actual phoneme sequences, weighted by the phonemic feature distance. Method provided by [panphon library](https://github.com/dmort27/panphon)
""")
gr.Markdown("""
## Test Set Information
The test set used for evaluation is from the [TIMIT speech corpus](https://www.kaggle.com/datasets/mfekadu/darpa-timit-acousticphonetic-continuous-speech). The TIMIT corpus is a widely used dataset for speech recognition research.
## Compute
This leaderboard uses the free basic plan (16GB RAM, 2vCPUs) to allow for reproducability. The evaluation may take several hours to complete. Please be patient and do not submit the same model multiple times.
""")
    with gr.Tabs():
        with gr.TabItem("🏆 Leaderboard"):
            leaderboard_html = gr.HTML(create_html_table(format_leaderboard_df(load_leaderboard_data())))
            refresh_btn = gr.Button("🔄 Refresh")
            refresh_btn.click(
                lambda: create_html_table(format_leaderboard_df(load_leaderboard_data())),
                outputs=leaderboard_html
            )
        with gr.TabItem("📝 Submit Model"):
            model_name = gr.Textbox(label="Model Name", placeholder="facebook/wav2vec2-lv-60-espeak-cv-ft")
            submission_name = gr.Textbox(label="Submission Name", placeholder="My Model v1.0")
            github_url = gr.Textbox(label="Github/Kaggle/HF URL (optional)", placeholder="https://github.com/username/repo")
            submit_btn = gr.Button("Submit")
            result = gr.Textbox(label="Submission Status")
            submit_btn.click(
                fn=submit_evaluation,
                inputs=[model_name, submission_name, github_url],
                outputs=result
            )
        with gr.TabItem("🔍 Model Status"):
            query = gr.Textbox(label="Model Name or Task ID", placeholder="Enter model name (e.g., facebook/wav2vec2-lv-60-espeak-cv-ft)")
            status_btn = gr.Button("Check Status")
            status_output = gr.JSON(label="Status")
            status_btn.click(
                fn=check_status,
                inputs=query,
                outputs=status_output
            )

if __name__ == "__main__":
    demo.launch()
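    # Running locally (assumptions, not part of the Space config): with gradio
    # and pandas installed and main.py alongside this file, `python app.py`
    # starts the UI; Gradio serves on http://127.0.0.1:7860 by default.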