import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from monitoring import PerformanceMonitor, measure_time
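# NOTE: `monitoring` is a local helper module. Judging from its usage below, it
# is assumed to provide a PerformanceMonitor with record_problem_type(),
# record_response_time(), record_success(), and get_statistics(), plus a
# measure_time decorator that makes the wrapped function return a
# (result, elapsed_seconds) tuple.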
# Model configurations: "is_base" marks a checkpoint that loads directly;
# otherwise the id points at a PEFT adapter applied on top of the base model.
MODEL_OPTIONS = {
"Base Model": {
"id": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
"is_base": True
},
"Fine-tuned Model": {
"id": "Joash2024/Math-SmolLM2-1.7B",
"is_base": False
}
}
# Initialize performance monitor
monitor = PerformanceMonitor()
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
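# Reuse EOS as the pad token (causal LM tokenizers typically ship without one)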
tokenizer.pad_token = tokenizer.eos_token
def format_prompt(problem: str, problem_type: str) -> str:
"""Format input prompt for the model"""
if problem_type == "Derivative":
return f"""Given a mathematical function, find its derivative.
Function: {problem}
The derivative of this function is:"""
elif problem_type == "Addition":
return f"""Solve this addition problem.
Problem: {problem}
The solution is:"""
    else:  # "Root Finding" and "Custom" currently fall back to the derivative prompt
        return f"""Find the derivative of this function.
Function: {problem}
The derivative is:"""
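# Example: format_prompt("x^2 + 3x", "Derivative") produces:
#   Given a mathematical function, find its derivative.
#   Function: x^2 + 3x
#   The derivative of this function is: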
@measure_time
def get_model_response(problem: str, problem_type: str, model_info: dict) -> str:
"""Get response from a specific model"""
try:
# Load model
if model_info["is_base"]:
print(f"Loading {model_info['id']}...")
model = AutoModelForCausalLM.from_pretrained(
model_info["id"],
device_map="auto",
torch_dtype=torch.float16
)
else:
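            # The fine-tuned checkpoint is a PEFT adapter: load the base model
            # first, then attach the adapter weights on top of it.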
print("Loading base model for fine-tuned...")
base = AutoModelForCausalLM.from_pretrained(
"HuggingFaceTB/SmolLM2-1.7B-Instruct",
device_map="auto",
torch_dtype=torch.float16
)
print(f"Loading {model_info['id']}...")
model = PeftModel.from_pretrained(base, model_info["id"])
        model.eval()  # inference mode: disable dropout
# Format prompt and generate
prompt = format_prompt(problem, problem_type)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=100,  # cap new tokens; max_length would also count the prompt
                num_return_sequences=1,
                temperature=0.1,  # near-greedy sampling keeps answers stable
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )
# Decode and extract response
generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
        response = generated[len(prompt):].strip()  # assumes decode() reproduces the prompt verbatim
        # Clean up: models are reloaded on every request, so free them and
        # release cached GPU memory before returning
del model
if not model_info["is_base"]:
del base
torch.cuda.empty_cache()
return response
except Exception as e:
return f"Error: {str(e)}"
def solve_problem(problem: str, problem_type: str, model_type: str) -> tuple:
"""Solve a math problem using selected model"""
if not problem:
return "Please enter a problem", None
# Record problem type
monitor.record_problem_type(problem_type)
# Get response from selected model
model_info = MODEL_OPTIONS[model_type]
response, time_taken = get_model_response(problem, problem_type, model_info)
    # Format the response; the step-by-step framing only applies to derivatives
    if problem_type == "Derivative":
        output = f"""Solution: {response}
Let's verify this step by step:
1. Starting with f(x) = {problem}
2. Applying differentiation rules
3. We get f'(x) = {response}"""
    else:
        output = f"Solution: {response}"
# Record metrics
monitor.record_response_time(model_type, time_taken)
monitor.record_success(model_type, not response.startswith("Error"))
# Get updated statistics
stats = monitor.get_statistics()
# Format statistics for display
stats_display = f"""
### Performance Metrics
#### Response Times (seconds)
- {model_type}: {stats.get(f'{model_type}_avg_response_time', 0):.2f} avg
#### Success Rates
- {model_type}: {stats.get(f'{model_type}_success_rate', 0):.1f}%
#### Problem Types Used
"""
for ptype, percentage in stats.get('problem_type_distribution', {}).items():
stats_display += f"- {ptype}: {percentage:.1f}%\n"
return output, stats_display
# Create Gradio interface
with gr.Blocks(title="Mathematics Problem Solver") as demo:
gr.Markdown("# Mathematics Problem Solver")
gr.Markdown("Test our models on mathematical problems")
with gr.Row():
with gr.Column():
problem_type = gr.Dropdown(
choices=["Addition", "Root Finding", "Derivative", "Custom"],
value="Derivative",
label="Problem Type"
)
model_type = gr.Dropdown(
choices=list(MODEL_OPTIONS.keys()),
value="Fine-tuned Model",
label="Model to Use"
)
problem_input = gr.Textbox(
label="Enter your math problem",
placeholder="Example: x^2 + 3x"
)
solve_btn = gr.Button("Solve", variant="primary")
with gr.Row():
solution_output = gr.Textbox(label="Solution", lines=5)
# Performance metrics display
with gr.Row():
metrics_display = gr.Markdown("### Performance Metrics\n*Solve a problem to see metrics*")
# Example problems
gr.Examples(
examples=[
["x^2 + 3x", "Derivative", "Fine-tuned Model"],
["144", "Root Finding", "Fine-tuned Model"],
["235 + 567", "Addition", "Fine-tuned Model"],
["\\sin{\\left(x\\right)}", "Derivative", "Fine-tuned Model"],
["e^x", "Derivative", "Fine-tuned Model"],
["\\frac{1}{x}", "Derivative", "Fine-tuned Model"],
["x^3 + 2x", "Derivative", "Fine-tuned Model"],
["\\cos{\\left(x^2\\right)}", "Derivative", "Fine-tuned Model"]
],
inputs=[problem_input, problem_type, model_type],
outputs=[solution_output, metrics_display],
fn=solve_problem,
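        # NOTE: cached examples all run through solve_problem at startup, and
        # each call reloads the model; disable caching if startup time matters.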
cache_examples=True,
)
# Connect the interface
solve_btn.click(
fn=solve_problem,
inputs=[problem_input, problem_type, model_type],
outputs=[solution_output, metrics_display]
)
if __name__ == "__main__":
demo.launch()