Joash2024 committed
Commit 77a8694 · 1 Parent(s): 95f0984

setup: configure test space with our fine-tuned model

Files changed (3)
  1. README.md +37 -7
  2. app.py +126 -0
  3. requirements.txt +6 -0
README.md CHANGED
@@ -1,13 +1,43 @@
 ---
-title: Math Llm Demo Test
-emoji: 👀
-colorFrom: green
-colorTo: indigo
+title: Math Derivative Solver (Test)
+emoji: 🧮
+colorFrom: blue
+colorTo: green
 sdk: gradio
-sdk_version: 5.8.0
+sdk_version: 4.0.0
 app_file: app.py
 pinned: false
-license: mit
+hardware:
+  gpu: t4
+  memory: 16
+python_packages:
+  - "torch>=2.0.0"
+  - "transformers>=4.30.0"
+  - "accelerate>=0.20.0"
+  - "bitsandbytes==0.41.1"
+  - "peft==0.6.2"
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Math Derivative Solver (Test Space)
+
+This Space tests our newly fine-tuned math model for solving derivatives. We're using:
+
+1. Base model: HuggingFaceTB/SmolLM2-1.7B-Instruct
+2. Our fine-tuned model: Joash2024/Math-SmolLM2-1.7B (LoRA adapter)
+
+## Features
+
+- Solve derivatives of mathematical functions
+- Step-by-step explanations
+- Support for various functions:
+  - Polynomials (x^2, x^3 + 2x)
+  - Trigonometric (sin(x), cos(x))
+  - Exponential (e^x)
+  - Logarithmic (log(x))
+  - Combinations (x e^{-x})
+
+## Technical Details
+
+- 8-bit quantization for efficient inference
+- GPU acceleration with T4
+- LaTeX notation support
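Not part of this commit, but since the README claims correct derivatives, here is a minimal sketch of how a generated answer could be sanity-checked offline with sympy (sympy is an extra dependency, not in this Space's requirements.txt; the sample expressions are illustrative):

```python
# Illustrative only: sympy is NOT a dependency of this Space.
# Compares a model-generated derivative against sympy's symbolic answer.
import sympy as sp

x = sp.symbols("x")
f = sp.sympify("x**3 + 2*x")             # input function
model_answer = sp.sympify("3*x**2 + 2")  # e.g. parsed from the model output

# Difference simplifies to 0 iff the two expressions are equivalent
assert sp.simplify(sp.diff(f, x) - model_answer) == 0
print("Derivative verified:", model_answer)
```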
app.py ADDED
@@ -0,0 +1,126 @@
+import gradio as gr
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+from peft import PeftModel
+
+# Model configurations
+BASE_MODEL = "HuggingFaceTB/SmolLM2-1.7B-Instruct"  # Base model
+ADAPTER_MODEL = "Joash2024/Math-SmolLM2-1.7B"  # Our LoRA adapter
+
+print("Loading tokenizer...")
+tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
+tokenizer.pad_token = tokenizer.eos_token
+
+# Configure quantization
+bnb_config = BitsAndBytesConfig(
+    load_in_8bit=True,
+)
+
+print("Loading base model...")
+model = AutoModelForCausalLM.from_pretrained(
+    BASE_MODEL,
+    quantization_config=bnb_config,
+    device_map="auto",
+    torch_dtype=torch.float16
+)
+
+print("Loading LoRA adapter...")
+model = PeftModel.from_pretrained(model, ADAPTER_MODEL)
+model.eval()
+
+def format_prompt(function: str) -> str:
+    """Format input prompt for the model"""
+    return f"""Given a mathematical function, find its derivative.
+
+Function: {function}
+The derivative of this function is:"""
+
+def generate_derivative(function: str, max_length: int = 200) -> str:
+    """Generate derivative for a given function"""
+    # Format the prompt
+    prompt = format_prompt(function)
+
+    # Tokenize
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+    # Generate
+    with torch.no_grad():
+        outputs = model.generate(
+            **inputs,
+            max_length=max_length,
+            num_return_sequences=1,
+            temperature=0.1,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+    # Decode and extract derivative
+    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    derivative = generated[len(prompt):].strip()
+
+    return derivative
+
+def solve_derivative(function: str) -> str:
+    """Solve derivative and format output"""
+    if not function:
+        return "Please enter a function"
+
+    print(f"\nGenerating derivative for: {function}")
+    derivative = generate_derivative(function)
+
+    # Format output with step-by-step explanation
+    output = f"""Generated derivative: {derivative}
+
+Let's verify this step by step:
+1. Starting with f(x) = {function}
+2. Applying differentiation rules
+3. We get f'(x) = {derivative}"""
+
+    return output
+
+# Create Gradio interface
+with gr.Blocks(title="Mathematics Derivative Solver") as demo:
+    gr.Markdown("# Mathematics Derivative Solver")
+    gr.Markdown("Using our fine-tuned model to solve derivatives")
+
+    with gr.Row():
+        with gr.Column():
+            function_input = gr.Textbox(
+                label="Enter a function",
+                placeholder="Example: x^2, sin(x), e^x"
+            )
+            solve_btn = gr.Button("Find Derivative", variant="primary")
+
+    with gr.Row():
+        output = gr.Textbox(
+            label="Solution with Steps",
+            lines=6
+        )
+
+    # Example functions
+    gr.Examples(
+        examples=[
+            ["x^2"],
+            ["\\sin{\\left(x\\right)}"],
+            ["e^x"],
+            ["\\frac{1}{x}"],
+            ["x^3 + 2x"],
+            ["\\cos{\\left(x^2\\right)}"],
+            ["\\log{\\left(x\\right)}"],
+            ["x e^{-x}"]
+        ],
+        inputs=function_input,
+        outputs=output,
+        fn=solve_derivative,
+        cache_examples=True,
+    )
+
+    # Connect the interface
+    solve_btn.click(
+        fn=solve_derivative,
+        inputs=[function_input],
+        outputs=[output]
+    )
+
+if __name__ == "__main__":
+    demo.launch()
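For reference, once the Space is running it can also be exercised programmatically. A minimal sketch using gradio_client; the Space ID below is an assumption (this commit does not name the Space), and the endpoint name assumes Gradio's default of exposing the click handler under the function's name:

```python
# Hypothetical client-side smoke test; the Space ID is an assumption.
from gradio_client import Client

client = Client("Joash2024/math-llm-demo-test")  # assumed Space ID
# Gradio names this endpoint after solve_derivative by default.
result = client.predict("x^2", api_name="/solve_derivative")
print(result)
```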
requirements.txt ADDED
@@ -0,0 +1,6 @@
+torch>=2.0.0
+transformers>=4.30.0
+accelerate>=0.20.0
+bitsandbytes==0.41.1
+peft==0.6.2
+gradio==4.0.0