Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-import os
-
+import os
+from sympy import symbols, Eq, solve, latex
 
 # Initialize the Hugging Face Inference Client
 client = InferenceClient()
@@ -9,26 +9,28 @@ client = InferenceClient()
 
 
 def preprocess_latex(content):
-    # Split
+    # Example: Split the response into meaningful parts
     lines = content.split("\n")
-
-
+    processed_lines = []
+
     for line in lines:
-
-
-
-
-
+        if "[" in line and "]" in line:  # Treat as a mathematical expression
+            math_expr = line.strip("[]")
+            try:
+                # Parse and convert to LaTeX
+                processed_lines.append(f"$$ {math_expr} $$")
+            except:
+                processed_lines.append(line)  # Fallback to raw line if parsing fails
+        elif "(" in line and ")" in line:  # Treat as inline math
+            processed_lines.append(line.replace("(", "$").replace(")", "$"))
         else:
-
+            processed_lines.append(line)  # Plain text remains as-is
 
-
-    return "\n".join(formatted_lines)
+    return "\n".join(processed_lines)
 
 # Function to generate and format AI response
 def generate_response(prompt_template, **kwargs):
     # Simulate processing/loading
-    time.sleep(1)  # Optional: Remove or adjust based on actual execution time
     prompt = os.getenv(prompt_template).format(**kwargs)
     response = client.chat.completions.create(
         model="Qwen/Qwen2.5-Math-1.5B-Instruct",
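The new `preprocess_latex` imports `symbols`, `Eq`, `solve`, and `latex` from sympy but never calls them: the `try` block only wraps the bracketed text in `$$ … $$`, an f-string that cannot raise, so the bare `except:` fallback is unreachable. A minimal sketch of the parse-and-convert step that the `# Parse and convert to LaTeX` comment describes, assuming the bracketed text is an expression `sympy.sympify` can read (this helper is illustrative, not part of the commit):

```python
from sympy import sympify, latex, SympifyError

def convert_to_latex(math_expr: str) -> str:
    """Hypothetical helper: parse plain-text math and render it as LaTeX."""
    try:
        parsed = sympify(math_expr)        # e.g. "x**2 + 2*x + 1"
        return f"$$ {latex(parsed)} $$"    # -> "$$ x^{2} + 2 x + 1 $$"
    except (SympifyError, SyntaxError, TypeError):
        return math_expr                   # fall back to the raw text
```

With a helper like this, the `try` branch would append `convert_to_latex(math_expr)` instead of the raw f-string, and the fallback `except` would actually have something to catch.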
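Separately, the `elif` branch turns every "(" and ")" into "$", which would also mangle ordinary prose such as `f(x) = 2`. If the model emits LaTeX-style `\( … \)` inline delimiters, a narrower rewrite could target only those (a speculative refinement; the commit's heuristic checks for bare parentheses):

```python
import re

# Match \( ... \) inline-math delimiters only, leaving plain parentheses alone.
INLINE_MATH = re.compile(r"\\\((.+?)\\\)")

def inline_math_to_dollars(line: str) -> str:
    return INLINE_MATH.sub(lambda m: f"${m.group(1)}$", line)

# inline_math_to_dollars(r"Let \(x + 1\) be positive.")  ->  "Let $x + 1$ be positive."
```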
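The diff cuts off right after the `model=` argument of `generate_response`. Based only on what is visible (a prompt template read from an environment variable, `client.chat.completions.create`, and the Qwen model id), the surrounding call might look like the sketch below; the variable name `SOLVE_PROMPT`, the `messages` payload, and `max_tokens` are assumptions, and `preprocess_latex` is the function from the commit above:

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient()

def generate_response(prompt_template, **kwargs):
    # The template text lives in an env var named by `prompt_template`, e.g.
    # SOLVE_PROMPT="Solve the following equation step by step: {equation}"
    # (the variable name is hypothetical).
    prompt = os.getenv(prompt_template).format(**kwargs)
    response = client.chat.completions.create(
        model="Qwen/Qwen2.5-Math-1.5B-Instruct",
        messages=[{"role": "user", "content": prompt}],  # assumed payload
        max_tokens=512,                                  # assumed limit
    )
    # Convert the raw reply into $-delimited math before display.
    return preprocess_latex(response.choices[0].message.content)
```

In a Gradio app this return value would typically feed a `gr.Markdown` output configured with matching `latex_delimiters` so the `$$ … $$` and `$ … $` spans render as math.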