yasserrmd committed
Commit 393fe1b · verified · 1 Parent(s): 1f74404

Update app.py

Files changed (1)
  1. app.py +16 -14
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-import os  # For environment variables
-import time  # To simulate processing time if needed
+import os
+from sympy import symbols, Eq, solve, latex

 # Initialize the Hugging Face Inference Client
 client = InferenceClient()
@@ -9,26 +9,28 @@ client = InferenceClient()


 def preprocess_latex(content):
-    # Split content into lines for better formatting
+    # Example: Split the response into meaningful parts
     lines = content.split("\n")
-    formatted_lines = []
-
+    processed_lines = []
+
     for line in lines:
-        # If a line contains equations or math expressions, wrap them properly
-        if "Simplify" in line or "Solve" in line or "boxed" in line or "frac" in line:
-            formatted_lines.append(f"$$ {line.strip()} $$")  # Block math
-        elif "(" in line and ")" in line:  # Inline math for variables
-            formatted_lines.append(line.replace("(", "$").replace(")", "$"))
+        if "[" in line and "]" in line:  # Treat as a mathematical expression
+            math_expr = line.strip("[]")
+            try:
+                # Parse and convert to LaTeX
+                processed_lines.append(f"$$ {math_expr} $$")
+            except:
+                processed_lines.append(line)  # Fallback to raw line if parsing fails
+        elif "(" in line and ")" in line:  # Treat as inline math
+            processed_lines.append(line.replace("(", "$").replace(")", "$"))
         else:
-            formatted_lines.append(line)  # Plain text
+            processed_lines.append(line)  # Plain text remains as-is

-    # Join lines back into a single string
-    return "\n".join(formatted_lines)
+    return "\n".join(processed_lines)

 # Function to generate and format AI response
 def generate_response(prompt_template, **kwargs):
     # Simulate processing/loading
-    time.sleep(1)  # Optional: Remove or adjust based on actual execution time
     prompt = os.getenv(prompt_template).format(**kwargs)
     response = client.chat.completions.create(
         model="Qwen/Qwen2.5-Math-1.5B-Instruct",
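For reference, a self-contained sketch of how the reworked preprocess_latex treats a sample model response. The function body is condensed from the committed version (the try/except, which cannot raise here, is omitted), and the sample input string is invented for illustration only.

# Condensed copy of the committed helper, for illustration
def preprocess_latex(content):
    lines = content.split("\n")
    processed_lines = []
    for line in lines:
        if "[" in line and "]" in line:        # bracketed spans become display math
            math_expr = line.strip("[]")
            processed_lines.append(f"$$ {math_expr} $$")
        elif "(" in line and ")" in line:      # parenthesised spans become inline math
            processed_lines.append(line.replace("(", "$").replace(")", "$"))
        else:
            processed_lines.append(line)       # plain text passes through unchanged
    return "\n".join(processed_lines)

# Hypothetical sample response, not taken from the app
sample = "Solve the equation:\n[ x^2 - 4 = 0 ]\nThe roots are (x = 2) and (x = -2)."
print(preprocess_latex(sample))
# Solve the equation:
# $$  x^2 - 4 = 0  $$
# The roots are $x = 2$ and $x = -2$.

Note that the sympy names imported in this commit (symbols, Eq, solve, latex) are not yet called inside preprocess_latex; the bracketed text is wrapped in $$ ... $$ as-is.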