Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ client = InferenceClient()
 # Generator function for streaming AI response
 def generate_response(prompt_template, **kwargs):
     prompt = os.getenv(prompt_template).format(**kwargs)
-
+    response = client.chat.completions.create(
         model="Qwen/Qwen2.5-Math-1.5B-Instruct",
         messages=[{"role": "user", "content": prompt}],
         temperature=0.7,
@@ -16,9 +16,7 @@ def generate_response(prompt_template, **kwargs):
         top_p=0.8,
         stream=True
     )
-
-    yield chunk.choices[0].delta.content  # Yield chunks as they are generated
-
+    return response.choices[0].message["content"]
 
 # Gradio app interface
 with gr.Blocks() as app:
@@ -49,8 +47,7 @@ with gr.Blocks() as app:
         fn=lambda *args: generate_response(prompt_template, **dict(zip([inp["key"] for inp in inputs], args))),
         inputs=input_fields,
         outputs=output,
-        api_name=f"/{tab_name.lower().replace(' ', '_')}_execute"
-        stream=True
+        api_name=f"/{tab_name.lower().replace(' ', '_')}_execute"
     )
 
 # Tabs for functionalities
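A note on the first two hunks: with huggingface_hub's InferenceClient, passing stream=True makes client.chat.completions.create return an iterator of chunks, so the added return response.choices[0].message["content"] only fits a non-streaming call (a streamed response has no choices[0].message). A minimal sketch of the generator form that the "streaming" comment describes, assuming the same model and sampling settings as the diff (the prompt_template environment-variable convention is taken from the diff):

import os
from huggingface_hub import InferenceClient

client = InferenceClient()

def generate_response_streaming(prompt_template, **kwargs):
    # Resolve the prompt text from an environment variable, as in the diff
    prompt = os.getenv(prompt_template).format(**kwargs)
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Math-1.5B-Instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        top_p=0.8,
        stream=True,
    )
    # stream=True returns incremental chunks; each carries a delta,
    # not a complete message
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # some chunks (role headers, finish markers) have no text
            yield delta

If the commit's intent is instead to drop streaming entirely, removing stream=True and reading response.choices[0].message.content from the plain response would match the new return statement.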
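The last hunk drops stream=True from the event binding, which matches Gradio's API: event listeners such as .click() accept api_name but no stream parameter, and streaming to the output happens whenever fn is a generator. A self-contained sketch of that wiring, with illustrative component names and api_name value:

import gradio as gr

def stream_reply(prompt):
    # A generator fn: Gradio re-renders the output on every yield
    text = ""
    for word in f"Echo: {prompt}".split():
        text += word + " "
        yield text

with gr.Blocks() as app:
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Response")
    run = gr.Button("Run")
    run.click(
        fn=stream_reply,
        inputs=prompt,
        outputs=output,
        api_name="solve_execute",  # exposed in the app's API docs
    )

app.launch()

Because streaming is driven by the function itself, the lambda in the diff would stream as long as generate_response yields rather than returns.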