support qwen
Browse files- app.py +3 -1
- app_qwen.py +12 -12
- utils.py +6 -1
app.py
CHANGED
@@ -24,10 +24,12 @@ from app_showui import demo as demo_showui
|
|
24 |
from app_together import demo as demo_together
|
25 |
from app_xai import demo as demo_grok
|
26 |
from app_openai_voice import demo as demo_openai_voice
|
|
|
27 |
from utils import get_app
|
28 |
|
29 |
# Create mapping of providers to their demos
|
30 |
PROVIDERS = {
|
|
|
31 |
"Gemini": demo_gemini,
|
32 |
"OpenAI Voice": demo_openai_voice,
|
33 |
"Gemini Voice": demo_gemini_voice,
|
@@ -56,7 +58,7 @@ PROVIDERS = {
|
|
56 |
"NVIDIA": demo_nvidia,
|
57 |
}
|
58 |
|
59 |
-
demo = get_app(models=list(PROVIDERS.keys()), default_model="
|
60 |
|
61 |
if __name__ == "__main__":
|
62 |
demo.queue(api_open=False).launch(show_api=False)
|
|
|
24 |
from app_together import demo as demo_together
|
25 |
from app_xai import demo as demo_grok
|
26 |
from app_openai_voice import demo as demo_openai_voice
|
27 |
+
from app_qwen import demo as demo_qwen
|
28 |
from utils import get_app
|
29 |
|
30 |
# Create mapping of providers to their demos
|
31 |
PROVIDERS = {
|
32 |
+
"Qwen": demo_qwen,
|
33 |
"Gemini": demo_gemini,
|
34 |
"OpenAI Voice": demo_openai_voice,
|
35 |
"Gemini Voice": demo_gemini_voice,
|
|
|
58 |
"NVIDIA": demo_nvidia,
|
59 |
}
|
60 |
|
61 |
+
# Assemble the multi-provider demo: the dropdown lists every provider in
# PROVIDERS and opens on the Qwen tab by default.
demo = get_app(
    models=list(PROVIDERS.keys()),
    default_model="Qwen",
    src=PROVIDERS,
    dropdown_label="Select Provider",
)

if __name__ == "__main__":
    # api_open=False / show_api=False: expose the UI only, no public API surface.
    demo.queue(api_open=False).launch(show_api=False)
|
app_qwen.py
CHANGED
@@ -6,19 +6,19 @@ from utils import get_app
|
|
6 |
|
7 |
demo = get_app(
|
8 |
models=[
|
9 |
-
"qwen
|
10 |
-
"qwen
|
11 |
-
"qwen
|
12 |
-
"qwen
|
13 |
-
"
|
14 |
-
"
|
15 |
-
"
|
16 |
-
"
|
17 |
-
"
|
18 |
-
"
|
19 |
-
"
|
20 |
],
|
21 |
-
default_model="
|
22 |
src=ai_gradio.registry,
|
23 |
accept_token=not os.getenv("DASHSCOPE_API_KEY"),
|
24 |
)
|
|
|
6 |
|
7 |
# DashScope/Qwen model roster exposed by this sub-app, newest chat tiers
# first, research previews last.
_QWEN_MODELS = [
    "qwen-turbo-latest",
    "qwen-turbo",
    "qwen-plus",
    "qwen-max",
    "qwen1.5-110b-chat",
    "qwen1.5-72b-chat",
    "qwen1.5-32b-chat",
    "qwen1.5-14b-chat",
    "qwen1.5-7b-chat",
    "qwq-32b-preview",
    "qvq-72b-preview",
]

# Build the Qwen demo from the ai_gradio registry.  The token prompt is
# only shown when DASHSCOPE_API_KEY is not already set in the environment.
demo = get_app(
    models=_QWEN_MODELS,
    default_model="qvq-72b-preview",
    src=ai_gradio.registry,
    accept_token=not os.getenv("DASHSCOPE_API_KEY"),
)
|
utils.py
CHANGED
@@ -21,7 +21,12 @@ def get_app(
|
|
21 |
for model_name in models:
|
22 |
with gr.Column(visible=model_name == default_model) as column:
|
23 |
if isinstance(src, dict):
|
24 |
-
|
|
|
|
|
|
|
|
|
|
|
25 |
else:
|
26 |
gr.load(name=model_name, src=src, accept_token=accept_token, **kwargs)
|
27 |
columns.append(column)
|
|
|
21 |
for model_name in models:
    # One column per model; only the default model's column starts visible.
    with gr.Column(visible=model_name == default_model) as column:
        if not isinstance(src, dict):
            # src is a loader source (e.g. a registry name): delegate to gr.load.
            gr.load(name=model_name, src=src, accept_token=accept_token, **kwargs)
        elif ":" in model_name:  # Handle provider:model format
            # NOTE(review): prepends "qwen:" even though this branch is taken
            # precisely when the name already contains a provider prefix —
            # reads as qwen-specific logic in a generic helper; confirm intent
            # against the ai_gradio registry key format.
            src[f"qwen:{model_name}"].render()
        else:
            src[model_name].render()
    columns.append(column)
|