Update model configurations and add .gitignore
- .gitignore +2 -0
- app.py +7 -7
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.env
+.conda
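The new .gitignore keeps local environment files (`.env`) and the Conda directory (`.conda`) out of version control. A minimal sketch of how a git-ignored `.env` file is typically consumed at runtime; the `python-dotenv` dependency and the `GROQ_API_KEY` variable name are illustrative assumptions, not part of this commit:

```python
# Illustrative only: load secrets from the git-ignored .env file.
import os

from dotenv import load_dotenv  # assumed dependency (python-dotenv)

load_dotenv()                        # reads key=value pairs from .env, if present
api_key = os.getenv("GROQ_API_KEY")  # hypothetical variable name; stays local thanks to .gitignore
```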
app.py
CHANGED
@@ -8,7 +8,7 @@ import copy
 
 # Default configuration
 default_config = {
-    "main_model": "llama-3.
+    "main_model": "llama-3.3-70b-versatile",
     "cycles": 3,
     "layer_agent_config": {}
 }
@@ -20,12 +20,12 @@ layer_agent_config_def = {
     },
     "layer_agent_2": {
         "system_prompt": "Respond with a thought and then your response to the question. {helper_response}",
-        "model_name": "llama-3.2-
+        "model_name": "llama-3.2-1b-preview",
         "temperature": 0.7
     },
     "layer_agent_3": {
         "system_prompt": "You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
-        "model_name": "
+        "model_name": "llama-3.2-3b-preview"
     },
 
 }
@@ -33,7 +33,7 @@ layer_agent_config_def = {
 # Recommended Configuration
 
 rec_config = {
-    "main_model": "llama-3.
+    "main_model": "llama-3.3-70b-versatile",
     "cycles": 2,
     "layer_agent_config": {}
 }
@@ -148,7 +148,6 @@ valid_model_names = [
 # st.write("---")
 
 
-
 # Initialize session state
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -230,10 +229,11 @@ with st.sidebar:
     st.markdown("---")
     st.markdown("""
     ### Credits
-    -
-    -
+    - MoA info: [Together AI](https://www.together.ai/blog/together-moa)
+    - Groq Models: [Groq](https://groq.com/)
     - Paper: [arXiv:2406.04692](https://arxiv.org/abs/2406.04692)
     - GitHub repo: [skapadia3214/groq-moa](https://github.com/skapadia3214/groq-moa)
+    - Webpage: [diegoromero.es](https://diegoromero.es)
     """)
 
 # Main app layout
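For context on how these updated defaults are consumed, here is a minimal sketch of assembling the two configuration dictionaries into one agent configuration. The `MOAgent.from_config(...)` call at the end is a hypothetical reference to the constructor used in the skapadia3214/groq-moa repo and is not part of this diff:

```python
# Sketch: combine the updated defaults with the layer-agent definitions touched by this commit.
import copy

default_config = {
    "main_model": "llama-3.3-70b-versatile",
    "cycles": 3,
    "layer_agent_config": {},
}

# layer_agent_1 is omitted here; only the entries changed in this commit are shown.
layer_agent_config_def = {
    "layer_agent_2": {
        "system_prompt": "Respond with a thought and then your response to the question. {helper_response}",
        "model_name": "llama-3.2-1b-preview",
        "temperature": 0.7,
    },
    "layer_agent_3": {
        "system_prompt": "You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
        "model_name": "llama-3.2-3b-preview",
    },
}

# Attach the layer agents to a copy of the defaults, leaving the originals untouched.
config = copy.deepcopy(default_config)
config["layer_agent_config"] = layer_agent_config_def

# agent = MOAgent.from_config(**config)  # hypothetical call; see the groq-moa repo for the real API
```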