acecalisto3 committed
Commit 074e33f · verified · 1 Parent(s): bb9deb6

Update app.py

Files changed (1)
  1. app.py +241 -203
app.py CHANGED
@@ -1,36 +1,4 @@
- from huggingface_hub import InferenceClient
- import gradio as gr
- import random
- from logx import prompts
- import os
- import sys
- import json
- from typing import List, Dict
-
- # Import necessary modules from other files
- import { createLlamaPrompt } from "./createLlamaPrompt.mts";
- import { createSpace } from "./createSpace.mts";
- import { isPythonOrGradioAppPrompt } from "./isPythonOrGradioAppPrompt.mts";
- import { isReactAppPrompt } from "./isReactAppPrompt.mts";
- import { isStreamlitAppPrompt } from "./isStreamlitAppPrompt.mts";
- import { getWebApp } from "./getWebApp.mts";
- import { getGradioApp } from "./getGradioApp.mts";
- import { getReactApp } from "./getReactApp.mts";
- import { getStreamlitApp } from "./getStreamlitApp.mts";
- import { parseTutorial } from "./parseTutorial.mts";
- import { generateFiles } from "./generateFiles.mts";
- import { createLlamaPrompt } from "./createLlamaPrompt.mts";
- import { createSpace } from "./createSpace.mts";
- import { isPythonOrGradioAppPrompt } from "./isPythonOrGradioAppPrompt.mts";
- import { isReactAppPrompt } from "./isReactAppPrompt.mts";
- import { isStreamlitAppPrompt } from "./isStreamlitAppPrompt.mts";
- import { getWebApp } from "./getWebApp.mts";
- import { getGradioApp } from "./getGradioApp.mts";
- import { getReactApp } from "./getReactApp.mts";
- import { getStreamlitApp } from "./getStreamlitApp.mts";
- import { parseTutorial } from "./parseTutorial.mts";
- import { generateFiles } from "./generateFiles.mts";
- from agent import Agent
  from types import (
      Code,
      Prompt,
@@ -45,197 +13,267 @@ from types import (
      ReactApp,
      Code,
  )
-
- client = InferenceClient(
-     "mistralai/Mixtral-8x7B-Instruct-v0.1"
  )
- def run():
-     text_output = "Some text output"
-     chatbot_output = [{"text": "Chatbot response"}]
-
-     return text_output, chatbot_output
-
- # Ensure the function is properly linked to the event
- interface = gr.Interface(fn=run, inputs=[...], outputs=[gr.Textbox(), gr.Chatbot()])
-
- # Define the main function
- def main():
-     """
-     Main function that orchestrates the code generation process.
-     """
-     # Load prompts from prompts.py
-     prompts = load_prompts()
-
-     # Initialize an Agent instance
-     agent = Agent(prompts)
-
-     # Get the user's input
-     user_input = input("Enter your prompt: ")
-
-     # Process the user's input
-     result = agent.process(user_input)
-
-     # Print the result
-     print(result)
-
- # Function to load prompts from prompts.py
- def load_prompts():
-     """
-     Loads prompts from prompts.py.
-     """
-     prompts = {
-         "createLlamaPrompt": createLlamaPrompt,
-         "createSpace": createSpace,
-         "isPythonOrGradioAppPrompt": isPythonOrGradioAppPrompt,
-         "isReactAppPrompt": isReactAppPrompt,
-         "isStreamlitAppPrompt": isStreamlitAppPrompt,
-         "getWebApp": getWebApp,
-         "getGradioApp": getGradioApp,
-         "getReactApp": getReactApp,
-         "getStreamlitApp": getStreamlitApp,
-         "parseTutorial": parseTutorial,
-         "generateFiles": generateFiles,
-     }
-     return prompts
-
- # Indentation corrected here
- def create_prompt(app_type: str, app_name: str, app_description: str, app_features: list[str], app_dependencies: list[str], app_space: str, app_tutorial: str) -> str:
-     prompt = f"""
- I need you to help me create a {app_type} web application.
-
- The application name is: {app_name}
-
- The application description is: {app_description}
-
- The application features are: {app_features}
-
- The application dependencies are: {app_dependencies}
-
- The application space is: {app_space}
-
- The application tutorial is: {app_tutorial}
-
- Please generate the code for the application.
-     """
-     return prompt
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
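The removed format_prompt helper hand-builds the Mixtral-Instruct chat template: every past turn is wrapped in [INST] ... [/INST], followed by the model's reply and an </s> end-of-sequence marker. For a one-turn history it produces a string like the following (the example values are illustrative, not from the commit):

    history = [("What is 2 + 2?", "4")]
    # format_prompt("Now multiply that by 3", history) returns:
    # "<s>[INST] What is 2 + 2? [/INST] 4</s> [INST] Now multiply that by 3 [/INST]"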
- def generate(
-     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
- ):
-     seed = random.randint(1, 1111111111111111)
-
-     system_prompt = agent
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=seed,
  )
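The diff collapses the rest of generate after this kwargs block, so the call that consumes generate_kwargs is not shown. For orientation: these are the sampling parameters accepted by InferenceClient.text_generation in huggingface_hub, and a streaming call wired to the helpers above would typically look like the sketch below (an illustration under that assumption, not the elided original code):

    # illustrative only: stream completions from the hosted Mixtral endpoint
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for chunk in stream:
        output += chunk.token.text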
- additional_inputs = [
-     gr.Dropdown(
-         label="Agents",
-         choices=[s for s in agents],
-         value=agents[0],
-         interactive=True,
-     ),
-     gr.Textbox(
-         label="System Prompt",
-         max_lines=1,
-         interactive=True,
-     ),
-     gr.Slider(
-         label="Temperature",
-         value=0.9,
-         minimum=0.0,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values produce more diverse outputs",
-     ),
-
-     gr.Slider(
-         label="Max new tokens",
-         value=1048*10,
-         minimum=0,
-         maximum=1000*10,
-         step=64,
-         interactive=True,
-         info="The maximum numbers of new tokens",
-     ),
-     gr.Slider(
-         label="Top-p (nucleus sampling)",
-         value=0.90,
-         minimum=0.0,
-         maximum=1,
-         step=0.05,
-         interactive=True,
-         info="Higher values sample more low-probability tokens",
-     ),
-     gr.Slider(
-         label="Repetition penalty",
-         value=1.2,
-         minimum=1.0,
-         maximum=2.0,
-         step=0.05,
-         interactive=True,
-         info="Penalize repeated tokens",
-     ),
- ]
-
- examples = [
-     ["Create a simple web application using Flask", agents[0], None, None, None, None, ],
-     ["Generate a Python script to perform a linear regression analysis", agents[2], None, None, None, None, ],
-     ["Create a Dockerfile for a Node.js application", agents[1], None, None, None, None, ],
-     ["Write a shell script to automate the deployment of a web application to a server", agents[3], None, None, None, None, ],
-     ["Generate a SQL query to retrieve the top 10 most popular products by sales", agents[4], None, None, None, None, ],
-     ["Write a Python script to generate a random password with a given length and complexity", agents[2], None, None, None, None, ],
-     ["Create a simple game in Unity using C#", agents[0], None, None, None, None, ],
-     ["Generate a Java program to implement a binary search algorithm", agents[2], None, None, None, None, ],
-     ["Write a shell script to monitor the CPU usage of a server", agents[1], None, None, None, None, ],
-     ["Create a simple web application using React and Node.js", agents[0], None, None, None, None, ],
-     ["Generate a Python script to perform a sentiment analysis on a given text", agents[2], None, None, None, None, ],
-     ["Write a shell script to automate the backup of a MySQL database", agents[1], None, None, None, None, ],
-     ["Create a simple game in Unreal Engine using C++", agents[3], None, None, None, None, ],
-     ["Generate a Java program to implement a bubble sort algorithm", agents[2], None, None, None, None, ],
-     ["Write a shell script to monitor the memory usage of a server", agents[1], None, None, None, None, ],
-     ["Create a simple web application using Angular and Node.js", agents[0], None, None, None, None, ],
-     ["Generate a Python script to perform a text classification on a given dataset", agents[2], None, None, None, None, ],
-     ["Write a shell script to automate the installation of a software package on a server", agents[1], None, None, None, None, ],
-     ["Create a simple game in Godot using GDScript", agents[3], None, None, None, None, ],
-     ["Generate a Java program to implement a merge sort algorithm", agents[2], None, None, None, None, ],
-     ["Write a shell script to automate the cleanup of temporary files on a server", agents[1], None, None, None, None, ],
- ]
-
- gr.ChatInterface(
-     fn=generate,
-     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-     additional_inputs=additional_inputs,
-     title="Mixtral 46.7B",
-     examples=examples,
-     concurrency_limit=20,
- ).launch(show_api=False)
-
- # Run the main function if the script is executed directly
  if __name__ == "__main__":
-     main()
 
+ from typing import List, Dict, Optional
  from types import (
      Code,
      Prompt,
      ReactApp,
      Code,
  )
+ from agent import Agent
+ from prompts import (
+     createLlamaPrompt,
+     createSpace,
+     isPythonOrGradioAppPrompt,
+     isReactAppPrompt,
+     isStreamlitAppPrompt,
+     getWebApp,
+     getGradioApp,
+     getReactApp,
+     getStreamlitApp,
+     parseTutorial,
+     generateFiles,
  )
+ from huggingface_hub import InferenceClient
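The new imports pull Code, Prompt and ReactApp (and, later in the file, AppType, Space, Tutorial and File) from a module named types, which shadows the standard-library module of that name, so these classes must come from a project-local types.py. Judging only by the attribute names used later in this diff (Code(code=..., language=...), Space(space=...), Tutorial(tutorial=...)), such a module could be as simple as the sketch below; every detail here is an assumption, not part of the commit:

    # hypothetical project-local types.py, inferred from usage in this diff
    from dataclasses import dataclass
    from enum import Enum

    class AppType(Enum):
        # member values are assumptions; only the member names appear in the diff
        WEB_APP = "web_app"
        GRADIO_APP = "gradio_app"
        STREAMLIT_APP = "streamlit_app"
        REACT_APP = "react_app"

    @dataclass
    class Code:
        code: str
        language: str

    @dataclass
    class Prompt:
        prompt: str

    @dataclass
    class Space:
        space: str

    @dataclass
    class Tutorial:
        tutorial: str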

+ class Agent:
+     def __init__(self, prompts: Dict[str, any]):
+         self.prompts = prompts
+         self.client = InferenceClient(
+             "mistralai/Mixtral-8x7B-Instruct-v0.1"
+         )
+
+     def process(self, user_input: str) -> str:
+         """
+         Processes the user's input and generates code.
+         """
+         # Parse the user's input
+         app_type, app_name, app_description, app_features, app_dependencies, app_space, app_tutorial = self.parse_input(user_input)
+
+         # Generate a prompt for the Llama model
+         prompt = self.prompts["createLlamaPrompt"](
+             app_type, app_name, app_description, app_features, app_dependencies, app_space, app_tutorial
+         )
+
+         # Generate code using the Llama model
+         code = self.generate_code(prompt)
+
+         # Generate files for the application
+         files = self.prompts["generateFiles"](
+             app_type, app_name, app_description, app_features, app_dependencies, app_space, app_tutorial
+         )
+
+         # Return the generated code and files
+         return f"Code: {code}\nFiles: {files}"
+
+     def parse_input(self, user_input: str) -> tuple:
+         """
+         Parses the user's input and extracts the relevant information.
+         """
+         # Extract the app type
+         app_type = self.extract_app_type(user_input)
+
+         # Extract the app name
+         app_name = self.extract_app_name(user_input)
+
+         # Extract the app description
+         app_description = self.extract_app_description(user_input)
+
+         # Extract the app features
+         app_features = self.extract_app_features(user_input)
+
+         # Extract the app dependencies
+         app_dependencies = self.extract_app_dependencies(user_input)
+
+         # Extract the app space
+         app_space = self.extract_app_space(user_input)
+
+         # Extract the app tutorial
+         app_tutorial = self.extract_app_tutorial(user_input)
+
+         return app_type, app_name, app_description, app_features, app_dependencies, app_space, app_tutorial
+
+     def extract_app_type(self, user_input: str) -> AppType:
+         """
+         Extracts the app type from the user's input.
+         """
+         # Check if the user specified a specific app type
+         if "web app" in user_input:
+             return AppType.WEB_APP
+         elif "gradio app" in user_input:
+             return AppType.GRADIO_APP
+         elif "streamlit app" in user_input:
+             return AppType.STREAMLIT_APP
+         elif "react app" in user_input:
+             return AppType.REACT_APP
+
+         # Otherwise, assume the user wants a web app
+         return AppType.WEB_APP
+
+     def extract_app_name(self, user_input: str) -> str:
+         """
+         Extracts the app name from the user's input.
+         """
+         # Find the substring "app name is:"
+         start_index = user_input.find("app name is:") + len("app name is:")
+
+         # Find the end of the app name
+         end_index = user_input.find(".", start_index)
+
+         # Extract the app name
+         app_name = user_input[start_index:end_index].strip()
+
+         return app_name
+
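All of the extract_* helpers share this marker-based slicing pattern: find a literal marker phrase, then take everything up to the next period. A small illustration of what extract_app_name yields for a well-formed request (the input string is invented for illustration); note that the pattern assumes both the marker and a trailing period are present:

    request = "The app name is: TaskTracker. The app description is: a simple to-do list."
    # find("app name is:") + len("app name is:")  -> index just past the marker
    # find(".", start_index)                      -> index of the following period
    # slice + .strip()                            -> "TaskTracker"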
+     def extract_app_description(self, user_input: str) -> str:
+         """
+         Extracts the app description from the user's input.
+         """
+         # Find the substring "app description is:"
+         start_index = user_input.find("app description is:") + len("app description is:")
+
+         # Find the end of the app description
+         end_index = user_input.find(".", start_index)
+
+         # Extract the app description
+         app_description = user_input[start_index:end_index].strip()
+
+         return app_description
+
+     def extract_app_features(self, user_input: str) -> List[str]:
+         """
+         Extracts the app features from the user's input.
+         """
+         # Find the substring "app features are:"
+         start_index = user_input.find("app features are:") + len("app features are:")
+
+         # Find the end of the app features
+         end_index = user_input.find(".", start_index)
+
+         # Extract the app features
+         app_features_str = user_input[start_index:end_index].strip()
+
+         # Split the app features string into a list
+         app_features = app_features_str.split(", ")
+
+         return app_features
+
+     def extract_app_dependencies(self, user_input: str) -> List[str]:
+         """
+         Extracts the app dependencies from the user's input.
+         """
+         # Find the substring "app dependencies are:"
+         start_index = user_input.find("app dependencies are:") + len("app dependencies are:")
+
+         # Find the end of the app dependencies
+         end_index = user_input.find(".", start_index)
+
+         # Extract the app dependencies
+         app_dependencies_str = user_input[start_index:end_index].strip()
+
+         # Split the app dependencies string into a list
+         app_dependencies = app_dependencies_str.split(", ")
+
+         return app_dependencies
+
+     def extract_app_space(self, user_input: str) -> Optional[Space]:
+         """
+         Extracts the app space from the user's input.
+         """
+         # Find the substring "app space is:"
+         start_index = user_input.find("app space is:") + len("app space is:")
+
+         # Find the end of the app space
+         end_index = user_input.find(".", start_index)
+
+         # Extract the app space
+         app_space_str = user_input[start_index:end_index].strip()
+
+         # Create a Space object
+         app_space = Space(space=app_space_str)
+
+         return app_space
+
+     def extract_app_tutorial(self, user_input: str) -> Optional[Tutorial]:
+         """
+         Extracts the app tutorial from the user's input.
+         """
+         # Find the substring "app tutorial is:"
+         start_index = user_input.find("app tutorial is:") + len("app tutorial is:")
+
+         # Find the end of the app tutorial
+         end_index = user_input.find(".", start_index)
+
+         # Extract the app tutorial
+         app_tutorial_str = user_input[start_index:end_index].strip()
+
+         # Create a Tutorial object
+         app_tutorial = Tutorial(tutorial=app_tutorial_str)
+
+         return app_tutorial
+
+     def generate_code(self, prompt: Prompt) -> Code:
+         """
+         Generates code using the Llama model.
+         """
+         # Send the prompt to the Llama model
+         response = self.client(prompt.prompt)
+
+         # Extract the generated code
+         code = response["generated_text"]
+         code = code.replace("```", "")
+         code = code.replace("```", "")
+
+         # Create a Code object
+         code = Code(code=code, language="python")
+
+         return code
+
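A caveat on the call under "Send the prompt to the Llama model": InferenceClient instances in huggingface_hub are not callable, and text generation normally goes through the client's text_generation method, which returns the generated string directly rather than a {"generated_text": ...} mapping. A minimal sketch of the equivalent call, assuming the Prompt object carries the rendered prompt text (illustrative, not the committed code):

    # illustrative alternative to `response = self.client(prompt.prompt)` above
    generated_text = self.client.text_generation(
        prompt.prompt,
        max_new_tokens=1024,   # arbitrary illustrative budget
        temperature=0.7,
    )
    code = Code(code=generated_text.replace("```", ""), language="python")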
+     def generate_files(self, app_type: AppType, app_name: str, app_description: str, app_features: List[str], app_dependencies: List[str], app_space: Optional[Space] = None, app_tutorial: Optional[Tutorial] = None) -> List[File]:
+         """
+         Generates files for the application.
+         """
+         # Generate files based on the app type
+         files = self.prompts["generateFiles"](
+             app_type, app_name, app_description, app_features, app_dependencies, app_space, app_tutorial
  )
+
+         return files
+
+ def main():
+     """
+     Main function for the application.
+     """
+     # Create an agent
+     agent = Agent(
+         prompts={
+             "createLlamaPrompt": createLlamaPrompt,
+             "createSpace": createSpace,
+             "isPythonOrGradioAppPrompt": isPythonOrGradioAppPrompt,
+             "isReactAppPrompt": isReactAppPrompt,
+             "isStreamlitAppPrompt": isStreamlitAppPrompt,
+             "getWebApp": getWebApp,
+             "getGradioApp": getGradioApp,
+             "getReactApp": getReactApp,
+             "getStreamlitApp": getStreamlitApp,
+             "parseTutorial": parseTutorial,
+             "generateFiles": generateFiles,
+         }
+     )
+
+     # Get user input
+     user_input = input("Enter your request: ")
+
+     # Process the user's input
+     response = agent.process(user_input)
+
+     # Print the response
+     print(response)
+
  if __name__ == "__main__":
+     main()
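After this rewrite, app.py is a console workflow rather than a Gradio UI: main() builds an Agent, reads a single request from stdin, and prints the combined result. Because parse_input matches literal marker phrases, a request has to spell them out explicitly; an illustrative example (the request text is invented, not from the commit):

    # illustrative request string containing every marker parse_input looks for
    request = (
        "I want a gradio app. "
        "The app name is: TaskTracker. "
        "The app description is: a to-do list with reminders. "
        "The app features are: add tasks, delete tasks. "
        "The app dependencies are: gradio, sqlite3. "
        "The app space is: tasktracker-demo. "
        "The app tutorial is: none."
    )
    # agent.process(request) is then meant to resolve AppType.GRADIO_APP, "TaskTracker",
    # the feature and dependency lists, and return the "Code: ... Files: ..." summary string.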