acecalisto3 commited on
Commit
cbf6e45
·
verified ·
1 Parent(s): d6eda24

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +170 -55
app.py CHANGED
@@ -5,6 +5,14 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
5
  import black
6
  from pylint import lint
7
  from io import StringIO
 
 
 
 
 
 
 
 
8
 
9
  HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/Mistri"
10
  PROJECT_ROOT = "projects"
@@ -24,6 +32,8 @@ if 'current_state' not in st.session_state:
24
  'toolbox': {},
25
  'workspace_chat': {}
26
  }
 
 
27
 
28
  class AIAgent:
29
  def __init__(self, name, description, skills):
@@ -34,21 +44,25 @@ class AIAgent:
34
  def create_agent_prompt(self):
35
  skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
36
  agent_prompt = f"""
37
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
38
- {skills_str}
39
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
40
  """
41
  return agent_prompt
42
 
43
  def autonomous_build(self, chat_history, workspace_projects):
44
- """
45
- Autonomous build logic that continues based on the state of chat history and workspace projects.
46
- """
47
  summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
48
  summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
49
 
50
- next_step = "Based on the current state, the next logical step is to implement the main application logic."
51
-
 
 
 
 
 
 
 
 
52
  return summary, next_step
53
 
54
  def save_agent_to_file(agent):
@@ -93,35 +107,85 @@ def chat_interface_with_agent(input_text, agent_name):
93
  model = AutoModelForCausalLM.from_pretrained(model_name)
94
  tokenizer = AutoTokenizer.from_pretrained(model_name)
95
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
96
- except EnvironmentError as e:
97
- return f"Error loading model: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
  # Combine the agent prompt with user input
100
- combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
101
-
102
- # Truncate input text to avoid exceeding the model's maximum length
103
- max_input_length = 900
104
- input_ids = tokenizer.encode(combined_input, return_tensors="pt")
105
- if input_ids.shape[1] > max_input_length:
106
- input_ids = input_ids[:, :max_input_length]
107
 
108
  # Generate chatbot response
109
- outputs = model.generate(
110
- input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id # Set pad_token_id to eos_token_id
111
- )
112
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
113
  return response
114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  def workspace_interface(project_name):
116
  project_path = os.path.join(PROJECT_ROOT, project_name)
117
  if not os.path.exists(PROJECT_ROOT):
118
  os.makedirs(PROJECT_ROOT)
119
  if not os.path.exists(project_path):
120
  os.makedirs(project_path)
121
- st.session_state.workspace_projects[project_name] = {"files": []}
122
- st.session_state.current_state['workspace_chat']['project_name'] = project_name
123
- commit_and_push_changes(f"Create project {project_name}")
124
- return f"Project {project_name} created successfully."
125
  else:
126
  return f"Project {project_name} already exists."
127
 
@@ -166,25 +230,39 @@ def sentiment_analysis(text):
166
  return sentiment[0]
167
 
168
  def translate_code(code, input_language, output_language):
169
- # Define a dictionary to map programming languages to their corresponding file extensions
170
  language_extensions = {
171
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  }
173
 
174
- # Add code to handle edge cases such as invalid input and unsupported programming languages
175
  if input_language not in language_extensions:
176
  raise ValueError(f"Invalid input language: {input_language}")
177
  if output_language not in language_extensions:
178
  raise ValueError(f"Invalid output language: {output_language}")
179
 
180
- # Use the dictionary to map the input and output languages to their corresponding file extensions
181
  input_extension = language_extensions[input_language]
182
  output_extension = language_extensions[output_language]
183
 
184
  # Translate the code using the OpenAI API
185
  prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
186
  response = openai.ChatCompletion.create(
187
- model="gpt-4",
188
  messages=[
189
  {"role": "system", "content": "You are an expert software developer."},
190
  {"role": "user", "content": prompt}
@@ -193,7 +271,6 @@ def translate_code(code, input_language, output_language):
193
  translated_code = response.choices[0].message['content'].strip()
194
 
195
  # Return the translated code
196
- translated_code = response.choices[0].message['content'].strip()
197
  st.session_state.current_state['toolbox']['translated_code'] = translated_code
198
  return translated_code
199
 
@@ -203,11 +280,10 @@ def generate_code(code_idea):
203
  generator = pipeline('text-generation', model='gpt4o')
204
  generated_code = generator(code_idea, max_length=10000, num_return_sequences=1)[0]['generated_text']
205
  messages=[
206
- {"role": "system", "content": "You are an expert software developer."},
207
- {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
208
- ]
209
  st.session_state.current_state['toolbox']['generated_code'] = generated_code
210
-
211
  return generated_code
212
 
213
  def run_git_command(command):
@@ -247,23 +323,22 @@ if scope == "Local" and not is_git_repository():
247
 
248
  if st.button("Set Git Configuration"):
249
  global_flag = "--global" if scope == "Global" else ""
250
-
251
  email_command = f"git config {global_flag} user.email \"{email}\""
252
  name_command = f"git config {global_flag} user.name \"{name}\""
253
-
254
  success, email_result = run_git_command(email_command)
255
  if success:
256
  st.success("Email configuration set successfully!")
257
  else:
258
  st.error(f"Failed to set email configuration: {email_result}")
259
-
260
  success, name_result = run_git_command(name_command)
261
  if success:
262
  st.success("Name configuration set successfully!")
263
  else:
264
  st.error(f"Failed to set name configuration: {name_result}")
265
-
266
- st.write("---")
267
 
268
  if st.button("View Current Git Configuration"):
269
  success, config_result = run_git_command("git config --list")
@@ -273,16 +348,6 @@ if st.button("View Current Git Configuration"):
273
  else:
274
  st.error(f"Failed to retrieve Git configuration: {config_result}")
275
 
276
- def commit_and_push_changes(commit_message):
277
- """Commits and pushes changes to the Hugging Face repository."""
278
- try:
279
- subprocess.run(["git", "add", "."], check=True)
280
- subprocess.run(["git", "commit", "-m", commit_message], check=True)
281
- subprocess.run(["git", "push", "origin", "main"], check=True)
282
- return True, "Changes committed and pushed successfully."
283
- except subprocess.CalledProcessError as e:
284
- return False, f"Error in Git operations: {e}"
285
-
286
  # Streamlit App
287
  st.title("AI Agent Creator")
288
 
@@ -331,7 +396,8 @@ elif app_mode == "Tool Box":
331
  st.subheader("Code Editor")
332
  code_editor = st.text_area("Write your code:", height=300)
333
  if st.button("Format & Lint"):
334
- formatted_code, lint_message = code_editor_interface(code_editor)
 
335
  st.code(formatted_code, language="python")
336
  st.info(lint_message)
337
 
@@ -377,7 +443,7 @@ elif app_mode == "Tool Box":
377
  "Translate code": "translate_code('code', 'source_language', 'target_language')",
378
  }
379
  for command_name, command in preset_commands.items():
380
- st.write(f"{command_name}: `{command}`")
381
 
382
  elif app_mode == "Workspace Chat App":
383
  # Workspace Chat App
@@ -430,7 +496,7 @@ elif app_mode == "Workspace Chat App":
430
  for project, details in st.session_state.workspace_projects.items():
431
  st.write(f"Project: {project}")
432
  for file in details['files']:
433
- st.write(f" - {file}")
434
 
435
  # Chat with AI Agents
436
  st.subheader("Chat with AI Agents")
@@ -451,5 +517,54 @@ elif app_mode == "Workspace Chat App":
451
  st.write("Next Step:")
452
  st.write(next_step)
453
 
454
- # Display current state for debugging
455
- st.sidebar.subheader("Current State")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  import black
6
  from pylint import lint
7
  from io import StringIO
8
+ import re
9
+ from typing import Dict, Any
10
+ from langchain.embeddings import HuggingFaceEmbeddings
11
+ from langchain.vectorstores import FAISS
12
+ from langchain.llms import OpenAI
13
+ from langchain.chains import ConversationChain
14
+ from concurrent.futures import ThreadPoolExecutor
15
+ import openai
16
 
17
  HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/Mistri"
18
  PROJECT_ROOT = "projects"
 
32
  'toolbox': {},
33
  'workspace_chat': {}
34
  }
35
# Ensure the selected-agent slot exists in session state before any widget
# or chat handler reads it; None means "no agent selected yet".
if 'current_agent' not in st.session_state:
    st.session_state.current_agent = None
37
 
38
  class AIAgent:
39
  def __init__(self, name, description, skills):
 
44
  def create_agent_prompt(self):
45
  skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
46
  agent_prompt = f"""
47
+ As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas: {skills_str} I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
 
 
48
  """
49
  return agent_prompt
50
 
51
    def autonomous_build(self, chat_history, workspace_projects):
        """Autonomous build logic that continues based on the state of chat history and workspace projects."""
        # Fold the whole conversation and every known project into one
        # plain-text summary the LLM below can reason about.
        summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
        summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])

        # Use an LLM to generate a summary of the current state and suggest the next step
        # NOTE(review): openai.ChatCompletion is the legacy (<1.0) OpenAI SDK
        # surface and requires a configured API key — confirm the installed
        # openai version before relying on this path.
        model_name = "gpt-3.5-turbo"  # You can choose a different LLM if you prefer
        response = openai.ChatCompletion.create(
            model=model_name,
            messages=[
                {"role": "system", "content": "You are a helpful AI assistant that can help developers with building projects. "},
                {"role": "user", "content": f"Based on the following summary, what is the next logical step in the development process? \n\n{summary}"}
            ]
        )
        # First (and only) choice holds the suggested next step as plain text.
        next_step = response.choices[0].message['content']
        return summary, next_step
67
 
68
  def save_agent_to_file(agent):
 
107
  model = AutoModelForCausalLM.from_pretrained(model_name)
108
  tokenizer = AutoTokenizer.from_pretrained(model_name)
109
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
110
+ response = generator(
111
+ f"{agent_prompt}\n\nUser: {input_text}\nAgent:",
112
+ max_length=1024,
113
+ num_return_sequences=1,
114
+ do_sample=True,
115
+ top_k=50,
116
+ temperature=0.7,
117
+ )[0]["generated_text"]
118
+ return response
119
+ except Exception as e:
120
+ return f"Error: {e}"
121
+
122
# Chat interface for CodeCraft
def chat_interface(input_text):
    """Handles interactions with the CodeCraft AI agent.

    Args:
        input_text: The user's chat message.

    Returns:
        Only the newly generated reply text, or an ``"Error: ..."`` string
        if model loading or generation fails (matching the error style of
        ``chat_interface_with_agent``).
    """
    # Pre-defined system prompt for CodeCraft.
    codecraft_prompt = """
    You are CodeCraft, a helpful and knowledgeable AI assistant specializing in software development.
    You are designed to provide guidance, code snippets, and solutions to developers.
    Please answer user questions in a comprehensive and informative manner.
    """

    try:
        # NOTE(review): the model is reloaded from disk on every call, which
        # is slow under Streamlit's rerun model — consider caching it (e.g.
        # st.cache_resource); confirm with the app's lifecycle first.
        model_name = "gpt2"  # You can use a more advanced model like GPT-3 or GPT-4 if you have access
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

        # Combine the agent prompt with user input
        combined_input = f"{codecraft_prompt}\n\nUser: {input_text}\nCodeCraft:"

        # Generate chatbot response.
        # max_new_tokens bounds only the reply (max_length counted the prompt
        # against the budget), and return_full_text=False stops the pipeline
        # from echoing the entire prompt back in the returned text.
        response = generator(
            combined_input,
            max_new_tokens=256,
            num_return_sequences=1,
            do_sample=True,
            top_k=50,
            temperature=0.7,
            return_full_text=False,
        )[0]["generated_text"]
        return response
    except Exception as e:
        return f"Error: {e}"
151
 
152
def commit_and_push_changes(commit_message):
    """Commits all local changes and pushes them to the Hugging Face repository.

    Args:
        commit_message: Message for the git commit. Passed as a discrete
            argv element (never interpolated into a shell string), so quotes
            or shell metacharacters in the message cannot inject commands —
            the previous ``os.system(f"git commit -m '{message}'")`` could.

    Reports success or failure through the Streamlit UI; returns None.
    """
    import subprocess  # local import keeps this fix self-contained

    try:
        # check=True raises CalledProcessError on a non-zero exit status;
        # os.system() returned the status and the old code ignored it, so
        # every git failure was silently reported as success.
        subprocess.run(["git", "add", "."], check=True)
        subprocess.run(["git", "commit", "-m", commit_message], check=True)
        subprocess.run(["git", "push", "origin", "main"], check=True)
        st.write(f"Changes committed and pushed to {HUGGING_FACE_REPO_URL}")
    except (subprocess.CalledProcessError, OSError) as e:
        st.write(f"Error committing and pushing changes: {e}")
161
+
162
def format_code(code):
    """Formats Python source *code* with Black.

    (The old docstring also claimed this "checks for linting errors" —
    linting lives in ``lint_code``; this function only formats.)

    Args:
        code: Python source as a string.

    Returns:
        The Black-formatted source, or an ``"Error formatting code: ..."``
        string if Black rejects the input (e.g. a syntax error).
    """
    try:
        return black.format_str(code, mode=black.FileMode())
    except Exception as e:  # black.InvalidInput on unparsable source, etc.
        return f"Error formatting code: {e}"
169
+
170
def lint_code(code):
    """Runs Pylint over *code* and returns the report text.

    Pylint lints files, not strings, so the code is first written to a
    temporary ``.py`` file. The previous call ``lint.run(code, do_exit=False,
    output=output)`` could never work: pylint exposes ``lint.Run`` (a class),
    it takes a list of file paths plus a ``reporter``, and has no ``output=``
    keyword — so this function always fell into the error branch.

    Args:
        code: Python source as a string.

    Returns:
        Pylint's text report, or an ``"Error linting code: ..."`` string.
    """
    import os
    import tempfile

    tmp_path = None
    try:
        from pylint.reporters.text import TextReporter

        with tempfile.NamedTemporaryFile(
            "w", suffix=".py", delete=False, encoding="utf-8"
        ) as tmp:
            tmp.write(code)
            tmp_path = tmp.name
        output = StringIO()
        # exit=False stops pylint from calling sys.exit() and killing the app.
        lint.Run([tmp_path], reporter=TextReporter(output), exit=False)
        return output.getvalue()
    except Exception as e:
        return f"Error linting code: {e}"
    finally:
        if tmp_path and os.path.exists(tmp_path):
            os.unlink(tmp_path)
178
+
179
def workspace_interface(project_name):
    """Creates a workspace directory for *project_name* under PROJECT_ROOT.

    On success the project is registered in the Streamlit session state and
    the change is committed; if the directory already exists a status message
    says so instead.

    Args:
        project_name: Name of the project directory to create.

    Returns:
        A human-readable status message.
    """
    project_path = os.path.join(PROJECT_ROOT, project_name)
    try:
        # makedirs also creates PROJECT_ROOT (replacing the old two-step
        # mkdir) and raises FileExistsError atomically, removing the
        # check-then-create race of the separate os.path.exists() tests.
        os.makedirs(project_path)
    except FileExistsError:
        return f"Project {project_name} already exists."
    st.session_state.workspace_projects[project_name] = {"files": []}
    st.session_state.current_state['workspace_chat']['project_name'] = project_name
    commit_and_push_changes(f"Create project {project_name}")
    return f"Project {project_name} created successfully."
191
 
 
230
  return sentiment[0]
231
 
232
  def translate_code(code, input_language, output_language):
 
233
  language_extensions = {
234
+ "Python": ".py",
235
+ "JavaScript": ".js",
236
+ "Java": ".java",
237
+ "C++": ".cpp",
238
+ "C#": ".cs",
239
+ "Go": ".go",
240
+ "Ruby": ".rb",
241
+ "PHP": ".php",
242
+ "Swift": ".swift",
243
+ "Kotlin": ".kt",
244
+ "TypeScript": ".ts",
245
+ "Rust": ".rs",
246
+ "Scala": ".scala",
247
+ "Dart": ".dart",
248
+ "Lua": ".lua",
249
+ "HTML": ".html",
250
+ "CSS": ".css",
251
+ "SQL": ".sql"
252
  }
253
 
 
254
  if input_language not in language_extensions:
255
  raise ValueError(f"Invalid input language: {input_language}")
256
  if output_language not in language_extensions:
257
  raise ValueError(f"Invalid output language: {output_language}")
258
 
 
259
  input_extension = language_extensions[input_language]
260
  output_extension = language_extensions[output_language]
261
 
262
  # Translate the code using the OpenAI API
263
  prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
264
  response = openai.ChatCompletion.create(
265
+ model="gpt-3.5-turbo",
266
  messages=[
267
  {"role": "system", "content": "You are an expert software developer."},
268
  {"role": "user", "content": prompt}
 
271
  translated_code = response.choices[0].message['content'].strip()
272
 
273
  # Return the translated code
 
274
  st.session_state.current_state['toolbox']['translated_code'] = translated_code
275
  return translated_code
276
 
 
280
  generator = pipeline('text-generation', model='gpt4o')
281
  generated_code = generator(code_idea, max_length=10000, num_return_sequences=1)[0]['generated_text']
282
  messages=[
283
+ {"role": "system", "content": "You are an expert software developer."},
284
+ {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
285
+ ]
286
  st.session_state.current_state['toolbox']['generated_code'] = generated_code
 
287
  return generated_code
288
 
289
  def run_git_command(command):
 
323
 
324
  if st.button("Set Git Configuration"):
325
  global_flag = "--global" if scope == "Global" else ""
326
+
327
  email_command = f"git config {global_flag} user.email \"{email}\""
328
  name_command = f"git config {global_flag} user.name \"{name}\""
329
+
330
  success, email_result = run_git_command(email_command)
331
  if success:
332
  st.success("Email configuration set successfully!")
333
  else:
334
  st.error(f"Failed to set email configuration: {email_result}")
335
+
336
  success, name_result = run_git_command(name_command)
337
  if success:
338
  st.success("Name configuration set successfully!")
339
  else:
340
  st.error(f"Failed to set name configuration: {name_result}")
341
+ st.write("---")
 
342
 
343
  if st.button("View Current Git Configuration"):
344
  success, config_result = run_git_command("git config --list")
 
348
  else:
349
  st.error(f"Failed to retrieve Git configuration: {config_result}")
350
 
 
 
 
 
 
 
 
 
 
 
351
  # Streamlit App
352
  st.title("AI Agent Creator")
353
 
 
396
  st.subheader("Code Editor")
397
  code_editor = st.text_area("Write your code:", height=300)
398
  if st.button("Format & Lint"):
399
+ formatted_code = format_code(code_editor)
400
+ lint_message = lint_code(code_editor)
401
  st.code(formatted_code, language="python")
402
  st.info(lint_message)
403
 
 
443
  "Translate code": "translate_code('code', 'source_language', 'target_language')",
444
  }
445
  for command_name, command in preset_commands.items():
446
+ st.write(f"{command_name}: {command}")
447
 
448
  elif app_mode == "Workspace Chat App":
449
  # Workspace Chat App
 
496
  for project, details in st.session_state.workspace_projects.items():
497
  st.write(f"Project: {project}")
498
  for file in details['files']:
499
+ st.write(f" - {file}")
500
 
501
  # Chat with AI Agents
502
  st.subheader("Chat with AI Agents")
 
517
  st.write("Next Step:")
518
  st.write(next_step)
519
 
520
+ # Display current state for debugging
521
+ st.sidebar.subheader("Current State")
522
+ st.sidebar.write(st.session_state.current_state)
523
+
524
def show_chat_history(current_agent):
    """Render the stored chat messages for *current_agent*, newest first.

    Messages are shown struck-through (HTML <s>) in the Streamlit pane.
    Always returns an empty string so callers can drop it into the UI.
    """
    workspace_chat = st.session_state.current_state['workspace_chat']
    if current_agent not in workspace_chat:
        return ""
    for user_msg, _agent_msg in reversed(workspace_chat[current_agent]['messages']):
        st.markdown(f"<s>{current_agent}: {user_msg}</s>", unsafe_allow_html=True)
    return ""
531
+
532
def get_agent_response(agent, input_text):
    """Route *input_text* to the chat handler for *agent* and return its reply.

    ``None`` yields a fixed notice, "CodeCraft" goes to the built-in
    interface, and any other name is forwarded to its custom agent.
    """
    if agent is None:
        return "No agent selected."
    if agent == "CodeCraft":
        return chat_interface(input_text)
    return chat_interface_with_agent(input_text, agent)
540
+
541
def send_message_to_agent(agent, input_text):
    """Forward *input_text* to *agent* and render its reply in the chat pane."""
    reply = get_agent_response(agent, input_text)
    st.markdown(f"{agent}: {reply}")
545
+
546
def process_chat_input(input_text, current_agent):
    """Processes a chat input by checking special characters and forwarding it to appropriate handlers."""
    if re.match(r'^\W+$', input_text):  # Mention other agents
        # NOTE(review): this branch fires only when the input is ENTIRELY
        # non-word characters, yet the code below splits it into words —
        # the two seem contradictory. Confirm the intended trigger (a
        # leading '@' prefix looks like what was meant).
        words = input_text[1:].split(' ')
        # Builds every prefix of the word list as a candidate agent name;
        # joining with [''] leaves a trailing space on each candidate.
        mentioned_agents = [' '.join(words[:i]+['']) for i in range(len(words)+1)]
        if len(mentioned_agents) > 1:
            del mentioned_agents[-1]
        mention_msg = f"@{current_agent}, you mentioned: {' | '.join(mentioned_agents)}"
        st.markdown(mention_msg)
        # Open an active chat slot for every mentioned name that matches a
        # known agent.
        for agent in mentioned_agents:
            if agent != '' and agent in st.session_state.available_agents:
                st.session_state.current_state['workspace_chat'][agent] = {'messages': [], 'active': True}
        st.session_state.current_state['workspace_chat'][current_agent]['messages'].append((input_text, ""))
    elif re.fullmatch(r'\d+', input_text):  # Select agent by number
        # 1-based index typed by the user; silently ignored if out of range.
        if int(input_text)-1 < len(st.session_state.available_agents):
            st.session_state.current_agent = st.session_state.available_agents[int(input_text)-1]
    elif input_text == '/clear':  # Clear conversation
        st.session_state.current_state['workspace_chat'][current_agent]['messages'] = []
    elif input_text == '/exit':  # Exit conversation
        st.session_state.current_agent = None
    else:
        # Plain message: queue it with an empty agent-reply placeholder.
        st.session_state.current_state['workspace_chat'][current_agent]['messages'].append((input_text, ""))
568
+
569
if __name__ == "__main__":
    # NOTE(review): no main() is defined anywhere in this diff — running the
    # script directly would raise NameError; confirm main() exists elsewhere
    # or remove this guard (Streamlit apps execute top-to-bottom anyway).
    main()