Spaces: acecalisto3 (Sleeping)
acecalisto3 committed: Update app.py
app.py CHANGED
@@ -1,292 +1,261 @@
-import os
-import logging
import asyncio
-import yaml
-from typing import Dict, List, Any, Tuple, Optional
-from abc import ABC, abstractmethod
-
import gradio as gr
-from
-from
-from langchain.agents import
-from
-from
-
-
-
-load_dotenv()
-
-# Custom Exceptions
-class CodeFusionError(Exception):
-    """Base exception class for CodeFusion."""
-    pass
-
-class AgentInitializationError(CodeFusionError):
-    """Raised when there's an error initializing the agent."""
-    pass
-
-class ToolExecutionError(CodeFusionError):
-    """Raised when there's an error executing a tool."""
-    pass

-
-
-    """Load configuration from config.yaml file or use default values."""
-    config_path = 'config.yaml'
-    default_config = {
-        'model_name': "google/flan-t5-xl",
-        'api_key': "your_default_api_key_here",
-        'temperature': 0.5,
-        'verbose': True
-    }
-
    try:
-        with open(config_path,
-
    except FileNotFoundError:
-        print(
-
-
-
-
-    return config
-
-def setup_logging() -> logging.Logger:
-    """Set up logging configuration."""
-    logging.basicConfig(
-        level=logging.INFO,
-        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-        filename='codefusion.log'
-    )
-    return logging.getLogger(__name__)

-# Load configuration and set up logging
config = load_config()
-logger = setup_logging()

-
-
-    """Abstract base class for all tools used by the agent."""
-
-    def __init__(self, name: str, description: str):
-        self.name = name
-        self.description = description
-        self.llm = HuggingFaceHub(
-            repo_id=config['model_name'],
-            model_kwargs={"temperature": config['temperature']},
-            huggingfacehub_api_token=config['api_key']
-        )

-
-
-
-

-
-

-
-
-
-            input_variables=["language", "code_description"],
-            template="Generate {language} code for: {code_description}"
-        )
-        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
-        # Add this line:
-        self.is_single_input = True

-
-
-
-
-
-            return {"output": code}
-        except Exception as e:
-            logger.error(f"Error in CodeGenerationTool: {e}")
-            raise ToolExecutionError(f"Failed to generate code: {e}")

-
-

-
-
-        self.
-
-
        )
-
-        # Add this line:
-        self.is_single_input = True
-
-    async def run(self, arguments: Dict[str, str]) -> Dict[str, str]:
-        code = arguments.get("code", "print('Hello, World!')")
-        try:
-            explanation = await self.chain.arun(code=code)
-            return {"output": explanation}
-        except Exception as e:
-            logger.error(f"Error in CodeExplanationTool: {e}")
-            raise ToolExecutionError(f"Failed to explain code: {e}")
-
-class DebuggingTool(Tool):
-    """Tool for debugging code snippets."""
-
-    def __init__(self):
-        super().__init__("Debugging", "Helps identify and fix issues in code snippets.")
        self.prompt_template = PromptTemplate(
-
-
        )
-        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
-        # Add this line:
-        self.is_single_input = True
-
-    async def run(self, arguments: Dict[str, str]) -> Dict[str, str]:
-        code = arguments.get("code", "")
-        error_message = arguments.get("error_message", "")
-        try:
-            debug_result = await self.chain.arun(code=code, error_message=error_message)
-            return {"output": debug_result}
-        except Exception as e:
-            logger.error(f"Error in DebuggingTool: {e}")
-            raise ToolExecutionError(f"Failed to debug code: {e}")
-
-# Agent Class
-class Agent:
-    """Represents an AI agent with specific tools and capabilities."""
-
-    def __init__(self, name: str, role: str, tools: List[Tool]):
-        self.name = name
-        self.role = role
-        self.tools = tools
-        self.memory: List[tuple] = []

        try:
-            self.
-
-                model_kwargs={"temperature": config['temperature']},
-                huggingfacehub_api_token=config['api_key']
-            )
-            # Convert tools to dictionaries
-            tools_dict = [
-                {"name": tool.name, "description": tool.description, "func": tool.run}
-                for tool in self.tools
-            ]
-            self.agent = initialize_agent(
-                llm=self.llm,
-                tools=tools_dict,
-                agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
-                verbose=config['verbose']
-            )
        except Exception as e:
-            logger.error(f"Error initializing agent: {e}")
            raise AgentInitializationError(f"Failed to initialize agent: {e}")

-    async def
-        """Perform an action based on the given prompt and context."""
-        self.memory.append((prompt, context))
        try:
-
-            return
        except Exception as e:
-
-
-
-def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            current_agent_index = i
-            break
-        elif "ui" in message.lower():
-            current_agent_index = i
-            break
-
-    # Use the selected agent to process the message
-    current_agent = agent_structure[current_agent_index]
-    context = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
-    try:
-        response = await current_agent.act(message, context)
-        return response
-    except Exception as e:
-        logger.error(f"Error processing request: {e}")
-        return "I apologize, but an error occurred while processing your request. Please try again."

async def main():
-
-
-
-
-        [
-
-
-
-
-
-
-
-
-    ).launch()
-
-# Simple testing framework
-def run_tests():
-    """Run basic tests for the CodeFusion components."""
-
-    async def test_code_generation():
-        tool = CodeGenerationTool()
-        result = await tool.run({"language": "python", "code_description": "function to add two numbers"})
-        assert "def" in result["output"], "Code generation failed to produce a function"
-        print("Code Generation Test: Passed")
-
-    async def test_code_explanation():
-        tool = CodeExplanationTool()
-        result = await tool.run({"code": "def factorial(n):\n return 1 if n == 0 else n * factorial(n-1)"})
-        assert "recursive" in result["output"].lower(), "Code explanation failed to mention recursion"
-        print("Code Explanation Test: Passed")
-
-    async def test_debugging():
-        tool = DebuggingTool()
-        result = await tool.run({"code": "def divide(a, b):\n return a / b", "error_message": "ZeroDivisionError"})
-        assert "zero" in result["output"].lower(), "Debugging failed to address division by zero"
-        print("Debugging Test: Passed")
-
-    async def test_agent():
-        agent = Agent("TestAgent", "Tester", [CodeGenerationTool(), CodeExplanationTool(), DebuggingTool()])
-        result = await agent.act("Generate a Python function to calculate the square of a number", "")
-        assert "def" in result and "return" in result, "Agent failed to generate a proper function"
-        print("Agent Test: Passed")
-
-    async def run_all_tests():
-        await test_code_generation()
-        await test_code_explanation()
-        await test_debugging()
-        await test_agent()
-
-    asyncio.run(run_all_tests())

-

if __name__ == "__main__":
-
-    if len(sys.argv) > 1 and sys.argv[1] == "--test":
-        run_tests()
-    else:
-        asyncio.run(main())
-        interface = gradio_interface()
-        interface.queue()
-        interface.launch()
import asyncio
import gradio as gr
+from langchain_community.llms import HuggingFaceEndpoint
+from langchain_core.prompts import PromptTemplate
+from langchain.agents import create_react_agent, AgentExecutor
+from langchain_core.tools import BaseTool
+from typing import List
+import yaml
+import os
+import json

+def load_config():
+    config_path = os.path.join(os.path.dirname(__file__), "config.yaml")
    try:
+        with open(config_path, "r") as config_file:
+            return yaml.safe_load(config_file)
    except FileNotFoundError:
+        print("Config file not found. Using default configuration.")
+        return {
+            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "hf_api_token": os.environ.get("HUGGINGFACEHUB_API_TOKEN", "your_default_token_here")
+        }

config = load_config()

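Note (not part of the commit): the loader above reads a config.yaml sitting next to app.py and otherwise falls back to the defaults shown. A minimal sketch for generating and round-tripping such a file, with the repo id and token values as placeholders:

# Sketch only: write a config.yaml with the keys load_config() expects, then read it back.
import yaml

example_config = {
    "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",  # same default repo id as above
    "hf_api_token": "hf_xxx",  # placeholder; normally supplied via HUGGINGFACEHUB_API_TOKEN
}

with open("config.yaml", "w") as f:
    yaml.safe_dump(example_config, f)

with open("config.yaml") as f:
    assert yaml.safe_load(f) == example_config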
+class AgentInitializationError(Exception):
+    pass

+class CodeGenerationTool(BaseTool):
+    name = "CodeGeneration"
+    description = "Generates code based on a prompt"
+
+    def _run(self, prompt: str) -> str:
+        # Implement basic code generation logic
+        if "Flask app structure" in prompt:
+            return self.generate_flask_app_structure()
+        elif "binary search algorithm" in prompt:
+            return self.generate_binary_search()
+        elif "responsive navbar" in prompt:
+            return self.generate_responsive_navbar()
+        else:
+            return f"Generated code placeholder for: {prompt}"
+
+    async def _arun(self, prompt: str) -> str:
+        return self._run(prompt)
+
+    def generate_flask_app_structure(self):
+        return """
+# app.py
+from flask import Flask, render_template
+
+app = Flask(__name__)
+
+@app.route('/')
+def home():
+    return render_template('index.html')
+
+if __name__ == '__main__':
+    app.run(debug=True)
+
+# templates/index.html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Flask App</title>
+</head>
+<body>
+    <h1>Welcome to Flask!</h1>
+</body>
+</html>
+"""
+
+    def generate_binary_search(self):
+        return """
+def binary_search(arr, target):
+    left, right = 0, len(arr) - 1
+
+    while left <= right:
+        mid = (left + right) // 2
+        if arr[mid] == target:
+            return mid
+        elif arr[mid] < target:
+            left = mid + 1
+        else:
+            right = mid - 1
+
+    return -1  # Target not found
+
+# Example usage
+sorted_array = [1, 3, 5, 7, 9, 11, 13, 15]
+target = 7
+result = binary_search(sorted_array, target)
+print(f"Target {target} found at index: {result}")
+"""
+
+    def generate_responsive_navbar(self):
+        return """
+<!-- HTML -->
+<nav class="navbar">
+    <div class="navbar-logo">Logo</div>
+    <ul class="navbar-links">
+        <li><a href="#home">Home</a></li>
+        <li><a href="#about">About</a></li>
+        <li><a href="#services">Services</a></li>
+        <li><a href="#contact">Contact</a></li>
+    </ul>
+    <div class="navbar-toggle">
+        <span class="bar"></span>
+        <span class="bar"></span>
+        <span class="bar"></span>
+    </div>
+</nav>
+
+<!-- CSS -->
+<style>
+.navbar {
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+    padding: 1rem 2rem;
+    background-color: #333;
+    color: white;
+}
+
+.navbar-logo {
+    font-size: 1.5rem;
+    font-weight: bold;
+}
+
+.navbar-links {
+    display: flex;
+    list-style: none;
+}
+
+.navbar-links li {
+    margin-left: 1rem;
+}
+
+.navbar-links a {
+    color: white;
+    text-decoration: none;
+}
+
+.navbar-toggle {
+    display: none;
+    flex-direction: column;
+    cursor: pointer;
+}
+
+.bar {
+    width: 25px;
+    height: 3px;
+    background-color: white;
+    margin: 3px 0;
+}
+
+@media (max-width: 768px) {
+    .navbar-links {
+        display: none;
+        flex-direction: column;
+        width: 100%;
+        position: absolute;
+        top: 60px;
+        left: 0;
+        background-color: #333;
+    }

+    .navbar-links.active {
+        display: flex;
+    }

+    .navbar-links li {
+        margin: 1rem 0;
+    }

+    .navbar-toggle {
+        display: flex;
+    }
+}
+</style>

+<!-- JavaScript -->
+<script>
+document.querySelector('.navbar-toggle').addEventListener('click', function() {
+    document.querySelector('.navbar-links').classList.toggle('active');
+});
+</script>
+"""

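Note (not part of the commit): depending on the installed langchain-core and pydantic versions, BaseTool subclasses may require type annotations on class-level fields, so if instantiating CodeGenerationTool fails with a field-annotation error, a variant along the following lines is one possible fix. The class name is hypothetical and the dispatch body is reduced to the placeholder branch:

# Sketch only: same tool, with the field annotations newer pydantic-based releases expect.
from langchain_core.tools import BaseTool

class AnnotatedCodeGenerationTool(BaseTool):
    name: str = "CodeGeneration"
    description: str = "Generates code based on a prompt"

    def _run(self, prompt: str) -> str:
        return f"Generated code placeholder for: {prompt}"

    async def _arun(self, prompt: str) -> str:
        return self._run(prompt)

print(AnnotatedCodeGenerationTool()._run("Implement a binary search algorithm in Python"))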
+class Agent:
+    def __init__(self, name: str, description: str, tools: List[BaseTool]):
+        self.name = name
+        self.description = description
+        self.llm = HuggingFaceEndpoint(
+            repo_id=config["model"],
+            task="text-generation",
+            model_kwargs={"temperature": 0.7, "max_length": 1024},
+            huggingfacehub_api_token=config["hf_api_token"]
        )
+
        self.prompt_template = PromptTemplate(
+            template="You are {name}, {description}. Respond to the following: {input}",
+            input_variables=["name", "description", "input"]
        )

        try:
+            self.agent = create_react_agent(self.llm, tools, self.prompt_template)
+            self.agent_executor = AgentExecutor(agent=self.agent, tools=tools, verbose=True)
        except Exception as e:
            raise AgentInitializationError(f"Failed to initialize agent: {e}")

+    async def run(self, input_text: str) -> str:
        try:
+            result = await self.agent_executor.arun(input_text)
+            return result
        except Exception as e:
+            return f"Error: {str(e)}"
+
+class CodeFusion:
+    def __init__(self):
+        code_gen_tool = CodeGenerationTool()
+        self.agents = [
+            Agent("CodeFusion_Structure", "App Structure Designer", [code_gen_tool]),
+            Agent("CodeFusion_Logic", "Logic Implementation Expert", [code_gen_tool]),
+            Agent("CodeFusion_UI", "User Interface Designer", [code_gen_tool])
+        ]
+
+    async def run(self, input_text: str) -> str:
+        results = []
+        for agent in self.agents:
+            result = await agent.run(input_text)
+            results.append(f"{agent.name}: {result}")
+        return "\n\n".join(results)
+
+code_fusion = CodeFusion()
+
+async def chat(message, history):
+    response = await code_fusion.run(message)
+    return response

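Since chat() and CodeFusion.run() are ordinary async functions, the agent pipeline can be smoke-tested from a terminal before the Gradio UI is involved. A minimal sketch, assuming it runs in (or imports from) this module and a valid Hugging Face token is configured:

# Sketch only: drive the multi-agent pipeline once without launching Gradio.
import asyncio

async def smoke_test():
    reply = await code_fusion.run("Implement a binary search algorithm in Python")
    print(reply)  # one response per agent, separated by blank lines

asyncio.run(smoke_test())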
async def main():
+    iface = gr.ChatInterface(
+        fn=chat,
+        title="CodeFusion AI",
+        description="Your AI-powered coding assistant",
+        examples=[
+            "Create a basic Flask app structure",
+            "Implement a binary search algorithm in Python",
+            "Design a responsive navbar using HTML and CSS"
+        ],
+        retry_btn=None,
+        undo_btn="Delete Previous",
+        clear_btn="Clear",
+    )

+    await iface.launch()

if __name__ == "__main__":
+    asyncio.run(main())
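A final note (not part of the commit): in the Gradio releases I am aware of, launch() is a plain synchronous call rather than a coroutine, so if await iface.launch() raises a TypeError on the Space, the usual synchronous pattern is a drop-in alternative. This sketch assumes the chat function defined above in this module:

# Sketch only: synchronous launch, the common Gradio pattern.
import gradio as gr

iface = gr.ChatInterface(
    fn=chat,  # async handlers are supported; Gradio awaits them internally
    title="CodeFusion AI",
    description="Your AI-powered coding assistant",
)

if __name__ == "__main__":
    iface.launch()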