acecalisto3 committed
Commit 1e5fd12 · verified · 1 Parent(s): d6a0c0c

Update app.py

Files changed (1): app.py (+159 -142)

app.py CHANGED
@@ -1,151 +1,168 @@
  import gradio as gr
- import os
  import random
- import requests
- import asyncio
- from typing import List, Tuple, Any, Callable, Optional
-
- os.environ['GRADIO__LAUNCHER_LOADING'] = 'True'
-
- def download_file(url: str, save_directory: str) -> str:
-     if not os.path.exists(save_directory):
-         os.makedirs(save_directory)
-
-     resp = requests.get(url)
-     file_name = os.path.basename(url)
-     saved_file_path = os.path.join(save_directory, file_name)
-
-     with open(saved_file_path, 'wb') as f:
-         f.write(resp.content)
-
-     return saved_file_path
-
- def process_download_and_execute(file_urls: List[str], func: Callable[[Optional[List[Any]]], str]) -> Callable[[Any], str]:
-     def wrapper(*args, **kwargs) -> str:
-         processed_histories = []
-
-         for url in file_urls:
-             temp_dir = '/tmp/{}'.format('-'.join(filter(lambda x: x.isalpha(), url)))
-             download_file(url, temp_dir)
-             content = download_file(temp_dir, '/tmp')
-             processed_histories.extend(process_download_results([content]))
-
-         return func(processed_histories, *args, **kwargs)
-
-     return wrapper
-
- def tokenize_markdown(text: str) -> List[Tuple[str, bool]]:
-     lines = filter(bool, map(lambda x: x.strip(), text.split('\n')))
-     histories = []
-     current_chunk = ''
-     was_assistant = False
-
-     for idx, line in enumerate(lines):
-         chunk = '{}. {} '.format(idx + 1, line)
-         first_char = line[0]
-
-         hashtags = ['#', '@']
-         is_hashtag = lambda h: first_char.startswith(h) and h not in current_chunk
-
-         is_starting_line = ('[{' in line) and ('}' in line)
-
-         if is_hashtag(hashtags[was_assistant]):
-             historical_entry = handle_history_line(current_chunk)
-             if historical_entry:
-                 histories.append(historical_entry)
-
-             was_assistant = was_assistant ^ (first_char in {'@'})
-             current_chunk = chunk
-         else:
-             current_chunk += chunk
-
-         if is_starting_line and '[inst]' in line:
-             histories.append(handle_history_line(current_chunk))
-             current_chunk = ''
-
-     return histories
-
- def handle_history_line(text: str) -> Optional[Tuple[str, str]]:
-     dash_positions = [-1, -1]
-     for idx, c in enumerate(text):
-         if c == '-':
-             dash_positions[1] = dash_positions[0]
-             dash_positions[0] = idx
-         if c == ' ' and 0 <= dash_positions[0] <= idx <= dash_positions[1]:
-             delimiter = text[dash_positions[0]: idx + 1].strip()
-             return (text[:dash_positions[0]].strip(), delimiter + text[idx + 1:].strip())
-
-     return None
-
- def format_output_context(func: Callable[[Optional[List[Tuple[str, bool]]], str, float, int, float, float], str]) -> Callable[[Optional[List[Tuple[str, bool]]], str], str]:
-     def wrapped(*args, **kwargs) -> str:
-         input_prompt, user_input = args[0][-1], args[1]
-         histories = args[0][:-1]
-
-         sys_prefix = ''
-         human_prefix = '<span style=\'color:blue\'>Human:</span>'
-         ai_prefix = '<span style=\'color:green\'>Assistant:</span>'
-
-         output = '\n\n--- EXPANSION ---\n\n'
-
-         for line in input_prompt.split('\n'):
-             sanitized_line = line.replace('<|endoftext|>', '')
-             output += '{} {}'.format(human_prefix, sanitized_line)
-
-         sep = '\n' + ('-' * 60) + '\n'
-         formatted_user_input = user_input.replace('\n', ' ')
-         output += sep + ai_prefix + ' Question:' + sep + human_prefix + formatted_user_input + '\n\n'
-
-         tokens = tokenize_markdown(formatted_user_input)
-         output += ai_prefix + ' Answer:' + '\n\n'
-
-         return output + func(histories, formatted_user_input, kwargs.pop('temperature'), kwargs.pop('max_new_tokens'), kwargs.pop('top_p'), kwargs.pop('repeat_penalty'))
-
-     return wrapped
-
- async def fetch_and_generate_app(query_iter: Any) -> str:
-     async for token in query_iter:
-         yield token
-
- file_urls = [
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/agent.py',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/alpine.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/app.py',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/createLlamaPrompt.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/createSpace.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/cust_types.py',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/daisy.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/dl.sh',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/docker.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/generateFiles.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/getGradioApp.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/getReactApp.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/getStreamlitApp.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/getWebApp.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/gradioDoc.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/index.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/isPythonOrGradioAppPrompt.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/isReactAppPrompt.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/isStreamlitAppPrompt.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/parseTutorial.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/streamlitDoc.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/types.mts',
-     'https://huggingface.co/spaces/acecalisto3/Mistri/raw/main/typescript.mts',
- ]
-
- def execute_mistri_script(_: List[Tuple[str, bool]], __: str) -> str:
-     """Pass function definition"""
-     return ''
-
- execution_fn = process_download_and_execute(file_urls, execute_mistri_script)
-
- execution_fn = process_download_and_execute(file_urls, execute_mistri_script)
- gradedio_interface = gr.Interface(execution_fn,
-                                   input_types=['text'],
-                                   output_types=['text'],
-                                   input_fields=[gr.inputs.Textbox(label='Input')],
-                                   output_field=gr.outputs.Textbox(label='Output'))
- async def main():
-     await gradio_interface.launch(inline=True)
- asyncio.run(main())
+ from huggingface_hub import InferenceClient
  import gradio as gr
  import random
+ import prompts
+
+ client = InferenceClient(
+     "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ )
+
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
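For reference, format_prompt builds the Mixtral-instruct chat markup by plain concatenation. A minimal sketch of its output for a hypothetical one-turn history (not part of the commit; values are made up):

    # Hypothetical usage of format_prompt.
    history = [("Hi", "Hello!")]
    format_prompt("Write a haiku", history)
    # -> '<s>[INST] Hi [/INST] Hello!</s> [INST] Write a haiku [/INST]'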
+ agents = [
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV",
+     "CODE_REVIEW_ASSISTANT",
+     "CONTENT_WRITER_EDITOR",
+     #"SOCIAL_MEDIA_MANAGER",
+     #"MEME_GENERATOR",
+     "QUESTION_GENERATOR",
+     #"IMAGE_GENERATOR",
+     "HUGGINGFACE_FILE_DEV",
+ ]
+
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+ ):
+     seed = random.randint(1, 1111111111111111)
+
+     agent = prompts.WEB_DEV
+     if agent_name == "WEB_DEV":
+         agent = prompts.WEB_DEV_SYSTEM_PROMPT
+     if agent_name == "CODE_REVIEW_ASSISTANT":
+         agent = prompts.CODE_REVIEW_ASSISTANT
+     if agent_name == "CONTENT_WRITER_EDITOR":
+         agent = prompts.CONTENT_WRITER_EDITOR
+     if agent_name == "SOCIAL_MEDIA_MANAGER":
+         agent = prompts.SOCIAL_MEDIA_MANAGER
+     if agent_name == "AI_SYSTEM_PROMPT":
+         agent = prompts.AI_SYSTEM_PROMPT
+     if agent_name == "PYTHON_CODE_DEV":
+         agent = prompts.PYTHON_CODE_DEV
+     #if agent_name == "MEME_GENERATOR":
+     #    agent = prompts.MEME_GENERATOR
+     if agent_name == "QUESTION_GENERATOR":
+         agent = prompts.QUESTION_GENERATOR
+     #if agent_name == "IMAGE_GENERATOR":
+     #    agent = prompts.IMAGE_GENERATOR
+     if agent_name == "HUGGINGFACE_FILE_DEV":
+         agent = prompts.HUGGINGFACE_FILE_DEV
+     system_prompt = agent
+
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=seed,
+     )
+
+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
+
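Because generate is a generator that yields the cumulative output after each streamed token, it can also be exercised outside Gradio. A minimal sketch, assuming the prompts module is importable and the inference endpoint is reachable (the prompt text is hypothetical):

    # Hypothetical smoke test: print each partial completion as it streams in.
    for partial in generate("Write a haiku about Gradio", history=[]):
        print(partial)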
+ additional_inputs = [
+     gr.Dropdown(
+         label="Agents",
+         choices=[s for s in agents],
+         value=agents[0],
+         interactive=True,
+     ),
+     gr.Textbox(
+         label="System Prompt",
+         max_lines=1,
+         interactive=True,
+     ),
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=1048*10,
+         minimum=0,
+         maximum=1048*10,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     ),
+ ]
+
+ examples = [
+     ["Create a simple web application using Flask", agents[0], None, None, None, None, ],
+     ["Generate a Python script to perform a linear regression analysis", agents[2], None, None, None, None, ],
+     ["Create a Dockerfile for a Node.js application", agents[1], None, None, None, None, ],
+     ["Write a shell script to automate the deployment of a web application to a server", agents[3], None, None, None, None, ],
+     ["Generate a SQL query to retrieve the top 10 most popular products by sales", agents[4], None, None, None, None, ],
+     ["Write a Python script to generate a random password with a given length and complexity", agents[2], None, None, None, None, ],
+     ["Create a simple game in Unity using C#", agents[0], None, None, None, None, ],
+     ["Generate a Java program to implement a binary search algorithm", agents[2], None, None, None, None, ],
+     ["Write a shell script to monitor the CPU usage of a server", agents[1], None, None, None, None, ],
+     ["Create a simple web application using React and Node.js", agents[0], None, None, None, None, ],
+     ["Generate a Python script to perform a sentiment analysis on a given text", agents[2], None, None, None, None, ],
+     ["Write a shell script to automate the backup of a MySQL database", agents[1], None, None, None, None, ],
+     ["Create a simple game in Unreal Engine using C++", agents[3], None, None, None, None, ],
+     ["Generate a Java program to implement a bubble sort algorithm", agents[2], None, None, None, None, ],
+     ["Write a shell script to monitor the memory usage of a server", agents[1], None, None, None, None, ],
+     ["Create a simple web application using Angular and Node.js", agents[0], None, None, None, None, ],
+     ["Generate a Python script to perform a text classification on a given dataset", agents[2], None, None, None, None, ],
+     ["Write a shell script to automate the installation of a software package on a server", agents[1], None, None, None, None, ],
+     ["Create a simple game in Godot using GDScript", agents[3], None, None, None, None, ],
+     ["Generate a Java program to implement a merge sort algorithm", agents[2], None, None, None, None, ],
+     ["Write a shell script to automate the cleanup of temporary files on a server", agents[1], None, None, None, None, ],
+ ]
+
+ gr.ChatInterface(
+     fn=generate,
+     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     additional_inputs=additional_inputs,
+     title="Mixtral 46.7B",
+     examples=examples,
+     concurrency_limit=20,
+ ).launch(show_api=False)
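The new app.py also assumes a sibling prompts module exposing one system-prompt string per agent; that module is not part of this diff. A minimal hypothetical stub showing the interface the code reads (placeholder strings, for local testing only):

    # prompts.py (hypothetical stub; the real Space ships its own prompt text)
    WEB_DEV = "You are an expert web developer."
    WEB_DEV_SYSTEM_PROMPT = WEB_DEV
    AI_SYSTEM_PROMPT = "You design system prompts for AI assistants."
    PYTHON_CODE_DEV = "You are an expert Python developer."
    CODE_REVIEW_ASSISTANT = "You review code for bugs and style."
    CONTENT_WRITER_EDITOR = "You write and edit technical content."
    SOCIAL_MEDIA_MANAGER = "You draft social media posts."
    QUESTION_GENERATOR = "You generate quiz questions."
    HUGGINGFACE_FILE_DEV = "You write files for Hugging Face Spaces."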