curry tang committed on
Commit
68cf8d3
·
1 Parent(s): 007f9f4
Files changed (2) hide show
  1. app.py +19 -17
  2. utils.py +12 -0
app.py CHANGED
@@ -2,12 +2,11 @@ import gradio as gr
2
  from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
3
  from llm import DeepSeekLLM, OpenRouterLLM, TongYiLLM
4
  from config import settings
5
- import base64
6
- from PIL import Image
7
- import io
8
  from prompts import web_prompt, explain_code_template, optimize_code_template, debug_code_template, function_gen_template, translate_doc_template, backend_developer_prompt, analyst_prompt
9
  from langchain_core.prompts import PromptTemplate
10
  from log import logging
 
 
11
 
12
  logger = logging.getLogger(__name__)
13
 
@@ -22,6 +21,11 @@ provider_model_map = dict(
22
  Tongyi=tongyi_llm,
23
  )
24
 
 
 
 
 
 
25
 
26
  def get_default_chat():
27
  default_provider = settings.default_provider
@@ -34,8 +38,11 @@ def predict(message, history, _chat, _current_assistant: str):
34
  files_len = len(message.files)
35
  if _chat is None:
36
  _chat = get_default_chat()
37
- _lc_history = []
 
 
38
 
 
39
  assistant_prompt = web_prompt
40
  if _current_assistant == '后端开发助手':
41
  assistant_prompt = backend_developer_prompt
@@ -45,7 +52,8 @@ def predict(message, history, _chat, _current_assistant: str):
45
 
46
  for his_msg in history:
47
  if his_msg['role'] == 'user':
48
- _lc_history.append(HumanMessage(content=his_msg['content']))
 
49
  if his_msg['role'] == 'assistant':
50
  _lc_history.append(AIMessage(content=his_msg['content']))
51
 
@@ -53,15 +61,12 @@ def predict(message, history, _chat, _current_assistant: str):
53
  _lc_history.append(HumanMessage(content=message.text))
54
  else:
55
  file = message.files[0]
56
- with Image.open(file.path) as img:
57
- buffer = io.BytesIO()
58
- img = img.convert('RGB')
59
- img.save(buffer, format="JPEG")
60
- image_data = base64.b64encode(buffer.getvalue()).decode("utf-8")
61
- _lc_history.append(HumanMessage(content=[
62
- {"type": "text", "text": message.text},
63
- {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
64
- ]))
65
  logger.info(f"chat history: {_lc_history}")
66
 
67
  response_message = ''
@@ -275,9 +280,6 @@ with gr.Blocks() as app:
275
  email_doc_btn = gr.Button('邮件撰写')
276
  doc_gen_btn = gr.Button('文档润色')
277
  translate_doc_btn.click(fn=translate_doc, inputs=[language_input, language_output, doc, chat_engine], outputs=[code_result])
278
- with gr.Tab('生活娱乐'):
279
- with gr.Row():
280
- gr.Button("test")
281
 
282
 
283
  app.launch(debug=settings.debug, show_api=False)
 
2
  from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
3
  from llm import DeepSeekLLM, OpenRouterLLM, TongYiLLM
4
  from config import settings
 
 
 
5
  from prompts import web_prompt, explain_code_template, optimize_code_template, debug_code_template, function_gen_template, translate_doc_template, backend_developer_prompt, analyst_prompt
6
  from langchain_core.prompts import PromptTemplate
7
  from log import logging
8
+ from utils import convert_image_to_base64
9
+
10
 
11
  logger = logging.getLogger(__name__)
12
 
 
21
  Tongyi=tongyi_llm,
22
  )
23
 
24
+ support_vision_models = [
25
+ 'openai/gpt-4o-mini', 'anthropic/claude-3.5-sonnet', 'google/gemini-pro-1.5-exp',
26
+ 'openai/gpt-4o', 'google/gemini-flash-1.5', 'liuhaotian/llava-yi-34b', 'anthropic/claude-3-haiku',
27
+ ]
28
+
29
 
30
  def get_default_chat():
31
  default_provider = settings.default_provider
 
38
  files_len = len(message.files)
39
  if _chat is None:
40
  _chat = get_default_chat()
41
+ if files_len > 0:
42
+ if _chat.model_name not in support_vision_models:
43
+ raise gr.Error("当前模型不支持图片,请更换模型。")
44
 
45
+ _lc_history = []
46
  assistant_prompt = web_prompt
47
  if _current_assistant == '后端开发助手':
48
  assistant_prompt = backend_developer_prompt
 
52
 
53
  for his_msg in history:
54
  if his_msg['role'] == 'user':
55
+ if not hasattr(his_msg['content'], 'file'):
56
+ _lc_history.append(HumanMessage(content=his_msg['content']))
57
  if his_msg['role'] == 'assistant':
58
  _lc_history.append(AIMessage(content=his_msg['content']))
59
 
 
61
  _lc_history.append(HumanMessage(content=message.text))
62
  else:
63
  file = message.files[0]
64
+ image_data = convert_image_to_base64(file)
65
+ _lc_history.append(HumanMessage(content=[
66
+ {"type": "text", "text": message.text},
67
+ {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
68
+ ]))
69
+
 
 
 
70
  logger.info(f"chat history: {_lc_history}")
71
 
72
  response_message = ''
 
280
  email_doc_btn = gr.Button('邮件撰写')
281
  doc_gen_btn = gr.Button('文档润色')
282
  translate_doc_btn.click(fn=translate_doc, inputs=[language_input, language_output, doc, chat_engine], outputs=[code_result])
 
 
 
283
 
284
 
285
  app.launch(debug=settings.debug, show_api=False)
utils.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ from PIL import Image
3
+ import io
4
+
5
+
6
def convert_image_to_base64(file, image_format="JPEG"):
    """Encode an uploaded image file as a base64 string.

    Opens the image at ``file.path``, converts it to RGB (dropping any
    alpha channel, since JPEG cannot store one), serializes it into an
    in-memory buffer, and returns the encoded bytes as UTF-8 text.

    Args:
        file: Upload object exposing a ``path`` attribute pointing at an
            image on disk — presumably a Gradio file wrapper; confirm
            against the caller in app.py.
        image_format: Pillow save format. Defaults to ``"JPEG"`` to
            preserve the original behavior; callers embedding the result
            in a data URI must keep the MIME type in sync.

    Returns:
        str: Base64-encoded image data (no ``data:image/...`` prefix).
    """
    with Image.open(file.path) as img:
        buffer = io.BytesIO()
        # Convert before saving: source images may be RGBA/P mode,
        # which Pillow refuses to write as JPEG.
        img.convert('RGB').save(buffer, format=image_format)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")