HelloSun committed on
Commit
56291d0
·
verified ·
1 Parent(s): e76939b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +35 -0
README.md CHANGED
@@ -24,8 +24,43 @@ pip install optimum[openvino]
24
  To load your model you can do as follows:
25
 
26
  ```python
 
 
 
27
  from optimum.intel import OVModelForCausalLM
 
28
 
 
29
  model_id = "HelloSun/Qwen2.5-0.5B-Instruct-openvino"
30
  model = OVModelForCausalLM.from_pretrained(model_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  ```
 
 
 
 
24
  To load your model you can do as follows:
25
 
26
  ```python
27
+ #app.py
28
+ import gradio as gr
29
+ from huggingface_hub import InferenceClient
30
  from optimum.intel import OVModelForCausalLM
31
+ from transformers import AutoTokenizer, pipeline
32
 
33
+ # 載入模型和標記器
34
  model_id = "HelloSun/Qwen2.5-0.5B-Instruct-openvino"
35
  model = OVModelForCausalLM.from_pretrained(model_id)
36
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
37
+
38
+ # 建立生成管道
39
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
40
+
41
+ def respond(message, history):
42
+     # 將當前訊息與歷史訊息合併
43
+     #input_text = message if not history else history[-1]["content"] + " " + message
44
+     input_text = message
45
+     # 獲取模型的回應
46
+     response = pipe(input_text, max_length=500, truncation=True, num_return_sequences=1)
47
+     reply = response[0]['generated_text']
48
+
49
+     # 返回新的消息格式
50
+     print(f"Message: {message}")
51
+     print(f"Reply: {reply}")
52
+     return reply
53
+
54
+ # 設定 Gradio 的聊天界面
55
+ demo = gr.ChatInterface(fn=respond, title="Chat with Qwen(通義千問) 2.5-0.5B", description="與 HelloSun/Qwen2.5-0.5B-Instruct-openvino 聊天!", type='messages')
56
+
57
+ if __name__ == "__main__":
58
+     demo.launch()
59
+ ```
60
+ ```requirements.txt
61
+ huggingface_hub==0.25.2
+ gradio
62
+ optimum[openvino]
63
  ```
64
+
65
+
66
+