Arrcttacsrks committed on
Commit 017fa47 · verified · 1 Parent(s): 3b9927d

Update app.py

Files changed (1):
  app.py +28 -34
app.py CHANGED
@@ -1,51 +1,45 @@
-from flask import Flask, render_template, request, jsonify, session
+from flask import Flask, render_template, request, jsonify
 from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-from datetime import datetime
+import datetime
 
 app = Flask(__name__)
-app.secret_key = "supersecretkey"  # Used to manage the session
 
-# Load the model and tokenizer from Hugging Face
+# Load the model and tokenizer
 model_path = "phamhai/Llama-3.2-3B-Instruct-Frog"
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path)
 
+# Store chat history
+chat_history = []
+
 @app.route('/')
 def index():
-    session.setdefault('history', [])  # Create the session if it does not exist yet
-    return render_template('index.html', history=session['history'])
+    return render_template('index.html', history=chat_history)
 
 @app.route('/chat', methods=['POST'])
 def chat():
-    user_input = request.json.get("message")
-    if not user_input:
-        return jsonify({"response": "Xin hãy nhập tin nhắn!"})
-
-    # Tokenize the input and generate the output
-    messages = [
-        {"role": "system", "content": "Bạn là trợ lý của tôi, tên là Vivi."},
-        {"role": "user", "content": user_input}
-    ]
-    tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
-    outputs = model.generate(tokenized_chat, max_new_tokens=128)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Save the exchange to the chat history
-    chat_entry = {
-        "user": user_input,
-        "bot": response,
-        "timestamp": datetime.now().strftime("%H:%M:%S")  # Add the timestamp
-    }
-    session['history'].append(chat_entry)
-    session.modified = True  # Mark the session as updated
-
-    return jsonify({"response": response, "timestamp": chat_entry["timestamp"]})
+    user_message = request.json['message']
+
+    # Append user message to chat history
+    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
+    chat_history.append({'timestamp': timestamp, 'user': user_message})
+
+    # Generate a response
+    input_text = user_message  # Modify this as needed for your model's input
+    inputs = tokenizer(input_text, return_tensors='pt')
+    outputs = model.generate(**inputs, max_new_tokens=128)
+    bot_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Append bot response to chat history
+    chat_history.append({'timestamp': timestamp, 'bot': bot_response})
+
+    return jsonify({'timestamp': timestamp, 'response': bot_response})
 
 @app.route('/clear', methods=['POST'])
 def clear():
-    session['history'] = []  # Clear the chat history
-    return jsonify({"status": "ok"})
+    global chat_history
+    chat_history = []  # Clear chat history
+    return jsonify(success=True)
 
-if __name__ == "__main__":
-    app.run(debug=False)
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=7860)  # Set host and port for Hugging Face Spaces
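
For reference, a minimal client-side sketch of how the updated endpoints can be exercised once the app is running. The base URL is an assumption derived from app.run(host='0.0.0.0', port=7860) above, and the requests library is assumed to be available; the routes and JSON keys match the handlers in the new app.py.

import requests

BASE_URL = "http://localhost:7860"  # assumption: local run on the port set in app.run above

# POST a message to /chat; the handler reads request.json['message'] and
# returns {'timestamp': ..., 'response': ...}
reply = requests.post(f"{BASE_URL}/chat", json={"message": "Hello"}).json()
print(reply["timestamp"], reply["response"])

# POST to /clear to reset the in-memory chat_history list; returns {"success": true}
print(requests.post(f"{BASE_URL}/clear").json())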