File size: 1,509 Bytes
017fa47
688c860
017fa47
45aeeb7
 
 
017fa47
688c860
 
 
45aeeb7
017fa47
 
 
45aeeb7
688c860
017fa47
45aeeb7
 
 
017fa47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
688c860
 
 
017fa47
 
 
45aeeb7
017fa47
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from flask import Flask, render_template, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import datetime

app = Flask(__name__)

# Load the model and tokenizer
# Loaded once at import time; this downloads the checkpoint from the
# Hugging Face Hub on first run (several GB), then uses the local cache.
model_path = "phamhai/Llama-3.2-3B-Instruct-Frog"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Store chat history
# NOTE(review): module-level mutable state shared by ALL clients — every
# visitor sees the same conversation, and it is not thread-safe under a
# multi-threaded server. Confirm single-user use is intended.
chat_history = []

@app.route('/')
def index():
    """Serve the chat UI, pre-populated with the shared conversation."""
    context = {'history': chat_history}
    return render_template('index.html', **context)

@app.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn: record the user message, generate a model
    reply, record it, and return {'timestamp', 'response'} as JSON.

    Expects a JSON body of the form {"message": "<text>"}.
    """
    # get_json(silent=True) avoids an unhandled exception (HTTP 500) when
    # the body is missing or not valid JSON; a missing key becomes "".
    payload = request.get_json(silent=True) or {}
    user_message = payload.get('message', '')

    # Append user message to chat history
    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
    chat_history.append({'timestamp': timestamp, 'user': user_message})

    # Generate a response
    input_text = user_message  # Modify this as needed for your model's input
    inputs = tokenizer(input_text, return_tensors='pt')
    outputs = model.generate(**inputs, max_new_tokens=128)

    # generate() returns prompt + completion tokens; decoding outputs[0]
    # whole would echo the user's message back at the start of the reply.
    # Slice off the prompt so only the newly generated text is returned.
    prompt_len = inputs['input_ids'].shape[-1]
    bot_response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Append bot response to chat history
    chat_history.append({'timestamp': timestamp, 'bot': bot_response})

    return jsonify({'timestamp': timestamp, 'response': bot_response})

@app.route('/clear', methods=['POST'])
def clear():
    """Drop the shared conversation history and acknowledge with JSON."""
    global chat_history
    # Rebind (rather than mutate) the module-level list, as before.
    chat_history = []
    return jsonify(success=True)

# Entry point: start the development server, binding to all interfaces on
# the port Hugging Face Spaces expects (7860).
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)  # Set host and port for Hugging Face Spaces