import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the TinyLlama checkpoint and move it to a GPU when one is available.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T")
model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T")
model.to("cuda" if torch.cuda.is_available() else "cpu")

def generate_text(system_prompt, user_prompt, temperature, max_length, min_length):
    # Build a chat-formatted prompt string from the system and user messages.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    chat_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(chat_prompt, return_tensors="pt").to(model.device)

    # do_sample=True is required for `temperature` to have any effect, and
    # Gradio Number inputs arrive as floats, so the length limits are cast to
    # int. Note that max_length counts prompt tokens as well as new ones.
    outputs = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        max_length=int(max_length),
        min_length=int(min_length),
        num_return_sequences=1,
    )

    # Decode only the newly generated tokens, skipping the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="System Prompt"),
        gr.Textbox(label="User Prompt"),
        gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature"),
        gr.Number(minimum=10, maximum=2048, value=256, precision=0, label="Max Length"),
        gr.Number(minimum=1, maximum=2048, value=1, precision=0, label="Min Length"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    live=False,
)

iface.launch()
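
# A minimal smoke test, assuming the model weights have finished downloading:
# running this script serves the UI locally (Gradio defaults to
# http://127.0.0.1:7860), and generate_text can also be called directly,
# e.g. from a separate script that imports it:
#
#   print(generate_text("You are a helpful assistant.",
#                       "Explain what a tokenizer does in one sentence.",
#                       temperature=0.7, max_length=256, min_length=1))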