File size: 1,192 Bytes
56f3ab6
25041c0
7ce9bfb
56f3ab6
fb6312f
 
7ce9bfb
54111eb
fb6312f
 
56f3ab6
 
 
 
 
 
 
fb6312f
 
 
 
 
56f3ab6
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline


@st.cache_resource
def _load_pipeline() -> TextGenerationPipeline:
    """Load Astrid-1B once and return a CPU-bound text-generation pipeline.

    Wrapped in ``st.cache_resource`` so Streamlit does not re-download and
    re-instantiate the ~1B-parameter model on every UI rerun (the original
    script rebuilt it at module scope on each interaction).
    """
    # Full-precision weights, pinned to CPU.
    model = AutoModelForCausalLM.from_pretrained("PAIXAI/Astrid-1B").to(dtype=torch.float32).cpu()
    tokenizer = AutoTokenizer.from_pretrained("PAIXAI/Astrid-1B")
    # device=-1 forces the pipeline to run on CPU.
    return TextGenerationPipeline(model=model, tokenizer=tokenizer, device=-1)


generate_text = _load_pipeline()

# Streamlit UI
st.title("Astrid-1B Chatbot")
st.write("Test the Astrid-1B chatbot from Hugging Face!")

user_input = st.text_input("Enter your question:")
if user_input:
    try:
        # do_sample=False selects greedy decoding; temperature is ignored in
        # that mode (and triggers a transformers warning), so it is omitted.
        response = generate_text(
            user_input,
            min_new_tokens=2,
            max_new_tokens=256,
            do_sample=False,
            num_beams=1,
            repetition_penalty=1.2,
            renormalize_logits=True,
        )
        st.write("Response:", response[0]["generated_text"])
    except Exception as e:
        # Surface generation errors in the UI rather than crashing the app.
        st.write("Error:", str(e))

st.write("Note: This is a simple UI for demonstration purposes. Ensure you have the required libraries installed and adjust the model parameters as needed.")