# Burmese ChatGPT — Gradio chat app backed by MyanmarGPT-Chat (Hugging Face Spaces).
from transformers import AutoModelForCausalLM, AutoTokenizer

import gradio as gr

# Hugging Face model repo for the Burmese chat model.
MODEL_NAME = "jojo-ai-mst/MyanmarGPT-Chat"

# Load the tokenizer and model once at import time so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype="float32",   # full-precision floats; safest default for CPU inference
    low_cpu_mem_usage=True,  # stream weights during load to reduce peak RAM
)
# Chatbot function
def chatbot(prompt):
    """Generate a chat response for *prompt* with MyanmarGPT-Chat.

    Args:
        prompt: The user's message (Burmese expected, any text accepted).

    Returns:
        The model's generated reply as a string, without echoing the prompt.
    """
    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the input text
    outputs = model.generate(
        **inputs,                # pass attention_mask along with input_ids
        max_new_tokens=150,      # Limit response length
        do_sample=True,          # required: temperature/top_p are ignored under greedy decoding
        temperature=0.7,         # Control randomness
        top_p=0.9,               # Nucleus sampling
        pad_token_id=tokenizer.eos_token_id,  # avoid pad-token warning on GPT-style models
    )
    # Decode only the newly generated tokens so the prompt is not echoed back.
    prompt_len = inputs.input_ids.shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response
# Gradio interface: wires the chatbot function to a simple text-in/text-out UI.
interface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(
        label="Chat with Burmese ChatGPT",
        placeholder="Type your message here in Burmese...",
        lines=5,
    ),
    outputs=gr.Textbox(label="Response"),
    title="Burmese ChatGPT",
    description="A chatbot powered by MyanmarGPT-Chat for Burmese conversations.",
)
# Launch the web UI only when run as a script (not when imported).
if __name__ == "__main__":
    interface.launch()