import gradio as gr
import torch
from transformers import AutoTokenizer, LlamaForCausalLM
# Initialize the model and tokenizer
model_id = 'akjindal53244/Llama-3.1-Storm-8B'
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = LlamaForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    attn_implementation="flash_attention_2",  # replaces the deprecated use_flash_attention_2 flag
)
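# Note: flash_attention_2 requires the flash-attn package and a supported GPU.
# If it is unavailable, dropping the attn_implementation argument falls back to
# the transformers default attention backend. device_map="auto" places the model
# across available devices via accelerate, which must be installed.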
# Build a Llama 3.1 chat prompt from a list of role/content messages
def format_prompt(messages):
    prompt = "<|begin_of_text|>"
    for message in messages:
        prompt += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}<|eot_id|>"
    prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n"
    return prompt
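# Alternative sketch (not used below; the helper name is ours): recent transformers
# releases bundle the Llama 3.1 chat template with the tokenizer, so the manual
# formatting above can typically be replaced with the built-in helper.
def format_prompt_via_template(messages):
    # add_generation_prompt=True appends the assistant header, matching format_prompt
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)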
# Generate a response to the latest user message, given the chat history
def generate_response(message, history):
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    prompt = format_prompt(messages)
    # Use model.device rather than a hard-coded "cuda" so this also works
    # when device_map places the model elsewhere
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    generated_ids = model.generate(
        input_ids,
        max_new_tokens=256,
        temperature=0.7,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response.strip()
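# Quick sanity check (a sketch; history here uses the (user, bot) tuple format
# that this function unpacks, which matches gr.ChatInterface's legacy default):
#   print(generate_response("What is the capital of France?", []))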
# Create the Gradio chat interface
iface = gr.ChatInterface(
    generate_response,
    title="Llama-3.1-Storm-8B Chatbot",
    description="Chat with the Llama-3.1-Storm-8B model. Type your message and press Enter to send.",
)
# Launch the app
iface.launch()