import gradio as gr
from typing import List, Tuple, Optional, Union
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch


class ChristmasBot:
    def __init__(self, model_name: str = "mistralai/Mistral-7B-v0.1"):
        """
        Initialize the Christmas chatbot with a Hugging Face model.
        The default is Mistral-7B; any causal LM on the Hub should work.
        (GGUF repos such as TheBloke/Mistral-7B-v0.1-GGUF cannot be loaded
        with AutoModelForCausalLM, so a standard checkpoint is used here.)
        """
        self.stable_diffusion_available = False

        # Initialize the model and tokenizer
        print("Loading model and tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            # 8-bit quantization for memory efficiency (requires bitsandbytes)
            quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        )
        print("Model loaded successfully!")

        # System prompt to give the model Christmas context
        self.system_prompt = """You are Holly, a cheerful Christmas helper chatbot.
You love everything about Christmas and respond in a warm, festive manner.
Keep your responses concise but friendly. If users ask about sensitive topics,
guide the conversation back to Christmas-related subjects."""

    def _generate_image(self, prompt: str) -> Optional[str]:
        """Placeholder for image generation; returns None until a backend is wired up."""
        if not self.stable_diffusion_available:
            return None
        return None
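    # The placeholder above never produces an image. Below is a minimal,
    # optional sketch of how it could be implemented with the `diffusers`
    # library. The checkpoint name, the lazy-loading attribute (_sd_pipe),
    # and this method itself are assumptions, not part of the original bot;
    # gr.Image can display the returned PIL image directly.
    def _generate_image_with_diffusers(self, prompt: str):
        try:
            from diffusers import StableDiffusionPipeline
            if not hasattr(self, "_sd_pipe"):
                device = "cuda" if torch.cuda.is_available() else "cpu"
                self._sd_pipe = StableDiffusionPipeline.from_pretrained(
                    "runwayml/stable-diffusion-v1-5",
                    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
                ).to(device)
            # A single pipeline call returns a list of PIL images
            return self._sd_pipe(prompt).images[0]
        except Exception as e:
            print(f"Image generation unavailable: {e}")
            return None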
    def _format_chat_history(self, history: List[List[str]]) -> str:
        """Format the chat history into a single prompt string for the model."""
        formatted_history = self.system_prompt + "\n\n"
        for user_msg, bot_msg in history:
            if user_msg:
                formatted_history += f"User: {user_msg}\n"
            if bot_msg:
                formatted_history += f"Assistant: {bot_msg}\n"
        return formatted_history

    def _get_llm_response(self, message: str, history: List[List[str]]) -> str:
        """Generate a response using the Hugging Face model."""
        try:
            # Format the conversation history with the new message
            chat_history = self._format_chat_history(history)
            prompt = f"{chat_history}User: {message}\nAssistant:"

            # Tokenize input (truncate long conversations to fit the context)
            inputs = self.tokenizer(prompt, return_tensors="pt",
                                    truncation=True, max_length=1024)
            inputs = inputs.to(self.model.device)

            # Generate a response; max_new_tokens bounds the reply length
            # regardless of how long the prompt already is
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=256,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.2,
                do_sample=True,
                num_return_sequences=1,
                pad_token_id=self.tokenizer.eos_token_id,
            )

            # Decode, keep only the newest assistant turn, and drop any
            # follow-up "User:" turns the base model may hallucinate
            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            response = response.split("Assistant:")[-1].split("User:")[0].strip()
            return response
        except Exception as e:
            print(f"Error generating response: {e}")
            return ("Ho ho ho! I seem to be having trouble with my Christmas magic. "
                    "Could you try asking me something else?")

    def process_message(self, message: str, history: List[List[str]]) -> Union[str, Tuple[str, str]]:
        """Process a user message and return the appropriate response."""
        # Initial greeting
        if not history:
            return ("Ho ho ho! Merry Christmas! I'm Holly, your Christmas helper. "
                    "Would you like to create a Christmas card or chat about the holidays?")

        # Lowercase copies for keyword matching; the original message is
        # preserved for the LLM and the image prompt
        lowered = message.lower()
        last_response = (history[-1][1] or "").lower()

        # Handle card creation request
        if "card" in lowered:
            if self.stable_diffusion_available:
                return ("Wonderful! Let's create a Christmas card. Please describe the scene "
                        "you'd like on your card, and I'll generate it for you using AI.")
            return ("I'm sorry, but the card generation feature is currently unavailable. "
                    "Let's chat about Christmas instead!")

        # Handle card generation
        if "card" in last_response and self.stable_diffusion_available:
            image = self._generate_image(f"Christmas card scene: {message}")
            if image:
                return (
                    f"I've created a Christmas card based on your description: '{message}'. "
                    f"Would you like to create another card or chat about something else?",
                    image,
                )
            return ("I'm sorry, I couldn't generate the image. "
                    "Would you like to try again or chat about something else?")

        # Default to an LLM response for all other messages
        return self._get_llm_response(message, history)


def create_gradio_interface() -> gr.Blocks:
    """Create and configure the Gradio interface."""
    # You can change the model here
    bot = ChristmasBot(model_name="mistralai/Mistral-7B-v0.1")

    def user(user_message: str, history: List[List[str]]) -> Tuple[str, List[List[str]]]:
        # Append the user turn with an empty bot slot to be filled next
        return "", history + [[user_message, None]]

    def bot_response(history: List[List[str]]) -> Tuple[List[List[str]], Optional[str]]:
        bot_message = bot.process_message(history[-1][0], history[:-1])
        if isinstance(bot_message, tuple):
            # Card responses carry (text, image); route the image to gr.Image
            history[-1][1] = bot_message[0]
            return history, bot_message[1]
        history[-1][1] = bot_message
        return history, None

    with gr.Blocks() as demo:
        gr.Markdown("# 🎄 Christmas Chatbot & Card Generator 🎅")
        gr.Markdown("""
        Welcome to the Christmas Chatbot!
        - Chat about anything Christmas-related
        - Type 'card' to create a custom Christmas card
        """)

        chatbot = gr.Chatbot()
        msg = gr.Textbox(
            label="Type your message here",
            placeholder="Ask me anything about Christmas or request a card!",
            show_label=True,
        )
        clear = gr.Button("Clear Chat")
        image_output = gr.Image(label="Generated Card")

        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot_response, chatbot, [chatbot, image_output]
        )
        clear.click(lambda: None, None, chatbot, queue=False)

    return demo


if __name__ == "__main__":
    demo = create_gradio_interface()
    demo.launch()
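# Usage note: running this file starts a local Gradio server, by default at
# http://127.0.0.1:7860. Loading Mistral-7B in 8-bit needs roughly 8 GB of
# GPU memory plus the bitsandbytes and accelerate packages; pass share=True
# to demo.launch() if you want a temporary public link.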