RLealz committed
Commit 8eba130 · verified · 1 Parent(s): b836a73

Update app.py

Files changed (1)
  1. app.py +142 -146
app.py CHANGED
@@ -1,156 +1,152 @@
 import gradio as gr
-import random
 import os
-
-# Attempt to import required libraries
-try:
-    from diffusers import StableDiffusionPipeline
-    import torch
-    STABLE_DIFFUSION_AVAILABLE = True
-except ImportError as e:
-    print(f"Error importing Stable Diffusion dependencies: {e}")
-    STABLE_DIFFUSION_AVAILABLE = False
-
-try:
-    import openai
-    openai.api_key = os.environ.get("OPENAI_API_KEY")
-    USE_GPT = True
-except ImportError:
-    print("OpenAI library not found. Falling back to basic responses.")
-    USE_GPT = False
-
-# Quiz questions and answers
-christmas_quiz = [
-    {
-        "question": "What is the traditional Christmas flower?",
-        "options": ["Rose", "Poinsettia", "Tulip", "Daisy"],
-        "answer": "Poinsettia"
-    },
-    {
-        "question": "In which country did the tradition of putting up a Christmas tree originate?",
-        "options": ["USA", "England", "Germany", "France"],
-        "answer": "Germany"
-    },
-    {
-        "question": "What is the name of the ballet often performed at Christmas?",
-        "options": ["Swan Lake", "The Nutcracker", "Sleeping Beauty", "Giselle"],
-        "answer": "The Nutcracker"
-    },
-    {
-        "question": "Which company was the first to use Santa Claus in advertising?",
-        "options": ["Pepsi", "Coca-Cola", "McDonald's", "Walmart"],
-        "answer": "Coca-Cola"
-    },
-    {
-        "question": "What is the most popular Christmas dinner in Japan?",
-        "options": ["Turkey", "Ham", "KFC Chicken", "Roast Beef"],
-        "answer": "KFC Chicken"
-    }
-]
-
-# Initialize the Stable Diffusion pipeline if available
-if STABLE_DIFFUSION_AVAILABLE:
-    model_id = "runwayml/stable-diffusion-v1-5"
-    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
-
-def generate_image(prompt):
-    if not STABLE_DIFFUSION_AVAILABLE:
         return None
-    with torch.no_grad():
-        image = pipe(prompt, num_inference_steps=50).images[0]
-    return image
-
-def get_gpt_response(prompt, history):
-    if not USE_GPT:
-        return "I'm sorry, but I'm currently operating with limited capabilities. I can still help with the Christmas quiz and card generation!"

-    messages = [
-        {"role": "system", "content": "You are a helpful Christmas-themed chatbot named Holly. You can answer questions about Christmas, offer holiday tips, and engage in festive conversation. You also know about the Christmas quiz and card generation features of this application."},
-    ]
-    for h in history:
-        messages.append({"role": "user", "content": h[0]})
-        messages.append({"role": "assistant", "content": h[1]})
-    messages.append({"role": "user", "content": prompt})
-
-    try:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=messages,
-            max_tokens=150,
-            n=1,
-            stop=None,
-            temperature=0.7,
-        )
-        return response.choices[0].message['content'].strip()
-    except Exception as e:
-        print(f"Error in GPT response: {e}")
-        return "I'm having trouble connecting to my knowledge base right now. Can I help you with the Christmas quiz or card generation instead?"
-
-def chatbot(message, history):
-    if not history:
-        return "Ho ho ho! Merry Christmas! I'm Holly, your Christmas helper. Would you like to take a Christmas quiz, create a Christmas card, or chat about the holidays?"
-
-    last_response = history[-1][1].lower()
-
-    if "quiz" in message.lower():
-        question = random.choice(christmas_quiz)
-        options_text = "\n".join([f"{i+1}. {opt}" for i, opt in enumerate(question['options'])])
-        return f"Great! Here's your Christmas quiz question:\n\n{question['question']}\n\n{options_text}\n\nPlease enter the number of your answer."
-
-    elif "card" in message.lower():
-        if STABLE_DIFFUSION_AVAILABLE:
-            return "Wonderful! Let's create a Christmas card. Please describe the scene you'd like on your card, and I'll generate it for you using AI."
-        else:
-            return "I'm sorry, but the card generation feature is currently unavailable. Would you like to take a Christmas quiz instead?"
-
-    elif any(str(i) in message for i in range(1, 5)):  # Check if the message is a quiz answer
-        for q in christmas_quiz:
-            if q['question'] in history[-2][1]:  # Find the question in the history
-                user_answer = q['options'][int(message) - 1]
-                if user_answer == q['answer']:
-                    return f"Correct! {q['answer']} is the right answer. Would you like another question, to create a Christmas card, or to chat about something else?"
-                else:
-                    return f"Sorry, that's not correct. The right answer is {q['answer']}. Would you like another question, to create a Christmas card, or to chat about something else?"
-
-    elif "card" in last_response and STABLE_DIFFUSION_AVAILABLE:
-        image = generate_image(f"Christmas card scene: {message}")
-        if image:
-            return (f"I've created a Christmas card based on your description: '{message}'. You can see it in the image box below. "
-                    f"Would you like to create another card, take a quiz, or chat about something else?", image)
-        else:
-            return "I'm sorry, I couldn't generate the image. Would you like to try again, take a quiz, or chat about something else?"
-
-    else:
-        # Use GPT-3.5 for general conversation
-        return get_gpt_response(message, history)
-
-# Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# Christmas Quiz and Card Generator Chatbot")
-    gr.Markdown("""
-    Welcome to the Christmas Quiz and Card Generator Chatbot!
-    - Type 'quiz' to start a Christmas quiz.
-    - Type 'card' to create a custom Christmas card.
-    - Or just chat about anything Christmas-related!
-    """)
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox(label="Type your message here")
-    clear = gr.Button("Clear")
-
-    def user(user_message, history):
         return "", history + [[user_message, None]]
-
-    def bot(history):
-        bot_message = chatbot(history[-1][0], history[:-1])
-        history[-1][1] = bot_message
         if isinstance(bot_message, tuple):
             return history, bot_message[1]
         return history, None
-
-    image_output = gr.Image()
-    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-        bot, chatbot, [chatbot, image_output]
-    )
-    clear.click(lambda: None, None, chatbot, queue=False)
-
-demo.launch()
 
 import gradio as gr
+from typing import List, Tuple, Optional, Union
 import os
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+class ChristmasBot:
+    def __init__(self, model_name: str = "TheBloke/Mistral-7B-v0.1-GGUF"):
+        """
+        Initialize the Christmas chatbot with a Hugging Face model.
+        Default model is Mistral-7B, but you can change it to any other model.
+        """
+        self.stable_diffusion_available = False
+
+        # Initialize the model and tokenizer
+        print("Loading model and tokenizer...")
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16,
+            device_map="auto",
+            load_in_8bit=True  # Enable 8-bit quantization for memory efficiency
+        )
+        print("Model loaded successfully!")
+
+        # System prompt to give the model Christmas context
+        self.system_prompt = """You are Holly, a cheerful Christmas helper chatbot.
+        You love everything about Christmas and respond in a warm, festive manner.
+        Keep your responses concise but friendly.
+        If users ask about sensitive topics, guide the conversation back to Christmas-related subjects."""
+
+    def _generate_image(self, prompt: str) -> Optional[str]:
+        """Placeholder for image generation functionality."""
+        if not self.stable_diffusion_available:
+            return None
         return None
+
+    def _format_chat_history(self, history: List[List[str]]) -> str:
+        """Format the chat history into a single string for the model."""
+        formatted_history = self.system_prompt + "\n\n"
+        for user_msg, bot_msg in history:
+            if user_msg:
+                formatted_history += f"User: {user_msg}\n"
+            if bot_msg:
+                formatted_history += f"Assistant: {bot_msg}\n"
+        return formatted_history

+    def _get_llm_response(self, message: str, history: List[List[str]]) -> str:
+        """Generate response using the Hugging Face model."""
+        try:
+            # Format the conversation history with the new message
+            chat_history = self._format_chat_history(history)
+            prompt = f"{chat_history}User: {message}\nAssistant:"
+
+            # Tokenize input
+            inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
+            inputs = inputs.to(self.model.device)
+
+            # Generate response
+            outputs = self.model.generate(
+                inputs["input_ids"],
+                max_length=2048,
+                temperature=0.7,
+                top_p=0.9,
+                repetition_penalty=1.2,
+                do_sample=True,
+                num_return_sequences=1,
+                pad_token_id=self.tokenizer.eos_token_id
+            )
+
+            # Decode and clean up the response
+            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            response = response.split("Assistant:")[-1].strip()
+
+            return response
+
+        except Exception as e:
+            print(f"Error generating response: {e}")
+            return "Ho ho ho! I seem to be having trouble with my Christmas magic. Could you try asking me something else?"
+
+    def process_message(self, message: str, history: List[List[str]]) -> Union[str, Tuple[str, str]]:
+        """Process user message and return appropriate response."""
+        # Initial greeting
+        if not history:
+            return "Ho ho ho! Merry Christmas! I'm Holly, your Christmas helper. Would you like to create a Christmas card or chat about the holidays?"
+
+        message = message.lower()
+        last_response = history[-1][1].lower() if history else ""
+
+        # Handle card creation request
+        if "card" in message:
+            if self.stable_diffusion_available:
+                return "Wonderful! Let's create a Christmas card. Please describe the scene you'd like on your card, and I'll generate it for you using AI."
+            return "I'm sorry, but the card generation feature is currently unavailable. Let's chat about Christmas instead!"
+
+        # Handle card generation
+        if "card" in last_response and self.stable_diffusion_available:
+            image = self._generate_image(f"Christmas card scene: {message}")
+            if image:
+                return (
+                    f"I've created a Christmas card based on your description: '{message}'. "
+                    f"Would you like to create another card or chat about something else?",
+                    image
+                )
+            return "I'm sorry, I couldn't generate the image. Would you like to try again or chat about something else?"
+
+        # Default to LLM response for all other messages
+        return self._get_llm_response(message, history)
+
+def create_gradio_interface() -> gr.Blocks:
+    """Create and configure the Gradio interface."""
+    # You can change the model here
+    bot = ChristmasBot(model_name="TheBloke/Mistral-7B-v0.1-GGUF")
+
+    def user(user_message: str, history: List[List[str]]) -> Tuple[str, List[List[str]]]:
         return "", history + [[user_message, None]]
+
+    def bot_response(history: List[List[str]]) -> Tuple[List[List[str]], Optional[str]]:
+        bot_message = bot.process_message(history[-1][0], history[:-1])
         if isinstance(bot_message, tuple):
+            history[-1][1] = bot_message[0]
             return history, bot_message[1]
+        history[-1][1] = bot_message
         return history, None
+
+    with gr.Blocks() as demo:
+        gr.Markdown("# 🎄 Christmas Chatbot & Card Generator 🎅")
+        gr.Markdown("""
+        Welcome to the Christmas Chatbot!
+        - Chat about anything Christmas-related
+        - Type 'card' to create a custom Christmas card
+        """)
+
+        chatbot = gr.Chatbot()
+        msg = gr.Textbox(
+            label="Type your message here",
+            placeholder="Ask me anything about Christmas or request a card!",
+            show_label=True
+        )
+        clear = gr.Button("Clear Chat")
+        image_output = gr.Image(label="Generated Card")
+
+        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+            bot_response, chatbot, [chatbot, image_output]
+        )
+        clear.click(lambda: None, None, chatbot, queue=False)
+
+    return demo

+if __name__ == "__main__":
+    demo = create_gradio_interface()
+    demo.launch()
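
Note on the new default checkpoint: TheBloke/Mistral-7B-v0.1-GGUF hosts GGUF-quantized weights intended for llama.cpp-style runtimes, and transformers' AutoModelForCausalLM generally cannot load a GGUF repo as written here (load_in_8bit also relies on the bitsandbytes package and standard PyTorch/safetensors weights). A minimal sketch of the same loading call against a transformers-compatible checkpoint; mistralai/Mistral-7B-v0.1 is an assumption, not part of this commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the base (non-GGUF) Mistral repo, which transformers can load directly.
model_name = "mistralai/Mistral-7B-v0.1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # fp16 weights
    device_map="auto",          # place layers on available devices
    load_in_8bit=True,          # requires bitsandbytes; drop for a plain fp16 load
)

The GGUF files themselves are typically served with llama-cpp-python or ctransformers instead, which read the quantized .gguf weights directly.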