Hugging Face Space (paused) — commit "Create app.py": app.py ADDED (+163 lines)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import datetime
from typing import Dict, List

import gradio as gr
import torch
# Fix: "AutoModelForSeq2SeqGeneration" does not exist in transformers and
# raises ImportError; the seq2seq auto class is AutoModelForSeq2SeqLM.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
6 |
+
|
7 |
+
class MissionContext:
    """Holds mission numbering, current objectives, and a rolling chat transcript."""

    def __init__(self):
        self.mission_counter = 1        # next mission number to assign
        self.current_objectives = {}    # objectives accumulated so far
        self.conversation_history = []  # rolling window of recent messages

    def add_to_history(self, role: str, content: str):
        """Append one timestamped message, keeping only the 5 most recent."""
        entry = {
            "role": role,
            "content": content,
            "timestamp": datetime.now().isoformat(),
        }
        self.conversation_history.append(entry)
        # Drop the oldest entry once the window exceeds 5 messages.
        if len(self.conversation_history) > 5:
            del self.conversation_history[0]
|
22 |
+
|
23 |
+
class MissionGenerator:
    """Turns natural-language requests into Original War mission objectives via FLAN-T5."""

    def __init__(self):
        # Using FLAN-T5-base, a free and lightweight model good for instruction following.
        self.model_name = "google/flan-t5-base"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        # Fix: the original referenced AutoModelForSeq2SeqGeneration, which does not
        # exist in transformers; the correct auto class is AutoModelForSeq2SeqLM.
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        self.context = MissionContext()

    def format_conversation_history(self) -> str:
        """Format conversation history for the model input."""
        lines = []
        for msg in self.context.conversation_history:
            role = "User" if msg["role"] == "user" else "Assistant"
            lines.append(f"{role}: {msg['content']}\n")
        return "".join(lines)

    def generate_response(self, user_input: str) -> tuple[str, str]:
        """Generate both a conversational response and formatted mission objectives.

        Returns:
            (chat_response, formatted_objectives) — the chat reply and the
            "# M1 ..." objective block (or a fallback block on failure).
        """
        self.context.add_to_history("user", user_input)

        # Create prompt for the model.
        conversation_history = self.format_conversation_history()
        prompt = f"""
Previous conversation:
{conversation_history}

Task: Generate a mission for Original War game based on the conversation.
Format the response as follows:
1. A conversational response understanding the mission
2. The mission objectives in Original War format using Add Main/Secondary/Alternative

Current request: {user_input}
"""

        # Inference only — no gradients needed.
        inputs = self.tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,  # fix: pass attention_mask too, not just input_ids
                max_length=256,
                num_beams=4,
                no_repeat_ngram_size=2,
                # Fix: dropped temperature=0.7 — transformers ignores it (with a
                # warning) unless do_sample=True; beam search here is deterministic.
            )

        full_response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Split the response into conversational and formatted parts.
        try:
            parts = full_response.split("# M1")
            chat_response = parts[0].strip()
            formatted_objectives = "# M1" + parts[1] if len(parts) > 1 else self.generate_fallback_objectives(user_input)
        except Exception:
            chat_response = full_response
            formatted_objectives = self.generate_fallback_objectives(user_input)

        self.context.add_to_history("assistant", chat_response)
        return chat_response, formatted_objectives

    def generate_fallback_objectives(self, user_input: str) -> str:
        """Generate basic objectives if the main generation fails."""
        return f"""# M1
Add Main mission_objective
- Complete the primary mission goal
Add Secondary bonus_objective
- Optional additional task
#"""
|
89 |
+
|
90 |
+
def create_gradio_interface():
    """Build the Gradio Blocks UI wired to one shared MissionGenerator instance."""
    generator = MissionGenerator()

    def process_input(user_input: str, history: List) -> tuple:
        # One chat turn: generate a reply plus the formatted objective block.
        chat_response, formatted_output = generator.generate_response(user_input)
        # Fix: gr.Chatbot's default ("tuples") format is a list of
        # [user, assistant] pairs — the original appended
        # {"user": ..., "bot": ...} dicts, which Chatbot cannot render.
        history = history + [(user_input, chat_response)]
        return history, formatted_output

    def reset_conversation():
        # Fix: also clear the generator's rolling context; previously stale
        # history kept leaking into prompts after "Clear Conversation".
        generator.context.conversation_history.clear()
        return [], ""

    with gr.Blocks() as interface:
        gr.Markdown("""
# Original War Mission Objective Generator
Describe your mission scenario in natural language, and I'll help you create formatted mission objectives.
""")

        chatbot = gr.Chatbot(height=400)
        msg = gr.Textbox(
            label="Describe your mission scenario",
            placeholder="Tell me about the mission you want to create..."
        )
        clear = gr.Button("Clear Conversation")
        formatted_output = gr.Textbox(
            label="Generated Mission Objectives",
            lines=10,
            placeholder="Mission objectives will appear here..."
        )

        msg.submit(process_input,
                   inputs=[msg, chatbot],
                   outputs=[chatbot, formatted_output])
        clear.click(reset_conversation, outputs=[chatbot, formatted_output])

        # Fix: gr.Examples requires the `inputs` argument; the original call
        # omitted it and raised a TypeError at startup.
        gr.Examples([
            ["I need a mission where players have to infiltrate an enemy base. They should try to avoid detection, but if they get spotted, they'll need to fight their way through."],
            ["Create a defensive mission where players protect a convoy. They should also try to minimize civilian casualties."],
            ["I want players to capture a strategic point. They can either do it by force or try diplomatic negotiations with the local faction."]
        ], inputs=msg)

    return interface
|
128 |
+
|
129 |
+
# Entry point: build the Gradio UI and start the local web server.
if __name__ == "__main__":
    create_gradio_interface().launch()
|
133 |
+
|
134 |
+
"""
|
135 |
+
# Discord bot implementation using the same generator
|
136 |
+
import discord
|
137 |
+
from discord.ext import commands
|
138 |
+
import os
|
139 |
+
|
140 |
+
class MissionBot(commands.Bot):
|
141 |
+
def __init__(self):
|
142 |
+
super().__init__(command_prefix="!")
|
143 |
+
self.generator = MissionGenerator()
|
144 |
+
|
145 |
+
async def on_ready(self):
|
146 |
+
print(f'{self.user} has connected to Discord!')
|
147 |
+
|
148 |
+
@commands.command(name='mission')
|
149 |
+
async def generate_mission(self, ctx, *, description: str):
|
150 |
+
chat_response, formatted_output = self.generator.generate_response(description)
|
151 |
+
|
152 |
+
# Split response if it's too long for Discord
|
153 |
+
if len(formatted_output) > 1990: # Discord has 2000 char limit
|
154 |
+
await ctx.send(f"💭 {chat_response}")
|
155 |
+
await ctx.send(f"```\n{formatted_output}\n```")
|
156 |
+
else:
|
157 |
+
await ctx.send(f"💭 {chat_response}\n\n```\n{formatted_output}\n```")
|
158 |
+
|
159 |
+
# Initialize and run the bot
|
160 |
+
if __name__ == "__main__":
|
161 |
+
bot = MissionBot()
|
162 |
+
bot.run(os.getenv('DISCORD_TOKEN'))
|
163 |
+
"""
|