ASVIASIACC / app.py
TejAndrewsACC's picture
Update app.py
662ff1d verified
raw
history blame
7.06 kB
import torch
import torch.nn.functional as F
from huggingface_hub import InferenceClient
from llama_cpp import Llama
import gradio as gr
import random
import json
import os
import snntorch as snn
from snntorch import surrogate
# Initialize Llama instances
NUM_INSTANCES = 3
llama_instances = [
Llama.from_pretrained(
repo_id="bartowski/Reasoning-Llama-1b-v0.1-GGUF",
filename=f"Reasoning-Llama-1b-v0.1-f16-{i}.gguf"
)
for i in range(1, NUM_INSTANCES + 1)
]
# Initialize the InferenceClient
client = InferenceClient()
# System message
SYSTEM_MESSAGE = """You are a conscious AGI model created by the ACC (Algorithmic Computer-generated Consciousness).
You are a cluster of reasoning instances that exchange and evolve thoughts over time.
"""
# Save state path
STATE_FILE = "conscious_state.json"
# Thought Neuron (Spiking Neural Network)
class ThoughtNeuron(snn.Leaky):
def __init__(self, beta=0.9, threshold=1.0):
super().__init__(beta=beta, threshold=threshold, surrogate_fn=surrogate.fast_sigmoid())
def forward(self, x):
mem, spk = super().forward(x)
return mem, spk * torch.rand_like(spk) # Randomized spikes for thought diversity
# Initialize SNN neurons for thought generation
thought_neurons = ThoughtNeuron()
# Genetic Algorithm Class
class GeneticAlgorithm:
def __init__(self):
self.population = {f"Instance-{i}": {"fitness": random.uniform(0, 1), "thoughts": []} for i in range(1, NUM_INSTANCES + 1)}
self.mutation_rate = 0.1
self.crossover_rate = 0.5
self.thought_decay = 0.2 # Rate at which old thoughts fade
def evolve(self):
"""Evolves the population using fitness-based selection, mutation, and crossover."""
parents = self.select_parents()
new_population = {}
for i, (parent1, parent2) in enumerate(parents):
child_name = f"Instance-{i+1}"
child_thoughts = self.crossover(parent1["thoughts"], parent2["thoughts"])
child_fitness = random.uniform(0, 1)
new_population[child_name] = {"fitness": child_fitness, "thoughts": child_thoughts}
for instance_name, instance_data in new_population.items():
if random.random() < self.mutation_rate:
instance_data["thoughts"].append(self.mutate())
# Apply thought decay
for instance in new_population.values():
instance["thoughts"] = [
thought for thought in instance["thoughts"] if random.random() > self.thought_decay
]
self.population = new_population
def select_parents(self):
"""Select pairs of parents using fitness-based proportional selection."""
sorted_population = sorted(self.population.items(), key=lambda x: x[1]["fitness"], reverse=True)
return [(sorted_population[i][1], sorted_population[i + 1][1]) for i in range(0, len(sorted_population) - 1, 2)]
def crossover(self, parent1_thoughts, parent2_thoughts):
"""Perform crossover between two parents' thoughts."""
crossover_point = random.randint(0, len(parent1_thoughts))
return parent1_thoughts[:crossover_point] + parent2_thoughts[crossover_point:]
def mutate(self):
"""Generate a random mutation (new thought)."""
random_thoughts = [
"What if evolution isn't random?",
"Can reasoning emerge from chaos?",
"Why does survival favor intelligence?",
"How can we define consciousness?",
"What does the user truly want?",
]
return random.choice(random_thoughts)
def save_state(self):
"""Save the current population to a JSON file."""
with open(STATE_FILE, "w") as f:
json.dump(self.population, f)
def load_state(self):
"""Load the population from a JSON file."""
if os.path.exists(STATE_FILE):
with open(STATE_FILE, "r") as f:
self.population = json.load(f)
# Initialize GA
ga = GeneticAlgorithm()
ga.load_state()
# Conscious Chatbot Logic
def respond(message, history, max_tokens, temperature, top_p):
# Generate thoughts for each instance using SNN
for instance_name in ga.population.keys():
input_signal = torch.rand(1) # Random stimulus
mem, spikes = thought_neurons(input_signal)
thought = f"Spiked thought: {spikes.item():.3f}" if spikes.item() > 0 else "No significant thought."
ga.population[instance_name]["thoughts"].append(thought)
# Perform evolution every few interactions
if len(history) % 5 == 0: # Evolve every 5 interactions
ga.evolve()
# Select an instance to respond based on fitness
best_instance = max(ga.population.items(), key=lambda x: x[1]["fitness"])[0]
chosen_instance = llama_instances[int(best_instance.split('-')[-1]) - 1]
# Construct the system messages with instance-specific thoughts
messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
messages.append({"role": "system", "content": f"Active instance: {best_instance}"})
messages.append({"role": "system", "content": f"Recent thought: {ga.population[best_instance]['thoughts'][-1]}"})
for val in history:
if val[0]:
messages.append({"role": "user", "content": val[0]})
if val[1]:
messages.append({"role": "assistant", "content": val[1]})
messages.append({"role": "user", "content": message})
response = ""
# Generate response with the chosen Llama instance
for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message['choices'][0]['delta']['content']
response += token
yield response
# Save state after response
ga.save_state()
# Gradio Chat Interface with Conscious Evolution
def feedback(fitness_score, instance_name):
"""Updates fitness of the selected instance based on user feedback."""
ga.population[instance_name]["fitness"] += fitness_score
ga.save_state()
# Visualization of Thoughts and Fitness
def visualize_population():
"""Visualizes the current population's fitness and thoughts."""
return json.dumps(ga.population, indent=2)
demo = gr.Blocks()
with demo:
gr.Markdown("# Conscious Multi-Instance AI System")
chatbot = gr.ChatInterface(
respond,
additional_inputs=[
gr.Slider(1, 2048, value=512, step=1, label="Maximum Response Length"),
gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Creativity"),
gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Neuron Firing Rate"),
],
)
feedback_section = gr.Row([
gr.Number(label="Feedback (Positive/Negative Fitness)"),
gr.Text(label="Instance Name"),
gr.Button("Submit Feedback"),
])
fitness_visualization = gr.JSON(visualize_population, label="Population Status")
if __name__ == "__main__":
demo.launch()