import os
import random
import gradio as gr
import wget
from transformers import AutoModelForCausalLM, AutoTokenizer
import whisper
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write
# URLs of the models to download
model_urls = [
    "https://huggingface.co/leejet/FLUX.1-schnell-gguf/resolve/main/flux1-schnell-q2_k.gguf",
    "https://huggingface.co/aifoundry-org/FLUX.1-schnell-Quantized/resolve/main/flux1-schnell-Q2_K.gguf",
    "https://huggingface.co/qwp4w3hyb/gemma-2-27b-it-iMat-GGUF/resolve/main/gemma-2-27b-it-imat-IQ1_S.gguf",
    "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q2_K.gguf",
    "https://huggingface.co/WongBingbing/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF/resolve/main/meta-llama-3.1-8b-instruct-q2_k.gguf",
    "https://huggingface.co/city96/FLUX.1-schnell-gguf/resolve/main/flux1-schnell-Q2_K.gguf",
    "https://huggingface.co/mradermacher/L3-Super-Nova-RP-8B-i1-GGUF/resolve/main/L3-Super-Nova-RP-8B.i1-IQ1_M.gguf",
    "https://huggingface.co/zhhan/Phi-3-mini-4k-instruct_gguf_derived/resolve/main/Phi-3-mini-4k-instruct-q4.gguf"
]
# Filenames for the downloaded models
# Note: "flux1-schnell-Q2_K.gguf" appears twice, so the second URL that maps to it is
# skipped by download_models() because the file already exists on disk.
model_files = [
    "flux1-schnell-q2_k.gguf",
    "flux1-schnell-Q2_K.gguf",
    "gemma-2-27b-it-imat-IQ1_S.gguf",
    "llama-2-7b-chat.Q2_K.gguf",
    "meta-llama-3.1-8b-instruct-q2_k.gguf",
    "flux1-schnell-Q2_K.gguf",
    "L3-Super-Nova-RP-8B.i1-IQ1_M.gguf",
    "Phi-3-mini-4k-instruct-q4.gguf"
]
# Download the models with wget, skipping files that already exist
def download_models(model_urls, model_files):
    for url, file in zip(model_urls, model_files):
        if not os.path.exists(file):
            wget.download(url, out=file)
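# Illustrative usage (assumes the working directory is writable; not executed at import time):
#   download_models(model_urls, model_files)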
# Initialize the Whisper transcription model
def initialize_whisper():
    model = whisper.load_model("base")
    return model
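# "base" favors speed over accuracy; openai-whisper also ships "tiny", "small", "medium",
# and "large" checkpoints that can be swapped in here.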
# Initialize the Transformers text-generation models
def initialize_transformer_models():
    model_names = ["gpt2", "gpt2-medium", "gpt2-large"]  # More models can be added here
    models = []
    for model_name in model_names:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        models.append((model, tokenizer))
    return models
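# Shape of the return value (sketch, not executed):
#   [(gpt2_model, gpt2_tokenizer), (gpt2_medium_model, ...), (gpt2_large_model, ...)]
# i.e. one (model, tokenizer) pair per entry in model_names, consumed by unified_response() below.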
# Song generation with MusicGen
def generate_song(prompt, model_type="standard"):
    if model_type == "medium":
        model = MusicGen.get_pretrained("facebook/musicgen-medium")
    else:
        model = MusicGen.get_pretrained("facebook/musicgen-melody")
    model.set_generation_params(duration=30)  # Song length in seconds
    wav_output = model.generate([prompt])  # generate() expects a list of text descriptions
    song_path = "generated_song"
    # MusicGen has no save_wav() method; audiocraft's audio_write() saves the waveform and adds the ".wav" suffix
    audio_write(song_path, wav_output[0].cpu(), model.sample_rate, strategy="loudness")
    return song_path + ".wav"
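# Illustrative call (hypothetical prompt, not executed at import time):
#   generate_song("an upbeat synthwave track with heavy drums", model_type="medium")
# returns "generated_song.wav" in the working directory.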
# Transcribe audio with Whisper
def transcribe_audio(audio_path, whisper_model):
    transcription = whisper_model.transcribe(audio_path)
    return transcription["text"]
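# Illustrative call (assumes a local recording named sample.wav, which is not part of this Space):
#   transcribe_audio("sample.wav", initialize_whisper())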
# Combine the responses from the different models into a single reply
def unified_response(user_input, models):
    responses = []
    for model, tokenizer in models:
        inputs = tokenizer(user_input, return_tensors="pt")
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,  # cap the completion length
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no dedicated pad token
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        responses.append(response)
    # Merge the responses (more logic could be applied here, e.g. choosing the most common one)
    final_response = random.choice(responses)
    return final_response
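# Illustrative call (hypothetical input, not executed at import time):
#   unified_response("Explain what a tokenizer does", initialize_transformer_models())
# returns one of the three GPT-2 completions, chosen at random.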
# Chatbot logic shared by the Gradio interface
def chatbot_response(user_input, models, whisper_model=None, audio_path=None):
    if user_input.lower() == "exit":
        return "Session ended."
    if "image" in user_input.lower():
        return "Image generation is not supported by these models."
    elif "song" in user_input.lower() or "music" in user_input.lower():
        model_type = "medium" if "medium" in user_input.lower() else "standard"
        song_path = generate_song(user_input, model_type=model_type)
        return song_path  # Return the path of the generated song
    elif audio_path:  # If an audio file is provided, transcribe it
        return transcribe_audio(audio_path, whisper_model)
    else:
        return unified_response(user_input, models)
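# Keyword-routing sketch (illustrative inputs, not executed):
#   chatbot_response("write a song about the sea", models)  -> path to generated_song.wav
#   chatbot_response("generate an image of a cat", models)  -> unsupported-feature message
#   chatbot_response("hello there", models)                 -> unified_response(...) text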
# Build the Gradio interface
def create_gradio_interface(models, whisper_model):
    def gradio_chat(user_input, audio_input=None):
        response = chatbot_response(user_input, models, whisper_model, audio_input)
        if isinstance(response, str) and response.endswith(".png"):
            return None, response, None, None  # No text, the image path, no song, no transcription
        elif isinstance(response, str) and response.endswith(".wav"):
            return None, None, response, None  # No text, no image, the song path, no transcription
        else:
            return response, None, None, None  # The text, no image, no song, no transcription
    # One text input and one audio input; four outputs (text, image, song, and transcription)
    iface = gr.Interface(
        fn=gradio_chat,
        inputs=["text", gr.Audio(type="filepath")],  # pass the recording as a file path so Whisper can read it
        outputs=["text", "image", "audio", "text"],
        title="Chatbot with Images, Songs, and Audio Transcription",
    )
    return iface
# Run the chatbot with Gradio
def run_chatbot_with_gradio():
    download_models(model_urls, model_files)  # Download the models if they are not already present
    models = initialize_transformer_models()  # Initialize the Transformers models
    whisper_model = initialize_whisper()  # Initialize the Whisper model
    iface = create_gradio_interface(models, whisper_model)
    iface.launch()

if __name__ == "__main__":
    run_chatbot_with_gradio()