# Hugging Face Spaces page artifacts (site status text, not code) —
# commented out so the file parses as Python:
# Spaces
# Sleeping
# Sleeping
import gradio as gr
from transformers import (
    AutoModelForMaskedLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    pipeline,
)

# --- DNA analysis model -----------------------------------------------------
# NOTE(review): facebook/esm2_t6_8M_UR50D is a *protein* language model
# (ESM-2), not a DNA model — confirm this checkpoint matches the intended
# "DNA analysis" use case.
dna_tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
dna_model = AutoModelForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
dna_pipeline = pipeline("fill-mask", model=dna_model, tokenizer=dna_tokenizer)

# --- Ethical inquiry / learning support model --------------------------------
# One flan-t5 pipeline serves both the "ethical_inquiry" and
# "learning_support" routes in handle_query.
ethics_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
ethics_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
ethics_pipeline = pipeline(
    "text2text-generation", model=ethics_model, tokenizer=ethics_tokenizer
)
# Query Classification | |
def classify_query(query):
    """Classify *query* into one of three routing buckets.

    Parameters
    ----------
    query : str
        The raw user message.

    Returns
    -------
    str
        ``"dna_analysis"`` if the query mentions DNA or a sequence,
        ``"ethical_inquiry"`` if it mentions ethics or privacy,
        ``"learning_support"`` otherwise (catch-all).
    """
    # Case-insensitive matching: the original only matched the exact
    # casings "DNA", "sequence", "ethics", "privacy", so e.g. "dna" or
    # "Privacy" fell through to the wrong bucket.
    q = query.lower()
    if "dna" in q or "sequence" in q:
        return "dna_analysis"
    if "ethics" in q or "privacy" in q:
        return "ethical_inquiry"
    return "learning_support"
# Process Query | |
def handle_query(query):
    """Route *query* to the appropriate model and return a formatted reply.

    The routing decision comes from ``classify_query``. Each branch wraps
    model inference in try/except so a model failure is reported as an
    error string instead of crashing the Gradio UI (broad ``Exception`` is
    deliberate at this user-facing boundary).

    Parameters
    ----------
    query : str
        The raw user message.

    Returns
    -------
    str
        A labelled response (or labelled error message) for display.
    """
    task = classify_query(query)
    if task == "dna_analysis":
        try:
            # UI convention: the user writes "X" for the residue to
            # predict; the fill-mask pipeline expects the literal
            # "[MASK]" token instead.
            masked_sequence = query.replace("X", "[MASK]")
            output = dna_pipeline(masked_sequence)
            return f"DNA Analysis Result: {output}"
        except Exception as e:  # report, don't crash the UI
            return f"Error in DNA Analysis: {e}"
    if task == "ethical_inquiry":
        return _run_text_pipeline(query, "Ethical Inquiry")
    # Default bucket: learning support / general questions. Both text
    # branches used identical logic, so they share one helper.
    return _run_text_pipeline(query, "Learning Support")


def _run_text_pipeline(query, label):
    """Run the flan-t5 pipeline on *query*; format output/errors with *label*."""
    try:
        response = ethics_pipeline(query)
        return f"{label} Response: {response[0]['generated_text']}"
    except Exception as e:  # report, don't crash the UI
        return f"Error in {label}: {e}"
# Gradio Interface | |
def chatbot(query):
    """Gradio entry point: delegate the user's message to ``handle_query``."""
    reply = handle_query(query)
    return reply
# Deploy with Gradio | |
# Gradio UI definition: one text box in, one text box out, wired to chatbot.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="BioSphere AI Chatbot",
    description="A chatbot for DNA Analysis, Ethical Guidance, and Learning Support in Biotech.",
)
# Add Gemmini API Key Integration | |
def deploy_with_gemmini(api_key):
    """Log a masked form of *api_key* and launch the Gradio interface.

    NOTE(review): nothing in this file actually sends the key anywhere —
    it was only printed. "Gemmini" also looks like a typo for "Gemini";
    the public name is kept so existing callers keep working.

    Parameters
    ----------
    api_key : str
        The deployment API key. Only its last 4 characters are logged.
    """
    # Never print a full secret to stdout/logs; show a masked suffix only.
    masked = f"...{api_key[-4:]}" if api_key else "<none>"
    print(f"Deploying using Gemmini API Key: {masked}")
    interface.launch()


# Replace 'your_api_key' with your actual key — better yet, load it from an
# environment variable so it never lives in source control.
if __name__ == "__main__":
    # Guarded so importing this module no longer launches the server as a
    # side effect; running the script directly behaves as before.
    gemmini_api_key = "your_api_key"
    deploy_with_gemmini(gemmini_api_key)