import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
from PIL import Image
import torch.nn as nn
from transformers import Blip2Processor, Blip2ForConditionalGeneration, InstructBlipProcessor, InstructBlipForConditionalGeneration
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources
# Placeholder implementations so the app can run end-to-end;
# detect_and_draw_objects, get_caption and free_gpu_resources
# deliberately shadow the versions imported from my_model above.
def load_caption_model():
    st.write("Placeholder for load_caption_model function")
    return None, None
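# A minimal sketch of what a real loader might look like, assuming the
# Salesforce/blip2-opt-2.7b checkpoint loaded in 8-bit via bitsandbytes
# (kept commented out so the placeholder above stays in effect):
# def load_caption_model():
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     model = Blip2ForConditionalGeneration.from_pretrained(
#         "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map="auto"
#     )
#     return model, processor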
def answer_question(image, question, model, processor):
    return "Placeholder answer for the question"
def detect_and_draw_objects(image, model_name, threshold):
    return image, "Detected objects"

def get_caption(image):
    return "Generated caption for the image"
def free_gpu_resources():
    pass
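# A minimal sketch of free_gpu_resources, assuming a CUDA device:
# def free_gpu_resources():
#     import gc
#     gc.collect()
#     torch.cuda.empty_cache()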
# Sample images (paths to the bundled demo images)
sample_images = [
    "Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",
    "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg",
    "Files/sample7.jpg",
]
# Main function
def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Evaluation Results", "Run Inference", "Dissertation Report", "Object Detection"])

    if selection == "Home":
        st.title("MultiModal Learning for Knowledge-Based Visual Question Answering")
        st.write("Home page content goes here...")

    elif selection == "Dissertation Report":
        st.title("Dissertation Report")
        st.write("Click the button below to download the PDF.")
        # Offer the report as a downloadable PDF
        with open("Files/Dissertation Report.pdf", "rb") as pdf_file:
            st.download_button(
                label="Download PDF",
                data=pdf_file.read(),
                file_name="Dissertation Report.pdf",
                mime="application/pdf",
            )

    elif selection == "Evaluation Results":
        st.title("Evaluation Results")
        st.write("This is a placeholder until the contents are uploaded.")

    elif selection == "Dataset Analysis":
        st.title("OK-VQA Dataset Analysis")
        st.write("This is a placeholder until the contents are uploaded.")

    elif selection == "Run Inference":
        run_inference()

    elif selection == "Object Detection":
        object_detection_app()

# Other display functions...
def run_inference():
    st.title("Run Inference")
    # Image-based Q&A and object detection functionality
    image_qa_and_object_detection()
def image_qa_and_object_detection():
    # Image-based Q&A functionality
    st.subheader("Talk to your image")
    image_qa_app()

    # Object detection functionality
    st.subheader("Object Detection")
    object_detection_app()
def image_qa_app():
    # Initialize session state for storing images and their Q&A histories
    if 'images_qa_history' not in st.session_state:
        st.session_state['images_qa_history'] = []

    # Button to clear all data
    if st.button('Clear All'):
        st.session_state['images_qa_history'] = []
        st.experimental_rerun()  # st.rerun() in Streamlit >= 1.27

    # Display sample images
    st.write("Or choose from sample images:")
    for idx, sample_image_path in enumerate(sample_images):
        if st.button(f"Use Sample Image {idx+1}", key=f"sample_{idx}"):
            uploaded_image = Image.open(sample_image_path)
            process_uploaded_image(uploaded_image)

    # Image uploader
    uploaded_image = st.file_uploader("Upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        process_uploaded_image(image)
def process_uploaded_image(image):
    # PIL only sets .filename when it can recover a name from the source,
    # so fall back to a fixed key for in-memory uploads
    current_image_key = getattr(image, "filename", "") or "uploaded_image"
    # ... rest of the image processing code ...
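# A hypothetical sketch of the elided body: display the image, take a question,
# and record the Q&A pair in session state (names and arguments here are
# illustrative only, matching the placeholder signatures above):
#     st.image(image, use_column_width=True)
#     question = st.text_input("Ask a question about this image:", key=f"q_{current_image_key}")
#     if question:
#         answer = answer_question(image, question, model=None, processor=None)
#         st.session_state['images_qa_history'].append(
#             {"image_key": current_image_key, "question": question, "answer": answer}
#         )
#         st.write(answer)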
# Object Detection App
def object_detection_app():
    # ... Implement your code for object detection ...
    pass
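# A hypothetical flow, assuming detect_and_draw_objects returns an annotated
# image plus a text summary (as the placeholder above does); the model name
# string is illustrative only:
# def object_detection_app():
#     uploaded = st.file_uploader("Upload an image for detection", type=["png", "jpg", "jpeg"], key="od_uploader")
#     if uploaded is not None:
#         image = Image.open(uploaded)
#         threshold = st.slider("Confidence threshold", 0.0, 1.0, 0.5)
#         annotated_image, summary = detect_and_draw_objects(image, "example-detector", threshold)
#         st.image(annotated_image, caption=summary, use_column_width=True)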
# Other functions...

if __name__ == "__main__":
    main()