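"""Streamlit app for Knowledge-Based Visual Question Answering (KB-VQA) on the OK-VQA dataset.

Users select or upload an image, run captioning and object detection on it, and
ask questions that the KBVQA model answers using the extracted visual context.
"""
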
import streamlit as st
import torch
import bitsandbytes  # imported (with accelerate) for the quantized model loading stack
import accelerate
import scipy
import copy
from PIL import Image
import torch.nn as nn
import pandas as pd
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.gen_utilities import free_gpu_resources
from my_model.KBVQA import KBVQA, prepare_kbvqa_model
def answer_question(caption, detected_objects_str, question, model):
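    """Generate an answer to `question` using the image caption and detected objects as context."""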
answer = model.generate_answer(question, caption, detected_objects_str)
return answer
# Paths to the sample images bundled with the app
sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",
"Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg",
"Files/sample7.jpg"]
def analyze_image(image, model):
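    """Run captioning and object detection on a copy of `image`.

    Returns the caption, a string describing the detected objects, and the
    image annotated with bounding boxes.
    """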
    img = copy.deepcopy(image)  # work on a copy so the original image is not modified
caption = model.get_caption(img)
image_with_boxes, detected_objects_str = model.detect_objects(img)
st.text("I am ready, let's talk!")
free_gpu_resources()
return caption, detected_objects_str, image_with_boxes
def image_qa_app(kbvqa):
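    """Render the interactive Q&A UI: select or upload an image, analyze it, and ask questions.

    Per-image state (caption, detected objects, Q&A history) is kept in
    st.session_state['images_data'], keyed by image path or file name.
    """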
if 'images_data' not in st.session_state:
st.session_state['images_data'] = {}
# Display sample images as clickable thumbnails
st.write("Choose from sample images:")
cols = st.columns(len(sample_images))
for idx, sample_image_path in enumerate(sample_images):
with cols[idx]:
image = Image.open(sample_image_path)
st.image(image, use_column_width=True)
if st.button(f'Select Sample Image {idx + 1}', key=f'sample_{idx}'):
process_new_image(sample_image_path, image, kbvqa)
# Image uploader
    uploaded_image = st.file_uploader("Or upload an image", type=["png", "jpg", "jpeg"])
if uploaded_image is not None:
process_new_image(uploaded_image.name, Image.open(uploaded_image), kbvqa)
# Display and interact with each uploaded/selected image
for image_key, image_data in st.session_state['images_data'].items():
st.image(image_data['image'], caption=f'Uploaded Image: {image_key[-11:]}', use_column_width=True)
if not image_data['analysis_done']:
st.text("Cool image, please click 'Analyze Image'..")
if st.button('Analyze Image', key=f'analyze_{image_key}'):
                caption, detected_objects_str, image_with_boxes = analyze_image(image_data['image'], kbvqa)  # image_with_boxes can be displayed later if needed
image_data['caption'] = caption
image_data['detected_objects_str'] = detected_objects_str
image_data['analysis_done'] = True
# Initialize qa_history for each image
qa_history = image_data.get('qa_history', [])
if image_data['analysis_done']:
question = st.text_input(f"Ask a question about this image ({image_key[-11:]}):", key=f'question_{image_key}')
            if st.button('Get Answer', key=f'answer_{image_key}'):
                if not question:
                    st.warning("Please type a question first.")
                elif question not in [q for q, _ in qa_history]:
answer = answer_question(image_data['caption'], image_data['detected_objects_str'], question, kbvqa)
qa_history.append((question, answer))
image_data['qa_history'] = qa_history
else:
st.info("This question has already been asked.")
# Display Q&A history for each image
for q, a in qa_history:
st.text(f"Q: {q}\nA: {a}\n")
def process_new_image(image_key, image, kbvqa):
"""Process a new image and update the session state."""
if image_key not in st.session_state['images_data']:
st.session_state['images_data'][image_key] = {
'image': image,
'caption': '',
'detected_objects_str': '',
'qa_history': [],
'analysis_done': False
}
def run_inference():
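    """Render the inference page: method and detector selection, model (re)loading, and the image Q&A app."""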
st.title("Run Inference")
st.write("Please note that this is not a general purpose model, it is specifically trained on OK-VQA dataset and is designed to give direct and short answers to the given questions.")
method = st.selectbox(
"Choose a method:",
["Fine-Tuned Model", "In-Context Learning (n-shots)"],
index=0
)
detection_model = st.selectbox(
"Choose a model for objects detection:",
["yolov5", "detic"],
index=1 # "detic" is selected by default
)
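    # Each detector gets its own default confidence threshold (YOLOv5: 0.2, Detic: 0.4)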
default_confidence = 0.2 if detection_model == "yolov5" else 0.4
confidence_level = st.slider(
"Select minimum detection confidence level",
min_value=0.1,
max_value=0.9,
value=default_confidence,
step=0.1
)
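    # Persist the model settings across Streamlit reruns so changes can be detected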
if 'model_settings' not in st.session_state:
st.session_state['model_settings'] = {'detection_model': detection_model, 'confidence_level': confidence_level}
settings_changed = (st.session_state['model_settings']['detection_model'] != detection_model or
st.session_state['model_settings']['confidence_level'] != confidence_level)
if settings_changed:
st.text("Model Settings have changed, please reload the model, this will take no time :)")
button_label = "Reload Model" if settings_changed else "Load Model"
if method == "Fine-Tuned Model":
if 'kbvqa' not in st.session_state:
st.session_state['kbvqa'] = None
if st.button(button_label):
free_gpu_resources()
if st.session_state['kbvqa'] is not None:
if not settings_changed:
st.write("Model already loaded.")
else:
free_gpu_resources()
detection_model = st.session_state['model_settings']['detection_model']
confidence_level = st.session_state['model_settings']['confidence_level']
prepare_kbvqa_model(detection_model, only_reload_detection_model=True) # only reload detection model with new settings
st.session_state['kbvqa'].detection_confidence = confidence_level
free_gpu_resources()
else:
st.text("Loading the model will take no more than a few minutes . .")
st.session_state['kbvqa'] = prepare_kbvqa_model(detection_model)
st.session_state['kbvqa'].detection_confidence = confidence_level
st.session_state['model_settings'] = {'detection_model': detection_model, 'confidence_level': confidence_level}
st.write("Model is ready for inference.")
free_gpu_resources()
if st.session_state['kbvqa']:
image_qa_app(st.session_state['kbvqa'])
else:
            st.write("Model is not ready for inference yet.")
# Display model settings
if 'kbvqa' in st.session_state and st.session_state['kbvqa'] is not None:
model_settings = {
'Detection Model': st.session_state['model_settings']['detection_model'],
'Confidence Level': st.session_state['model_settings']['confidence_level']
}
st.write("### Current Model Settings:")
st.table(pd.DataFrame(model_settings, index=[0]))
# Main function
def main():
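    """Entry point: render the sidebar navigation and dispatch to the selected page."""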
st.sidebar.title("Navigation")
selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Finetuning and Evaluation Results", "Run Inference", "Dissertation Report", "Code"])
st.sidebar.write("More Pages will follow .. ")
if selection == "Home":
st.title("MultiModal Learning for Knowledg-Based Visual Question Answering")
st.write("Home page content goes here...")
elif selection == "Dissertation Report":
st.title("Dissertation Report")
st.write("Click the link below to view the PDF.")
# Example to display a link to a PDF
st.download_button(
label="Download PDF",
data=open("Files/Dissertation Report.pdf", "rb"),
file_name="example.pdf",
mime="application/octet-stream"
)
elif selection == "Evaluation Results":
st.title("Evaluation Results")
st.write("This is a Place Holder until the contents are uploaded.")
elif selection == "Dataset Analysis":
st.title("OK-VQA Dataset Analysis")
st.write("This is a Place Holder until the contents are uploaded.")
elif selection == "Finetuning and Evaluation Results":
st.title("Finetuning and Evaluation Results")
st.write("This is a Place Holder until the contents are uploaded.")
elif selection == "Run Inference":
run_inference()
elif selection == "Code":
st.title("Code")
st.markdown("You can view the code for this project on the Hugging Face Space file page.")
st.markdown("[View Code](https://huggingface.co/spaces/m7mdal7aj/Mohammed_Alhaj_PlayGround/tree/main)", unsafe_allow_html=True)
elif selection == "More Pages will follow .. ":
st.title("Staye Tuned")
st.write("This is a Place Holder until the contents are uploaded.")
if __name__ == "__main__":
    main()