Jyothirmai
committed on
Update app.py
app.py
CHANGED
@@ -1,7 +1,11 @@
+
+
 import gradio as gr
 from PIL import Image
 import clipGPT
 import vitGPT
+import skimage.io as io
+import PIL.Image
 import difflib
 
 
@@ -32,30 +36,38 @@ def generate_caption_vitgpt(image):
 
 
 
-# Sample image paths
-sample_images = [
-    "CXR191_IM-0591-1001.jpg",
-    "CXR191_IM-0598-1001.jpg",
-    "CXR191_IM-0601-1001.jpg",
-    "CXR191_IM-0609-1001.jpg",
-    "CXR191_IM-0618-1001.jpg"
-]
-
-# Gradio interface
 with gr.Blocks() as demo:
-
-    image = gr.Image(label="Upload Chest X-ray", source="upload")
-    sample_image_gallery = gr.ImageGallery(sample_images, label="Sample Images")
-    with gr.Row():
-        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
-    with gr.Row():
-        caption = gr.Textbox(label="Generated Caption")
-
+
     generated_captions = {
         "CLIP-GPT2": "",
         "ViT-GPT2": "",
     }
 
+    gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖</h1>")
+    gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
+
+    with gr.Row():
+        sample_images = [
+            "CXR191_IM-0591-1001.png",
+            "CXR192_IM-0598-1001.png",
+            "CXR193_IM-0601-1001.png",
+            "CXR194_IM-0609-1001.png",
+            "CXR195_IM-0618-1001.png"
+        ]
+        image = gr.Image(label="Upload Chest X-ray")
+        gr.Gallery(
+            value = sample_images,
+            label="Sample Images",
+        )
+        # sample_images_gallery = gr.Gallery(
+        #     value = sample_images,
+        #     label="Sample Images",
+        # )
+    with gr.Row():
+        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
+        generate_button = gr.Button("Generate Caption")
+        caption = gr.Textbox(label="Generated Caption")
+
     def predict(img, model_name):
         if model_name == "CLIP-GPT2":
             return generate_caption_clipgpt(img)
@@ -75,74 +87,11 @@ with gr.Blocks() as demo:
     # Compare captions on button click
     compare_button.click(lambda: compare_and_highlight(
         generated_captions["CLIP-GPT2"], generated_captions["ViT-GPT2"]
-
+    ), [], comparison_result)
 
-    # Handle changes for both uploaded and sample images
-    gr.Image.change(predict, [image, model_choice], caption)
-    sample_image_gallery.change(predict, [sample_image_gallery, model_choice], caption)
 
-
-
-
-
-# import gradio as gr
-# from PIL import Image
-# import clipGPT
-# import vitGPT
-# import skimage.io as io
-# import PIL.Image
-
-
-# # Caption generation functions
-# def generate_caption_clipgpt(image):
-#     caption = clipGPT.generate_caption_clipgpt(image)
-#     return caption
 
-# def generate_caption_vitgpt(image):
-#     caption = vitGPT.generate_caption(image)
-#     return caption
-
-
-
-# with gr.Blocks() as demo:
-
-
-#     gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖</h1>")
-#     gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
-
-#     with gr.Row():
-#         sample_images = [
-#             "CXR191_IM-0591-1001.png",
-#             "CXR192_IM-0598-1001.png",
-#             "CXR193_IM-0601-1001.png",
-#             "CXR194_IM-0609-1001.png",
-#             "CXR195_IM-0618-1001.png"
-#         ]
-#         image = gr.Image(label="Upload Chest X-ray")
-#         gr.Gallery(
-#             value = sample_images,
-#             label="Sample Images",
-#         )
-#         # sample_images_gallery = gr.Gallery(
-#         #     value = sample_images,
-#         #     label="Sample Images",
-#         # )
-#     with gr.Row():
-#         model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
-#         generate_button = gr.Button("Generate Caption")
-#         caption = gr.Textbox(label="Generated Caption")
-
-#     def predict(img, model_name):
-#         if model_name == "CLIP-GPT2":
-#             return generate_caption_clipgpt(img)
-#         elif model_name == "ViT-GPT2":
-#             return generate_caption_vitgpt(img)
-#         else:
-#             return "Caption generation for this model is not yet implemented."
-
-#     generate_button.click(predict, [image, model_choice], caption)  # Trigger prediction on button click
-#     # sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption)  # Handle sample images
-
-
-# demo.launch()
+    generate_button.click(predict, [image, model_choice], caption)  # Trigger prediction on button click
+    # sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption)  # Handle sample images
 
 
+demo.launch()
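The last hunk wires compare_button to a lambda over compare_and_highlight, but that helper's definition falls outside the changed hunks; only the import difflib line hints at how it works. Below is a minimal sketch of what such a helper could look like, assuming word-level diffing with difflib.SequenceMatcher and <mark> highlighting; the actual implementation in app.py is not shown in this commit and may differ.

import difflib

def compare_and_highlight(caption_a, caption_b):
    # Hypothetical sketch, not the committed implementation: compare the two
    # generated captions word by word and wrap differing spans in <mark> tags
    # so the result can be rendered by an HTML-capable output component.
    words_a, words_b = caption_a.split(), caption_b.split()
    matcher = difflib.SequenceMatcher(None, words_a, words_b)
    highlighted = []
    for tag, a0, a1, b0, b1 in matcher.get_opcodes():
        if tag == "equal":
            highlighted.extend(words_a[a0:a1])
        else:
            highlighted.extend(f"<mark>{w}</mark>" for w in words_a[a0:a1])
            highlighted.extend(f"<mark>{w}</mark>" for w in words_b[b0:b1])
    return " ".join(highlighted)

Note that the click handler passes an empty inputs list ([]), so the lambda reads whatever is currently stored in the module-level generated_captions dict; both entries start as empty strings and are presumably filled in by predict elsewhere in the file.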
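The commit also leaves the sample-image wiring unresolved: sample_images_gallery.change(...) stays commented out, and the gr.Gallery that is created is never assigned to a variable. In recent Gradio releases a gallery click is typically handled with the .select event and an injected gr.SelectData argument. A self-contained sketch under those assumptions follows; the dummy predict stands in for the real clipGPT/vitGPT dispatch.

import gradio as gr
from PIL import Image

sample_images = ["CXR191_IM-0591-1001.png"]  # sample path from the commit

def predict(img, model_name):
    # Stand-in for the real dispatch to clipGPT / vitGPT in app.py.
    return f"[{model_name}] caption for a {img.size[0]}x{img.size[1]} image"

with gr.Blocks() as demo:
    gallery = gr.Gallery(value=sample_images, label="Sample Images")
    model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2"], label="Select Model")
    caption = gr.Textbox(label="Generated Caption")

    def on_sample_select(evt: gr.SelectData, model_name):
        # evt.index is the position of the clicked thumbnail; Gradio injects
        # the SelectData argument based on the type annotation.
        img = Image.open(sample_images[evt.index])
        return predict(img, model_name)

    gallery.select(on_sample_select, [model_choice], caption)

demo.launch()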