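"""Gradio demo for a building damage assessment model.

Loads a semantic segmentation checkpoint fine-tuned on Maxar Open Data
imagery of the August 2023 Maui wildfires, runs per-pixel inference on an
uploaded image, and overlays the predicted building (green) and damage (red)
classes on the input.
"""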

import gradio as gr
import torch
from torchvision import transforms
from PIL import Image
import numpy as np

from trainer import CustomSemanticSegmentationTask
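
# Note: CustomSemanticSegmentationTask is assumed to be defined in a local
# trainer.py from the building-damage-assessment toolkit; this script must be
# run from a directory where that module is importable.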

# Load the fine-tuned segmentation task from its checkpoint and put the
# underlying model into inference mode on the CPU
task = CustomSemanticSegmentationTask.load_from_checkpoint(
    "maui_demo_model.ckpt", map_location="cpu"
)
task.freeze()
model = task.model
model = model.eval()

# Convert a PIL image to a CHW float tensor with values scaled to [0, 1]
preprocess = transforms.Compose([
    transforms.ToTensor(),
])


def segment_image(image):
    """Run the model on a PIL image and return the per-pixel class mask."""
    input_tensor = preprocess(image).unsqueeze(0)  # add a batch dimension
    with torch.inference_mode():
        output = model(input_tensor)
    # Most likely class per pixel, with the batch dimension squeezed out
    output_predictions = output.argmax(1).squeeze().numpy()
    return output_predictions
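
# Example usage (assuming "crop1.png" sits next to this script):
#   mask = segment_image(Image.open("crop1.png"))
#   print(mask.shape, np.unique(mask))  # per-pixel class indices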

# Example images bundled with the demo
preexisting_images = ["crop1.png", "crop2.png", "crop3.png"]


def handle_image(image):
    image = Image.open(image)
    mask = segment_image(image)

    # Map class indices to RGB colors: classes 0 and 1 are background (left
    # unpainted), class 2 is building (green), class 3 is damage (red)
    colormap = np.array([
        [0, 0, 0],
        [0, 0, 0],
        [0, 255, 0],
        [255, 0, 0],
    ])
    output = colormap[mask].astype("uint8")

    # Blend the class colors 50/50 with the original image, leaving
    # background pixels (mask <= 1) untouched
    segmented_image = np.array(image)
    segmented_image[mask > 1] = (
        0.5 * output[mask > 1] + 0.5 * segmented_image[mask > 1]
    ).astype("uint8")
    segmented_image = Image.fromarray(segmented_image)
    return segmented_image
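
# Example usage (assuming "crop1.png" is present): the returned PIL image can
# be saved directly, e.g. handle_image("crop1.png").save("overlay.png")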


# Gradio components: the input is handed to handle_image as a file path; the
# output is the overlaid PIL image
image_input = gr.Image(type="filepath", label="Upload an Image", sources=["upload"])
image_output = gr.Image(type="pil", label="Output")

css_content = """
.legend {
    list-style: none;
    padding: 0;
}
.legend li {
    line-height: 20px; /* Match the height of the color-box */
}
.legend .color-box {
    display: inline-block;
    width: 20px;
    height: 20px;
    margin-right: 5px;
    border: 1px solid #000; /* Optional: adds a border around the color box */
    vertical-align: middle; /* Centers the box vertically relative to the text */
}
.background { background-color: #FFFFFF; } /* White */
.building { background-color: #00FF00; } /* Green */
.damage { background-color: #FF0000; } /* Red */
"""

html_content = """
<div style="font-size:large;">
<p>
This application demonstrates the input and output of the building damage assessment model trained by following the tutorial in the
<a href="https://github.com/microsoft/building-damage-assessment/" target="_blank">Microsoft AI for Good Building Damage
Assessment Toolkit</a>. This particular model was trained on
<a href="https://radiantearth.github.io/stac-browser/#/external/maxar-opendata.s3.amazonaws.com/events/Maui-Hawaii-fires-Aug-23/collection.json?.language=en">Maxar Open Data imagery</a>
captured over Lahaina during the Maui wildfires in August 2023 and on 106 polygon annotations
created over the same imagery. The toolkit details a workflow for quickly
modeling <i>any</i> new post-disaster imagery by:
<ul style='padding-left:20px'>
<li>Setting up a web labeling tool instance with the post-disaster imagery to facilitate rapid annotation of the imagery</li>
<li>Fine-tuning a building damage assessment model using the imagery and annotations</li>
<li>Running inference with the model over potentially large scenes</li>
<li>Merging and summarizing the output of the model using different building footprint layers</li>
</ul>
This workflow allows a user to consistently and rapidly create a model that performs well on a particular event
but is overfit to that event (i.e., it will not generalize to other events).
</p>
<p>
The model outputs per-pixel predictions of building damage, with the following classes:
<ul class="legend">
<li><span class="color-box background"></span>Background (transparent)</li>
<li><span class="color-box building"></span>Building (green)</li>
<li><span class="color-box damage"></span>Damage (red)</li>
</ul>
</p>
</div>
"""

iface = gr.Interface(
    fn=handle_image,
    inputs=image_input,
    outputs=image_output,
    title="Building damage assessment model demo -- Maui Wildfires 2023",
    examples=preexisting_images,
    css=css_content,
    description=html_content,
    allow_flagging="never",
)

# share=True additionally exposes the demo through a temporary public URL
iface.launch(share=True)
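# (For local-only testing, drop share=True; Gradio then serves only on
# http://127.0.0.1:7860 by default.)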