import gradio as gr
import torch
from torchvision import transforms
from PIL import Image
import numpy as np
from trainer import CustomSemanticSegmentationTask
# Load a pre-trained semantic segmentation model
task = CustomSemanticSegmentationTask.load_from_checkpoint("maui_demo_model.ckpt", map_location="cpu")
task.freeze()
model = task.model
model = model.eval()
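# The checkpoint is assumed to hold a segmentation network whose forward pass
# returns per-pixel class logits; the 4-entry colormap used below implies a
# 4-class output (nodata, background, building, damage).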
# Define the image transformations
preprocess = transforms.Compose([
    transforms.ToTensor(),
])
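# ToTensor() converts an RGB PIL image to a CHW float tensor scaled to [0, 1];
# no further normalization is applied, which assumes the model was trained on
# [0, 1]-scaled inputs.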
# Function to perform semantic segmentation
def segment_image(image):
    input_tensor = preprocess(image).unsqueeze(0)
    with torch.inference_mode():
        output = model(input_tensor)
    output_predictions = output.argmax(1).squeeze().numpy()
    return output_predictions
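# Example (hypothetical local check, assuming "crop1.png" is one of the 3-band
# RGB example images shipped alongside this app):
#   mask = segment_image(Image.open("crop1.png").convert("RGB"))
#   print(mask.shape, np.unique(mask))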
# Preexisting images
preexisting_images = ["crop1.png", "crop2.png", "crop3.png"]
# Function to handle user input and run the model
def handle_image(image):
    image = Image.open(image).convert("RGB")  # ensure a 3-band input for the model
    mask = segment_image(image)
    # Decode the segmentation output
    colormap = np.array([
        [0, 0, 0],  # nodata
        [0, 0, 0],  # background
        [0, 255, 0],  # building
        [255, 0, 0],  # damage
    ])
    output = colormap[mask].astype('uint8')
    segmented_image = np.array(image)
    segmented_image[mask > 1] = (0.5 * output[mask > 1]) + (0.5 * segmented_image[mask > 1])
    segmented_image = Image.fromarray(segmented_image)
    return segmented_image
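# Example (hypothetical local check; handle_image takes a file path because the
# Gradio input below uses type="filepath"):
#   overlay = handle_image("crop1.png")
#   overlay.save("crop1_overlay.png")  # assumed output path, for illustration only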
# Create the Gradio interface
image_input = gr.Image(type="filepath", label="Upload an Image", sources=["upload"])
image_output = gr.Image(type="pil", label="Output")
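# type="filepath" means Gradio passes the uploaded image to handle_image as a path
# string; type="pil" means the returned PIL image is rendered directly.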
css_content = """
.legend {
    list-style: none;
    padding: 0;
}
.legend li {
    line-height: 20px; /* Match the height of the color-box */
}
.legend .color-box {
    display: inline-block;
    width: 20px;
    height: 20px;
    margin-right: 5px;
    border: 1px solid #000; /* Optional: adds a border around the color box */
    vertical-align: middle; /* Centers the box vertically relative to the text */
}
.background { background-color: #FFFFFF; } /* White */
.building { background-color: #00FF00; } /* Green */
.damage { background-color: #FF0000; } /* Red */
"""
html_content = """
<div style="font-size:large;">
  <p>
    This application demonstrates the input and output of the building damage assessment model trained through the tutorial of the
    <a href="https://github.com/microsoft/building-damage-assessment/" target="_blank">Microsoft AI for Good Building Damage
    Assessment Toolkit</a>. This particular model was trained on
    <a href="https://radiantearth.github.io/stac-browser/#/external/maxar-opendata.s3.amazonaws.com/events/Maui-Hawaii-fires-Aug-23/collection.json?.language=en">Maxar Open Data imagery</a>
    captured over Lahaina during the Maui Wildfires in August 2023 and 106 polygon annotations
    created over the same imagery. The "Building Damage Assessment Toolkit" details a workflow for quickly
    modeling <i>any</i> new post-disaster imagery by:
  </p>
  <ul style='padding-left:20px'>
    <li>Setting up a web labeling tool instance with the post-disaster imagery to facilitate rapid annotation of the imagery</li>
    <li>Fine-tuning a building damage assessment model using the imagery and annotations</li>
    <li>Running inference with the model over potentially large scenes</li>
    <li>Merging and summarizing the output of the model using different building footprint layers</li>
  </ul>
  <p>
    This workflow allows a user to consistently and rapidly create a good model for a particular event,
    but one that is overfit to that event (i.e., it will not generalize to other events).
  </p>
  <p>
    The model outputs per-pixel predictions of building damage, with the following classes:
  </p>
  <ul class="legend">
    <li><span class="color-box background"></span>Background (transparent)</li>
    <li><span class="color-box building"></span>Building (green)</li>
    <li><span class="color-box damage"></span>Damage (red)</li>
  </ul>
</div>
"""
iface = gr.Interface(
    fn=handle_image,
    inputs=image_input,
    outputs=image_output,
    title="Building damage assessment model demo -- Maui Wildfires 2023",
    examples=preexisting_images,
    css=css_content,
    description=html_content,
    allow_flagging="never",
)
# Launch the app
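# share=True additionally requests a temporary public Gradio link alongside the local server.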
iface.launch(share=True)