import spaces
import gradio as gr
import subprocess
from PIL import Image
import json
import mp_box
'''
Face detection based on face landmark detection.
https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker

From the model card:
https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
Licensed under the Apache License, Version 2.0.
Trained on Google's dataset (see the model card for details).

This is not based on the Face Detector solution:
https://ai.google.dev/edge/mediapipe/solutions/vision/face_detector
The landmarker is used because this is part of a landmark-extraction pipeline and the face edges need to be controlled.
I have not compared the two, so I don't know which one is better.
'''
#@spaces.GPU(duration=120)
def process_images(image, no_mesh_draw=False, square_shape=False, progress=gr.Progress(track_tqdm=True)):
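    """Detect faces with MediaPipe face landmarks.

    Returns an (annotated_image, annotation_boxes) pair for gr.AnnotatedImage plus the
    detected boxes serialized as a JSON string. `no_mesh_draw` skips the landmark overlay;
    `square_shape` selects the square-shaped box variants instead of the regular ones
    (inferred from the slicing of `boxes` below).
    """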
    if image is None:
        raise gr.Error("Need Image")
    progress(0, desc="Start Mediapipe")
    boxes, mp_image, face_landmarker_result = mp_box.mediapipe_to_box(image)
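    # mp_box is a local helper module; judging by the slicing below, `boxes` holds xywh
    # face boxes with the regular variants first and square-shaped variants from index 3.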
    if no_mesh_draw:
        annotated_image = image
    else:
        annotated_image = mp_box.draw_landmarks_on_image(face_landmarker_result, image)

    annotation_boxes = []
    jsons = {}
    index = 1
    print(boxes)
    if square_shape:
        xy_boxes = boxes[3:]
    else:
        xy_boxes = boxes[:3]
    print(len(xy_boxes))
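    # gr.AnnotatedImage expects (image, [(box_or_mask, label), ...]); each box is
    # converted here from xywh to the (x1, y1, x2, y2) pixel format it accepts.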
    for box in xy_boxes:
        label = f"type-{index}"
        print(box)
        print(mp_box.xywh_to_xyxy(box))
        annotation_boxes.append([mp_box.xywh_to_xyxy(box), label])
        jsons[label] = box  # the same box that is drawn (boxes[index-1] would ignore the square_shape offset)
        print(index)
        index += 1
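    # A tiny placeholder box labelled "None" is appended so the AnnotatedImage output
    # always has at least one annotation to render (purpose inferred, not documented).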
    annotation_boxes.append(([0, 0, 1, 1], "None"))
    #print(annotation_boxes)
    formatted_json = json.dumps(jsons, indent=1)
    #return image
    return [annotated_image, annotation_boxes], formatted_json
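# Example (sketch) of calling the handler outside the UI, assuming mp_box accepts a PIL
# image as wired below (the gr.Progress callback may behave differently outside an event):
#   img = Image.open("examples/00004200.jpg")
#   (annotated, annotations), face_json = process_images(img, no_mesh_draw=True)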
def read_file(file_path: str) -> str:
    """Read the text of the target file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content
css=""" | |
#col-left { | |
margin: 0 auto; | |
max-width: 640px; | |
} | |
#col-right { | |
margin: 0 auto; | |
max-width: 640px; | |
} | |
.grid-container { | |
display: flex; | |
align-items: center; | |
justify-content: center; | |
gap:10px | |
} | |
.image { | |
width: 128px; | |
height: 128px; | |
object-fit: cover; | |
} | |
.text { | |
font-size: 16px; | |
} | |
""" | |
#css=css,
with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        gr.HTML(read_file("demo_tools.html"))
        with gr.Row():
            with gr.Column():
                image = gr.Image(height=800, sources=['upload', 'clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
                with gr.Row(elem_id="prompt-container", equal_height=False):
                    with gr.Row():
                        btn = gr.Button("Face Detect", elem_id="run_button", variant="primary")
                with gr.Accordion(label="Advanced Settings", open=False):
                    with gr.Row(equal_height=True):
                        no_mesh_draw = gr.Checkbox(label="No Mesh Drawing")
                        square_shape = gr.Checkbox(label="Square shape")
            with gr.Column():
                image_out = gr.AnnotatedImage(label="Output", elem_id="output-img")
                text_out = gr.TextArea(label="JSON-Output")
        btn.click(fn=process_images, inputs=[image, no_mesh_draw, square_shape], outputs=[image_out, text_out], api_name='infer')
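        # api_name='infer' also exposes this handler on the Gradio API (callable as
        # "/infer", e.g. via gradio_client); the inputs map to the image and the two checkboxes.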
        gr.Examples(
            examples=["examples/00004200.jpg"],
            inputs=[image]
        )
        gr.HTML(read_file("demo_footer.html"))

if __name__ == "__main__":
    demo.launch()