import spaces  # Hugging Face Spaces helper for the (commented-out) @spaces.GPU decorator
import gradio as gr
import json

import mp_box
'''
Face detection based on face landmark detection.
https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker

From the model card:
https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
Licensed under the Apache License, Version 2.0.
Trained on Google's dataset (see the model card for details).

This does not use the dedicated Face Detector:
https://ai.google.dev/edge/mediapipe/solutions/vision/face_detector

The landmarker is used because this is part of a landmark-extraction pipeline
that needs control over the face edges. The two approaches have never been
compared here, so it is unclear which one is better.
'''
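
# A minimal sketch (not called by this app) of the MediaPipe Tasks API that
# mp_box presumably wraps. "face_landmarker.task" is an assumed local model
# path; the model file is downloadable from the face_landmarker page above.
def _face_landmarker_sketch(image_path: str):
    import mediapipe as mp
    from mediapipe.tasks import python as mp_python
    from mediapipe.tasks.python import vision

    options = vision.FaceLandmarkerOptions(
        base_options=mp_python.BaseOptions(model_asset_path="face_landmarker.task"),
        num_faces=1,
    )
    detector = vision.FaceLandmarker.create_from_options(options)
    mp_image = mp.Image.create_from_file(image_path)
    # FaceLandmarkerResult.face_landmarks holds normalized [0, 1] coordinates.
    return detector.detect(mp_image)
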
#@spaces.GPU(duration=120)
def process_images(image, no_mesh_draw=False, square_shape=False,
                   progress=gr.Progress(track_tqdm=True)):
    if image is None:
        raise gr.Error("Need Image")

    progress(0, desc="Start Mediapipe")

    boxes, mp_image, face_landmarker_result = mp_box.mediapipe_to_box(image)
    if no_mesh_draw:
        annotated_image = image
    else:
        annotated_image = mp_box.draw_landmarks_on_image(face_landmarker_result, image)

    # mp_box appears to return the xywh face boxes first, followed by their
    # square variants (three of each).
    if square_shape:
        xy_boxes = boxes[3:]
    else:
        xy_boxes = boxes[:3]

    # gr.AnnotatedImage expects annotations as (xyxy-box, label) pairs.
    annotation_boxes = []
    jsons = {}
    for index, box in enumerate(xy_boxes, start=1):
        label = f"type-{index}"
        annotation_boxes.append([mp_box.xywh_to_xyxy(box), label])
        jsons[label] = box  # keep the selected xywh box for the JSON output

    # Placeholder entry labeled "None" so the output always has a fallback region.
    annotation_boxes.append(([0, 0, 1, 1], "None"))
    formatted_json = json.dumps(jsons, indent=1)
    return [annotated_image, annotation_boxes], formatted_json
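
# A hypothetical equivalent of mp_box.xywh_to_xyxy, shown for reference only
# (assumption: mp_box stores boxes as [x, y, width, height]):
def _xywh_to_xyxy_sketch(box):
    x, y, w, h = box
    return [x, y, x + w, y + h]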
    

def read_file(file_path: str) -> str:
    """Read the text content of the target file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        return f.read()

css="""
#col-left {
    margin: 0 auto;
    max-width: 640px;
}
#col-right {
    margin: 0 auto;
    max-width: 640px;
}
.grid-container {
  display: flex;
  align-items: center;
  justify-content: center;
  gap:10px
}

.image {
  width: 128px; 
  height: 128px; 
  object-fit: cover; 
}

.text {
  font-size: 16px;
}
"""

with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        gr.HTML(read_file("demo_tools.html"))
    with gr.Row():
        with gr.Column():
            image = gr.Image(height=800, sources=['upload', 'clipboard'],
                             image_mode='RGB', elem_id="image_upload",
                             type="pil", label="Upload")
            with gr.Row(elem_id="prompt-container", equal_height=False):
                with gr.Row():
                    btn = gr.Button("Face Detect", elem_id="run_button", variant="primary")

            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row(equal_height=True):
                    no_mesh_draw = gr.Checkbox(label="No Mesh Drawing")
                    square_shape = gr.Checkbox(label="Square shape")

        with gr.Column():
            image_out = gr.AnnotatedImage(label="Output", elem_id="output-img")
            text_out = gr.TextArea(label="JSON-Output")

    btn.click(fn=process_images, inputs=[image, no_mesh_draw, square_shape],
              outputs=[image_out, text_out], api_name='infer')
    gr.Examples(
        examples=["examples/00004200.jpg"],
        inputs=[image],
    )
    gr.HTML(read_file("demo_footer.html"))

if __name__ == "__main__":
    demo.launch()
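
# Because btn.click registers api_name='infer', the endpoint can also be called
# programmatically. A hedged sketch with gradio_client (the URL is an assumption,
# and handle_file requires a recent gradio_client version):
#
#   from gradio_client import Client, handle_file
#   client = Client("http://127.0.0.1:7860/")
#   annotated, json_text = client.predict(
#       handle_file("examples/00004200.jpg"),  # image
#       False,                                 # no_mesh_draw
#       False,                                 # square_shape
#       api_name="/infer",
#   )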