Siyun He committed
Commit 5ed34fb · 1 Parent(s): 52be6f8

add lip color

Files changed (1)
app.py +110 -6
app.py CHANGED
@@ -59,6 +59,108 @@ def change_glasses():
     overlay = cv2.imread(f'glasses/glass{num}.png', cv2.IMREAD_UNCHANGED)
     return overlay
 
+def change_lip_color(frame, color_name='none'):
+    # Define a mapping from color names to BGR values
+    color_map = {
+        'classic_red': (255, 0, 0),    # Classic red
+        'deep_red': (139, 0, 0),       # Deep red
+        'cherry_red': (205, 0, 0),     # Cherry red
+        'rose_red': (204, 102, 0),     # Rose red
+        'wine_red': (128, 0, 0),       # Wine red
+        'brick_red': (128, 64, 0),     # Brick red
+        'coral_red': (255, 128, 0),    # Coral red
+        'berry_red': (153, 0, 0),      # Berry red
+        'ruby_red': (255, 17, 0),      # Ruby red
+        'crimson_red': (220, 20, 60),  # Crimson red
+    }
+
+    # Get the BGR color from the color name
+    color = color_map.get(color_name, None)
+
+    # If 'none' is selected, return the original frame
+    if color is None:
+        return frame
+
+    # Convert to RGB for processing
+    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    results = face_mesh.process(frame_rgb)
+
+    if results.multi_face_landmarks:
+        for face_landmarks in results.multi_face_landmarks:
+            # Define the region for the upper lip using landmark indices
+            upper_lip_region = np.array([
+                (face_landmarks.landmark[61].x * frame.shape[1], face_landmarks.landmark[61].y * frame.shape[0]),
+                (face_landmarks.landmark[185].x * frame.shape[1], face_landmarks.landmark[185].y * frame.shape[0]),
+                (face_landmarks.landmark[40].x * frame.shape[1], face_landmarks.landmark[40].y * frame.shape[0]),
+                (face_landmarks.landmark[39].x * frame.shape[1], face_landmarks.landmark[39].y * frame.shape[0]),
+                (face_landmarks.landmark[37].x * frame.shape[1], face_landmarks.landmark[37].y * frame.shape[0]),
+                (face_landmarks.landmark[0].x * frame.shape[1], face_landmarks.landmark[0].y * frame.shape[0]),
+                (face_landmarks.landmark[267].x * frame.shape[1], face_landmarks.landmark[267].y * frame.shape[0]),
+                (face_landmarks.landmark[269].x * frame.shape[1], face_landmarks.landmark[269].y * frame.shape[0]),
+                (face_landmarks.landmark[270].x * frame.shape[1], face_landmarks.landmark[270].y * frame.shape[0]),
+                (face_landmarks.landmark[409].x * frame.shape[1], face_landmarks.landmark[409].y * frame.shape[0]),
+                (face_landmarks.landmark[291].x * frame.shape[1], face_landmarks.landmark[291].y * frame.shape[0]),
+                (face_landmarks.landmark[61].x * frame.shape[1], face_landmarks.landmark[61].y * frame.shape[0])
+            ], np.int32)
+
+            # Define the region for the lower lip using landmark indices
+            lower_lip_region = np.array([
+                (face_landmarks.landmark[61].x * frame.shape[1], face_landmarks.landmark[61].y * frame.shape[0]),
+                (face_landmarks.landmark[146].x * frame.shape[1], face_landmarks.landmark[146].y * frame.shape[0]),
+                (face_landmarks.landmark[91].x * frame.shape[1], face_landmarks.landmark[91].y * frame.shape[0]),
+                (face_landmarks.landmark[181].x * frame.shape[1], face_landmarks.landmark[181].y * frame.shape[0]),
+                (face_landmarks.landmark[84].x * frame.shape[1], face_landmarks.landmark[84].y * frame.shape[0]),
+                (face_landmarks.landmark[17].x * frame.shape[1], face_landmarks.landmark[17].y * frame.shape[0]),
+                (face_landmarks.landmark[314].x * frame.shape[1], face_landmarks.landmark[314].y * frame.shape[0]),
+                (face_landmarks.landmark[405].x * frame.shape[1], face_landmarks.landmark[405].y * frame.shape[0]),
+                (face_landmarks.landmark[321].x * frame.shape[1], face_landmarks.landmark[321].y * frame.shape[0]),
+                (face_landmarks.landmark[375].x * frame.shape[1], face_landmarks.landmark[375].y * frame.shape[0]),
+                (face_landmarks.landmark[291].x * frame.shape[1], face_landmarks.landmark[291].y * frame.shape[0]),
+                (face_landmarks.landmark[61].x * frame.shape[1], face_landmarks.landmark[61].y * frame.shape[0])
+            ], np.int32)
+
+            lip_region = np.concatenate((upper_lip_region, lower_lip_region), axis=0)
+
+            # Define the region for the teeth using landmark indices
+            teeth_region = np.array([
+                (face_landmarks.landmark[78].x * frame.shape[1], face_landmarks.landmark[78].y * frame.shape[0]),
+                (face_landmarks.landmark[95].x * frame.shape[1], face_landmarks.landmark[95].y * frame.shape[0]),
+                (face_landmarks.landmark[88].x * frame.shape[1], face_landmarks.landmark[88].y * frame.shape[0]),
+                (face_landmarks.landmark[178].x * frame.shape[1], face_landmarks.landmark[178].y * frame.shape[0]),
+                (face_landmarks.landmark[87].x * frame.shape[1], face_landmarks.landmark[87].y * frame.shape[0]),
+                (face_landmarks.landmark[14].x * frame.shape[1], face_landmarks.landmark[14].y * frame.shape[0]),
+                (face_landmarks.landmark[317].x * frame.shape[1], face_landmarks.landmark[317].y * frame.shape[0]),
+                (face_landmarks.landmark[402].x * frame.shape[1], face_landmarks.landmark[402].y * frame.shape[0]),
+                (face_landmarks.landmark[318].x * frame.shape[1], face_landmarks.landmark[318].y * frame.shape[0]),
+                (face_landmarks.landmark[324].x * frame.shape[1], face_landmarks.landmark[324].y * frame.shape[0]),
+                (face_landmarks.landmark[308].x * frame.shape[1], face_landmarks.landmark[308].y * frame.shape[0]),
+                (face_landmarks.landmark[78].x * frame.shape[1], face_landmarks.landmark[78].y * frame.shape[0])
+            ], np.int32)
+
+            # Create a mask for the lip region
+            lip_mask = np.zeros(frame.shape[:2], dtype=np.uint8)
+            cv2.fillPoly(lip_mask, [lip_region], 255)
+
+            # Create a mask for the teeth region
+            teeth_mask = np.zeros(frame.shape[:2], dtype=np.uint8)
+            cv2.fillPoly(teeth_mask, [teeth_region], 255)
+
+            # Subtract the teeth mask from the lip mask
+            final_mask = cv2.subtract(lip_mask, teeth_mask)
+
+            # Create a colored lip image
+            colored_lips = np.zeros_like(frame)
+            colored_lips[:] = color
+
+            # Apply the colored lips only to the lip region
+            lips_colored = cv2.bitwise_and(colored_lips, colored_lips, mask=final_mask)
+
+            # Combine the original frame with the colored lips
+            frame = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(final_mask))
+            frame = cv2.add(frame, lips_colored)
+
+    return frame
+
 # Process frame for overlay and face shape detection
 def process_frame(frame):
     global overlay
@@ -196,12 +298,12 @@ def save_frame(frame):
 
     return f"Frame saved as '{filename}'"
 
-
-
-# Gradio webcam input
-def webcam_input(frame, transform):
+def webcam_input(frame, transform, lip_color):
     frame, face_shape, glass_shape = process_frame(frame)
-    frame = transform_cv2(frame, transform)
+    if transform != "none" and lip_color == "none":
+        frame = transform_cv2(frame, transform)
+    elif lip_color != "none" and transform == "none":
+        frame = change_lip_color(frame, lip_color)
     return frame, face_shape, glass_shape
 
 # Gradio Interface
@@ -211,6 +313,8 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue"))
     with gr.Group(elem_classes=["my-group"]):
         transform = gr.Dropdown(choices=["cartoon", "edges", "sepia", "negative", "sketch", "blur", "none"],
                                 value="none", label="Select Filter")
+        lip_color = gr.Dropdown(choices=["classic_red", "deep_red", "cherry_red", "rose_red", "wine_red", "brick_red", "coral_red", "berry_red", "ruby_red", "crimson_red", "none"],
+                                value="none", label="Select Lip Color")
         gr.Markdown("Click the Webcam icon to start the camera, and then press the record button to start the virtual try-on.")
         input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True)
         gr.Markdown("Face Shape and Recommended Glass Shape")
@@ -219,7 +323,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue"))
         next_button = gr.Button("Next Glasses➡️")
         save_button = gr.Button("Save as a Picture📌")
 
-    input_img.stream(webcam_input, [input_img, transform], [input_img, face_shape_output, glass_shape_output], stream_every=0.1)
+    input_img.stream(webcam_input, [input_img, transform, lip_color], [input_img, face_shape_output, glass_shape_output], stream_every=0.1)
     with gr.Row():
         next_button.click(change_glasses, [], [])
     with gr.Row():
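
For a quick check of the new lip recoloring outside the Gradio app, the same landmark-to-mask idea can be exercised on a single photo. The sketch below is illustrative only and not part of this commit: it reuses the outer-lip landmark indices from change_lip_color, skips the teeth cut-out, creates its own FaceMesh instance in static-image mode, and assumes a local test image named face.jpg.

# Standalone sketch (illustrative, not from the commit): tint the lips in a photo.
import cv2
import numpy as np
import mediapipe as mp

# One-off FaceMesh instance for still images (the app keeps its own global face_mesh).
face_mesh = mp.solutions.face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1)

# Outer-lip landmark indices, taken from the upper/lower lip polygons in change_lip_color.
OUTER_LIPS = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291,
              375, 321, 405, 314, 17, 84, 181, 91, 146]

def tint_lips(image_bgr, color_bgr=(0, 0, 200)):
    h, w = image_bgr.shape[:2]
    results = face_mesh.process(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
    if not results.multi_face_landmarks:
        return image_bgr
    lm = results.multi_face_landmarks[0].landmark
    polygon = np.array([(int(lm[i].x * w), int(lm[i].y * h)) for i in OUTER_LIPS], np.int32)

    # Same mask-and-replace blend as change_lip_color, without the teeth subtraction.
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.fillPoly(mask, [polygon], 255)
    solid = np.zeros_like(image_bgr)
    solid[:] = color_bgr
    lips = cv2.bitwise_and(solid, solid, mask=mask)
    base = cv2.bitwise_and(image_bgr, image_bgr, mask=cv2.bitwise_not(mask))
    return cv2.add(base, lips)

if __name__ == "__main__":
    img = cv2.imread("face.jpg")  # assumed test image path
    if img is not None:
        cv2.imwrite("face_tinted.jpg", tint_lips(img))

Note that the color tuple in this sketch is in BGR order because cv2.imread returns BGR images; adjust the channel order if the input array is RGB, as it is for frames streamed from the Gradio webcam component.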