songhieng committed on
Commit 0f813fa · verified · 1 parent: 9b5823d

Create app.py

Files changed (1)
app.py +119 -0
app.py ADDED
@@ -0,0 +1,119 @@
+ import face_recognition
+ import numpy as np
+ import pickle
+ from mtcnn import MTCNN
+ from PIL import Image
+ import cv2
+ import faiss
+ import imgaug.augmenters as iaa
+ import gradio as gr
+
+ def detect_and_align_face(image_path):
+     """Detect the first face in an image file and rotate it so the eyes are level."""
+     detector = MTCNN()
+     image = cv2.imread(image_path)
+     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+     detections = detector.detect_faces(image_rgb)
+
+     if len(detections) == 0:
+         raise ValueError("No face detected in the image.")
+
+     detection = detections[0]
+     x, y, width, height = detection['box']
+     keypoints = detection['keypoints']
+
+     # Rotation angle from the line joining the two eyes.
+     left_eye = keypoints['left_eye']
+     right_eye = keypoints['right_eye']
+     delta_x = right_eye[0] - left_eye[0]
+     delta_y = right_eye[1] - left_eye[1]
+     angle = np.arctan2(delta_y, delta_x) * (180.0 / np.pi)
+
+     # Rotate the full image around the face centre, then re-crop the face box.
+     center = (x + width // 2, y + height // 2)
+     rot_matrix = cv2.getRotationMatrix2D(center, angle, scale=1.0)
+     aligned_image = cv2.warpAffine(image_rgb, rot_matrix, (image_rgb.shape[1], image_rgb.shape[0]))
+     aligned_face = aligned_image[y:y+height, x:x+width]
+
+     return Image.fromarray(aligned_face)
+
+ def load_encodings(file_path):
+     """Load known face encodings and their labels from a pickle file."""
+     with open(file_path, "rb") as file:
+         data = pickle.load(file)
+     return np.array(data["encodings"]), data["labels"]
+
+ def save_encodings(encodings, labels, file_path):
+     """Persist encodings and labels back to the pickle file."""
+     data = {"encodings": encodings, "labels": labels}
+     with open(file_path, "wb") as file:
+         pickle.dump(data, file)
+
+ def create_faiss_index(known_encodings):
+     """Build a flat L2 FAISS index over the known 128-d face encodings."""
+     dimension = known_encodings.shape[1]
+     index = faiss.IndexFlatL2(dimension)
+     # FAISS expects contiguous float32 rows; face_recognition returns float64.
+     index.add(np.ascontiguousarray(known_encodings, dtype=np.float32))
+     return index
+
+ def encode_face(image):
+     """Return the 128-d encoding of the first face found, or None."""
+     encodings = face_recognition.face_encodings(np.array(image))
+     return encodings[0] if encodings else None
+
+ def augment_image(image, num_augmented=5):
+     """Generate randomly augmented copies of an image to enrich the dataset."""
+     image = np.array(image)
+     aug = iaa.Sequential([
+         iaa.Fliplr(0.5),                                   # horizontal flips
+         iaa.Affine(rotate=(-25, 25)),                      # rotation
+         iaa.AdditiveGaussianNoise(scale=(0, 0.05 * 255)),  # noise
+         iaa.Multiply((0.8, 1.2)),                          # brightness
+         iaa.GaussianBlur(sigma=(0.0, 1.0))                 # blur
+     ])
+     return [Image.fromarray(aug(image=image)) for _ in range(num_augmented)]
+
+ def update_dataset_with_verified_image(image, encodings_file, label, num_augmented=5):
+     """Append a verified image, plus augmented copies, to the stored encodings."""
+     known_encodings, known_labels = load_encodings(encodings_file)
+     for img in [image] + augment_image(image, num_augmented=num_augmented):
+         encoding = encode_face(img)
+         if encoding is None:
+             continue  # augmentation can make a face undetectable; skip those copies
+         known_encodings = np.append(known_encodings, [encoding], axis=0)
+         known_labels.append(label)
+     save_encodings(known_encodings, known_labels, encodings_file)
+
+ def verify_face_with_faiss(image, encodings_file, similarity_threshold=70, num_augmented=5):
+     """Verify a face against the stored encodings via a FAISS nearest-neighbour search."""
+     encodings = face_recognition.face_encodings(np.array(image.convert("RGB")))
+     if not encodings:
+         return False, "No face detected in the uploaded image."
+     target_encoding = np.ascontiguousarray(encodings[0].reshape(1, -1), dtype=np.float32)
+
+     known_encodings, known_labels = load_encodings(encodings_file)
+     index = create_faiss_index(known_encodings)
+
+     # Nearest neighbour only; note that IndexFlatL2 returns *squared* L2 distances.
+     distances, indices = index.search(target_encoding, 1)
+     best_match_index = indices[0][0]
+     # Heuristic mapping from squared distance to a percentage score;
+     # face_recognition encodings of the same person are typically < 0.6 apart.
+     best_similarity_percentage = (1 - distances[0][0]) * 100
+
+     if best_similarity_percentage >= similarity_threshold:
+         matched_label = known_labels[best_match_index]
+         # Feed the verified image back into the dataset so recognition improves over time.
+         update_dataset_with_verified_image(image, encodings_file, matched_label, num_augmented=num_augmented)
+         return True, f"Match found: {matched_label}, Similarity: {best_similarity_percentage:.2f}%"
+     return False, "No match found."
+
+ # Gradio callback: verify the uploaded image against face_encoding.pkl.
+ def gradio_interface(image, similarity_threshold=70):
+     encodings_file = "face_encoding.pkl"
+     _, message = verify_face_with_faiss(image, encodings_file, similarity_threshold=similarity_threshold)
+     return message
+
+ # Build and launch the Gradio interface.
+ iface = gr.Interface(fn=gradio_interface,
+                      inputs=[gr.Image(type="pil"), gr.Slider(0, 100, value=70, label="Similarity Threshold")],
+                      outputs="text",
+                      title="Face Recognition with MTCNN and FAISS",
+                      description="Upload an image to see if it matches any face in the database.")
+
+ if __name__ == "__main__":
+     iface.launch()
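
Note: the app loads a `face_encoding.pkl` file with `encodings` and `labels` keys, but nothing in this commit creates it. Below is a minimal bootstrap sketch for generating that file, assuming a hypothetical `known_faces/<label>/` folder of reference photos (the directory name and layout are illustrative); only `encode_face` and `save_encodings` come from `app.py` above, which the `if __name__ == "__main__":` guard makes importable without launching the UI.

bootstrap_encodings.py (hypothetical, not part of this commit):

import os
import numpy as np
from PIL import Image
from app import encode_face, save_encodings

root = "known_faces"  # hypothetical layout: known_faces/<label>/<photos of that person>
encodings, labels = [], []
for label in sorted(os.listdir(root)):
    person_dir = os.path.join(root, label)
    if not os.path.isdir(person_dir):
        continue
    for filename in os.listdir(person_dir):
        # Skip photos where no face is found rather than failing the whole run.
        encoding = encode_face(Image.open(os.path.join(person_dir, filename)).convert("RGB"))
        if encoding is not None:
            encodings.append(encoding)
            labels.append(label)

save_encodings(np.array(encodings), labels, "face_encoding.pkl")
print(f"Stored {len(labels)} encodings for {len(set(labels))} labels.")

Running this on a Space also presumes the imports above are covered by a requirements.txt along the lines of face-recognition, mtcnn, faiss-cpu, imgaug, opencv-python-headless, gradio, Pillow and numpy.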