File size: 7,497 Bytes
7951466 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 |
# 3rd party dependencies
import pytest
import cv2
# project dependencies
from deepface import DeepFace
from deepface.commons.logger import Logger
# Module-level logger used by every test below for pass/fail reporting.
logger = Logger()
# Facial-recognition models, distance metrics and face-detector backends
# that the tests below iterate over.
models = ["VGG-Face", "Facenet", "Facenet512", "ArcFace", "GhostFaceNet"]
metrics = ["cosine", "euclidean", "euclidean_l2"]
detectors = ["opencv", "mtcnn"]
def test_different_facial_recognition_models():
    """Verify labelled same/different image pairs with every model-metric combination.

    Aggregated accuracy over all combinations must exceed human-level
    accuracy on the LFW benchmark (97.53%), otherwise the test fails.
    """
    # Each entry: (first image, second image, whether they show the same person).
    dataset = [
        ["dataset/img1.jpg", "dataset/img2.jpg", True],
        ["dataset/img5.jpg", "dataset/img6.jpg", True],
        ["dataset/img6.jpg", "dataset/img7.jpg", True],
        ["dataset/img8.jpg", "dataset/img9.jpg", True],
        ["dataset/img1.jpg", "dataset/img11.jpg", True],
        ["dataset/img2.jpg", "dataset/img11.jpg", True],
        ["dataset/img1.jpg", "dataset/img3.jpg", False],
        ["dataset/img2.jpg", "dataset/img3.jpg", False],
        ["dataset/img6.jpg", "dataset/img8.jpg", False],
        ["dataset/img6.jpg", "dataset/img9.jpg", False],
    ]
    expected_coverage = 97.53  # human level accuracy on LFW
    successful_tests = 0
    unsuccessful_tests = 0
    for model in models:
        for metric in metrics:
            # unpack the pair and its ground-truth label directly
            for img1, img2, expected in dataset:
                resp_obj = DeepFace.verify(img1, img2, model_name=model, distance_metric=metric)
                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]
                if prediction is expected:
                    test_result_label = "✅"
                    successful_tests += 1
                else:
                    test_result_label = "❌"
                    unsuccessful_tests += 1
                classified_label = "same person" if prediction is True else "different persons"
                # strip the leading "dataset/" folder for compact log lines
                img1_alias = img1.split("/", maxsplit=1)[-1]
                img2_alias = img2.split("/", maxsplit=1)[-1]
                logger.debug(
                    f"{test_result_label} Pair {img1_alias}-{img2_alias}"
                    f" is {classified_label} based on {model}-{metric}"
                    f" (Distance: {distance}, Threshold: {threshold})",
                )
    # percentage of correct predictions over every model-metric-pair trial
    coverage_score = (100 * successful_tests) / (successful_tests + unsuccessful_tests)
    assert (
        coverage_score > expected_coverage
    ), f"❌ facial recognition models test failed with {coverage_score} score"
    logger.info(f"✅ facial recognition models test passed with {coverage_score}")
def test_different_face_detectors():
    """Ensure DeepFace.verify returns a well-formed response for each detector backend."""
    for detector in detectors:
        res = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", detector_backend=detector)
        assert isinstance(res, dict)
        # top-level response schema
        assert "verified" in res.keys()
        assert res["verified"] in [True, False]
        for key in ["distance", "threshold", "model", "detector_backend", "similarity_metric"]:
            assert key in res.keys()
        # both detected facial areas must carry full bounding-box coordinates
        assert "facial_areas" in res.keys()
        for img_key in ["img1", "img2"]:
            assert img_key in res["facial_areas"].keys()
            for coordinate in ["x", "y", "w", "h"]:
                assert coordinate in res["facial_areas"][img_key].keys()
        logger.info(f"✅ test verify for {detector} backend done")
def test_verify_for_preloaded_image():
    """Verify accepts pre-loaded numpy images (BGR arrays from cv2) instead of file paths."""
    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")
    res = DeepFace.verify(img1, img2)
    # img1 and img2 are known to be the same person
    assert res["verified"] is True
    logger.info("✅ test verify for pre-loaded image done")
def test_verify_for_precalculated_embeddings():
    """Verify accepts pre-calculated embedding vectors in place of image paths."""
    model_name = "Facenet"
    img1_path = "dataset/img1.jpg"
    img2_path = "dataset/img2.jpg"
    # pre-compute embeddings with the same model that verify will use
    img1_embedding = DeepFace.represent(img_path=img1_path, model_name=model_name)[0]["embedding"]
    img2_embedding = DeepFace.represent(img_path=img2_path, model_name=model_name)[0]["embedding"]
    result = DeepFace.verify(
        img1_path=img1_embedding, img2_path=img2_embedding, model_name=model_name, silent=True
    )
    assert result["verified"] is True
    assert result["distance"] < result["threshold"]
    assert result["model"] == model_name
    # facial areas are still reported even though no detection ran on raw embeddings
    for img_key in ["img1", "img2"]:
        facial_area = result["facial_areas"][img_key]
        assert facial_area is not None
        assert isinstance(facial_area, dict)
        for key in ["x", "y", "w", "h", "left_eye", "right_eye"]:
            assert key in facial_area.keys()
    logger.info("✅ test verify for pre-calculated embeddings done")
def test_verify_with_precalculated_embeddings_for_incorrect_model():
    """Verify must reject embeddings whose dimensionality mismatches the requested model."""
    # generate embeddings with VGG (default) which produces 4096-dimensional vectors
    img1_path = "dataset/img1.jpg"
    img2_path = "dataset/img2.jpg"
    img1_embedding = DeepFace.represent(img_path=img1_path)[0]["embedding"]
    img2_embedding = DeepFace.represent(img_path=img2_path)[0]["embedding"]
    # Facenet expects 128 dimensions, so passing 4096-dim vectors must raise
    with pytest.raises(
        ValueError,
        match="embeddings of Facenet should have 128 dimensions, but 1-th image has 4096 dimensions input",
    ):
        _ = DeepFace.verify(
            img1_path=img1_embedding, img2_path=img2_embedding, model_name="Facenet", silent=True
        )
    logger.info("✅ test verify with pre-calculated embeddings for incorrect model done")
def test_verify_for_broken_embeddings():
    """Verify must reject embedding lists whose items are not floats."""
    img1_embeddings = ["a", "b", "c"]  # strings are not valid embedding components
    img2_embeddings = [1, 2, 3]
    with pytest.raises(
        ValueError,
        match="When passing img1_path as a list, ensure that all its items are of type float.",
    ):
        _ = DeepFace.verify(img1_path=img1_embeddings, img2_path=img2_embeddings)
    logger.info("✅ test verify for broken embeddings content is done")
def test_verify_for_nested_embeddings():
    """Verify must reject nested (batched) embedding lists.

    Batch embeddings are not supported: a list passed as img1_path must be a
    flat sequence of floats, not a list of vectors.
    """
    img1_embeddings = [[1, 2, 3], [4, 5, 6]]  # a batch of two vectors — invalid
    img2_path = "dataset/img1.jpg"
    with pytest.raises(
        ValueError,
        match="When passing img1_path as a list, ensure that all its items are of type float",
    ):
        _ = DeepFace.verify(img1_path=img1_embeddings, img2_path=img2_path)
    logger.info("✅ test verify for nested embeddings is done")
|