import gradio as gr
from transformers import pipeline
from PIL import Image
import cv2
import numpy as np
import os

# Controls whether the intermediate image files are removed after each request
REMOVE_IMAGES = True


def classify_face_shape(image):
    # Note: each classifier reads the cropped file written by
    # create_face_and_eye_region rather than the `image` argument
    pipe = pipeline("image-classification", model="metadome/face_shape_classification")
    output = pipe("face_region.jpg")
    # Log the output for debugging
    print("Pipeline output for shape:", output)
    # Format the output as a {label: score} dict, compatible with gr.Label
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_age(image):
    pipe = pipeline("image-classification", model="nateraw/vit-age-classifier")
    output = pipe("face_region.jpg")
    print("Pipeline output for age:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_skin_type(image):
    pipe = pipeline("image-classification", model="dima806/skin_types_image_detection")
    #pipe = pipeline("image-classification", model="justingrammens/resnet50-skin-classification")
    output = pipe("face_region.jpg")
    print("Pipeline output for skin_type:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_acne_type(image):
    pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-acne")
    #pipe = pipeline("image-classification", model="afscomercial/dermatologic")
    output = pipe("face_region.jpg")
    print("Pipeline output for acne:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_hair_color(image):
    #pipe = pipeline("image-classification", model="enzostvs/hair-color")
    pipe = pipeline("image-classification", model="londe33/hair_v02")
    # Hair color needs the full upload, not the face crop
    output = pipe("uploaded_image.jpg")
    print("Pipeline output for hair color:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_eye_shape(image):
    pipe = pipeline("image-classification", model="justingrammens/eye-shape")
    output = pipe("eye_regions.jpg")
    print("Pipeline output for eye shape:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_eye_color(image):
    pipe = pipeline("image-classification", model="justingrammens/eye-color")
    output = pipe("eye_regions.jpg")
    print("Pipeline output for eye color:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def process_gradio_image(pil_image):
    # Convert the PIL image to a NumPy array, then RGB (PIL) -> BGR (OpenCV default)
    image = np.array(pil_image)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image


def classify_race(image):
    pipe = pipeline("image-classification", model="cledoux42/Ethnicity_Test_v003")
    output = pipe("face_region.jpg")
    formatted_output = {item['label']: item['score'] for item in output}
    print(formatted_output)
    return formatted_output
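# Note: a sketch of an alternative, not what the functions above do. The
# transformers image-classification pipeline also accepts PIL.Image objects
# directly, so the classifiers that use the full upload could skip the
# temp-file round trip, e.g.:
#
#   pipe = pipeline("image-classification", model="rizvandwiki/gender-classification")
#   output = pipe(image)  # `image` is the PIL.Image that Gradio passes in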
def classify_gender(image):
    pipe = pipeline("image-classification", model="rizvandwiki/gender-classification")
    output = pipe("uploaded_image.jpg")
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def classify_wrinkles(image):
    pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-wrinkles")
    output = pipe("forehead_and_eyes.jpg")
    print("Pipeline output for wrinkles:", output)
    formatted_output = {item['label']: item['score'] for item in output}
    return formatted_output


def remove_prior_segmented_face():
    # Remove the segmented face left over from a previous request
    image_files = ["segmented_face.jpg"]
    for image_file in image_files:
        if os.path.exists(image_file):
            os.remove(image_file)


def classify_image_with_multiple_models(image):
    try:
        image.save("uploaded_image.jpg")
        remove_prior_segmented_face()
        faces = find_faces(image)
        create_face_and_eye_region(image, faces)
        face_shape_result = classify_face_shape(image)
        age_result = classify_age(image)
        skin_type_result = classify_skin_type(image)
        acne_results = classify_acne_type(image)
        hair_color_results = classify_hair_color(image)
        eye_shape = classify_eye_shape(image)
        eye_color = classify_eye_color(image)
        race = classify_race(image)
        gender = classify_gender(image)
        wrinkles = classify_wrinkles(image)
    except Exception as e:
        # Load the upload eagerly; the finally block deletes the file, and a
        # lazily-opened PIL image might no longer be readable afterwards
        error_image = Image.open("uploaded_image.jpg")
        error_image.load()
        return (str(e), str(e), str(e), str(e), str(e), str(e), str(e),
                str(e), str(e), str(e), error_image)
    finally:
        if REMOVE_IMAGES:
            image_files = ["uploaded_image.jpg", "face_region.jpg",
                           "forehead_and_eyes.jpg", "eye_regions.jpg"]
            for image_file in image_files:
                if os.path.exists(image_file):
                    os.remove(image_file)
    return (face_shape_result, age_result, skin_type_result, acne_results,
            hair_color_results, eye_shape, eye_color, race, gender, wrinkles,
            Image.open("segmented_face.jpg"))


def find_faces(image):
    # Load the pre-trained Haar cascade face detector
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    gradio_image = process_gradio_image(image)
    # The cascade works on grayscale input
    gray = cv2.cvtColor(gradio_image, cv2.COLOR_BGR2GRAY)
    # Detect faces; more testing is needed to determine the best parameters
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    #faces = face_cascade.detectMultiScale(gray, 1.2, 5)
    #faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)
    print("Number of faces found:", len(faces))
    if len(faces) == 0:
        raise ValueError("No faces found in the image.")
    return faces


def create_face_and_eye_region(image, faces):
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
    gradio_image = process_gradio_image(image)
    gray = cv2.cvtColor(gradio_image, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in faces:
        # Draw a rectangle around the face
        cv2.rectangle(gradio_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Extract and save the face region
        face_roi = gradio_image[y:y + h, x:x + w]
        cv2.imwrite('face_region.jpg', face_roi)
        # The forehead and eye bands are each defined as 25% of the face height
        forehead_height = int(h * 0.25)
        eyes_height = int(h * 0.25)
        # Extract the forehead and eyes region together
        forehead_and_eyes_height = forehead_height + eyes_height
        forehead_and_eyes_roi = face_roi[0:forehead_and_eyes_height, 0:w]
        cv2.imwrite('forehead_and_eyes.jpg', forehead_and_eyes_roi)

        # Region of Interest (ROI) for the face
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = gradio_image[y:y + h, x:x + w]

        # Detect eyes in the face ROI
        eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))

        # Keep only detections in the upper half of the face region
        eye_positions = []
        for (ex, ey, ew, eh) in eyes:
            if ey + eh < h // 2:
                eye_positions.append((ex, ey, ew, eh))

        # Iterate over the filtered detections (not the raw `eyes` list)
        for (ex, ey, ew, eh) in eye_positions:
            # Copy the eye region before drawing, so the green rectangle does
            # not skew the average-color calculation below
            eye_roi = roi_color[ey:ey + eh, ex:ex + ew].copy()
            # Draw a rectangle around the eye
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            cv2.imwrite('eye_regions.jpg', eye_roi)
            # Calculate the average BGR color of the eye region
            avg_color = np.mean(eye_roi, axis=(0, 1))
            print("Average color:", avg_color)
            color = classify_eye_color_opencv(avg_color)
            # If we wish, we can write the eye color on the processed image:
            #cv2.putText(gradio_image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)

    cv2.imwrite('segmented_face.jpg', gradio_image)


def classify_eye_color_opencv(avg_color):
    """
    Classify eye color based on average BGR values from a cv2 image.

    Args:
        avg_color: NumPy array containing [B, G, R] values

    Returns:
        str: classified eye color
    """
    # Unpack the channels (OpenCV order is BGR, not RGB)
    b, g, r = avg_color

    # The thresholds below may need adjustment for your lighting conditions.

    # Brown eyes (darker, red-dominant)
    if r > g and r > b and r > 100:
        if g < 90 and b < 90:
            return "brown"

    # Amber eyes (golden-brown)
    if r > 150 and g > 100 and b < 100:
        if r > g > b:
            return "amber"

    # Hazel eyes (mix of brown and green)
    if g > 100 and r > 100 and b < 100:
        if abs(r - g) < 40:
            return "hazel"

    # Green eyes (green-dominant)
    if g > r and g > b:
        if g > 100:
            return "green"

    # Blue eyes (blue-dominant)
    if b > r and b > g:
        if b > 100:
            return "blue"

    # Gray eyes (all channels similar and fairly bright)
    if abs(r - g) < 20 and abs(g - b) < 20 and abs(r - b) < 20:
        if r > 100 and g > 100 and b > 100:
            return "gray"

    return "undefined"


# Create the Gradio interface
demo = gr.Interface(
    fn=classify_image_with_multiple_models,  # The function to run
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="Face Shape"),
        gr.Label(num_top_classes=5, label="Age"),
        gr.Label(num_top_classes=3, label="Skin Type"),
        gr.Label(num_top_classes=5, label="Acne Type"),
        gr.Label(num_top_classes=5, label="Hair Color"),
        gr.Label(num_top_classes=4, label="Eye Shape"),
        gr.Label(num_top_classes=5, label="Eye Color"),
        gr.Label(num_top_classes=7, label="Race"),
        gr.Label(num_top_classes=2, label="Gender"),
        gr.Label(num_top_classes=2, label="Wrinkles"),
        # Populated from the function's return value; no initial value needed
        gr.Image(type="pil", label="Segmented Face"),
    ],
    title="Multiple Model Classification",
    description="Upload an image to classify the face using multiple classification models",
)

#demo.launch(auth=("admin", "pass1234"))
demo.launch()
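# Hypothetical sanity checks for the eye-color heuristic (values invented for
# illustration; avg_color is in BGR order). Run them separately, since
# demo.launch() above blocks:
#
#   print(classify_eye_color_opencv(np.array([60.0, 70.0, 140.0])))   # -> "brown"
#   print(classify_eye_color_opencv(np.array([160.0, 90.0, 80.0])))   # -> "blue"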