|  | 
# Python project: match webcam frames against multiple target face images (no grayscale conversion)
|  | 2 | + | 
|  | 3 | + | 
|  | 4 | +import cv2 | 
|  | 5 | + | 
|  | 6 | +# Define the paths of the target images | 
# Define the paths of the target images
target_image_paths = [
    'C:\\Users\\Kushagra pathak\\Desktop\\python\\testimg\\765f28e4-33d3-4a9c-be0e-4bc9e6cee4fe.jfif',
    'C:\\Users\\Kushagra pathak\\Desktop\\python\\testimg\\93c4ec94-5e2e-4e44-9baf-6f7f43eeb5c7.jfif',
    'C:\\Users\\Kushagra pathak\\Desktop\\python\\testimg\\d612eb6b-089c-4d91-b435-a03d51e01adf.jfif',
    # Add more image paths as needed
]

# Parallel lists: target_images[i], target_keypoints[i] and
# target_descriptors[i] all describe the same successfully loaded target.
target_images = []
target_keypoints = []
target_descriptors = []

# ORB produces binary descriptors, which is why the matcher below uses
# NORM_HAMMING.
orb = cv2.ORB_create()

# Load each target image and precompute its ORB keypoints/descriptors.
for path in target_image_paths:
    target_image = cv2.imread(path)
    if target_image is None:
        # cv2.imread returns None (it does NOT raise) for a missing or
        # unreadable file; skip it instead of crashing in detectAndCompute.
        print("Warning: could not read target image:", path)
        continue
    keypoints_target, descriptors_target = orb.detectAndCompute(target_image, None)
    # detectAndCompute returns descriptors=None when no features are found.
    if descriptors_target is not None:
        target_images.append(target_image)
        target_keypoints.append(keypoints_target)
        target_descriptors.append(descriptors_target)
|  | 30 | + | 
# Brute-force matcher with Hamming distance (the correct norm for ORB's
# binary descriptors); crossCheck keeps only mutually-best matches.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Initialize the default camera.
camera = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame.
    ret, frame = camera.read()
    if not ret:
        # Camera unavailable or stream ended; stop instead of passing a
        # None frame into detectAndCompute.
        break

    # Detect ORB features in the live frame.
    keypoints_frame, descriptors_frame = orb.detectAndCompute(frame, None)

    best_match_idx = None            # index of the best-matching target image
    best_match = None                # the best cv2.DMatch found so far
    best_match_distance = float('inf')

    # descriptors_frame is None when the frame has no detectable features;
    # bf.match would raise on a None query, so guard the whole comparison.
    if descriptors_frame is not None:
        # Compare the frame against every target; keep the target whose
        # strongest (lowest-distance) match is best overall.
        for i, descriptors_target in enumerate(target_descriptors):
            matches = bf.match(descriptors_frame, descriptors_target)
            if not matches:
                # crossCheck=True can leave zero mutual matches.
                continue
            matches = sorted(matches, key=lambda x: x.distance)
            if matches[0].distance < best_match_distance:
                best_match_idx = i
                # BUG FIX: remember the winning DMatch itself. The original
                # drew `matches[0]` from whichever target happened to be
                # iterated LAST, not from the best-matching target.
                best_match = matches[0]
                best_match_distance = matches[0].distance

    if best_match_idx is not None:
        # Draw the single best match between the frame and the winning target.
        matched_frame = cv2.drawMatches(
            frame, keypoints_frame,
            target_images[best_match_idx], target_keypoints[best_match_idx],
            [best_match], None, flags=2
        )
        cv2.imshow('Object Detection', matched_frame)
    else:
        cv2.imshow('Object Detection', frame)

    # Check for the 'q' key to exit the program.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close all windows.
camera.release()
cv2.destroyAllWindows()
0 commit comments