#!/usr/bin/env python3

import cv2
import numpy as np
import face_recognition
import os
import sys
import time
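
# Two modes, selected by the first command line argument:
#   match <path>  - compare faces seen by the camera against the reference
#                   image at <path>; exits 0 on a match, 1 otherwise.
#   record <path> - capture a reference image from the camera and save it
#                   to <path>.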
def match(path, cap):
    # Load the reference image and compute its face encoding.
    face = face_recognition.load_image_file(path)
    faceencs = face_recognition.face_encodings(face)
    if len(faceencs) == 0:
        print("Oh noes, found no face in " + path + "!")
        exit(1)
    faceenc = faceencs[0]
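
    # Keep reading frames until one of them contains a face that is close
    # enough to the reference encoding.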
    matching = False
    while not matching:
        ret, frame = cap.read()
        # Skip failed reads and frames that are almost completely dark.
        if not ret or cv2.mean(frame)[0] < 30:
            continue
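
        # Face detection runs on a downscaled copy of the frame to keep the
        # per-frame work cheap; box coordinates are scaled back up when drawing.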
        scale = 0.25
        # The capture is assumed to deliver single-channel (grayscale) frames,
        # e.g. from an IR camera; face_recognition expects RGB images.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # See if the face is a match for the known face(s)
            dist = face_recognition.face_distance([faceenc], frameenc)[0]
            print("Distance: " + str(dist))
            cv2.rectangle(
                rgb_frame,
                (int(left / scale), int(top / scale)),
                (int(right / scale), int(bottom / scale)),
                (0, 0, 255), 2)
            # Accept the face if it is within the match threshold.
            if dist <= 0.35:
                matching = True
| cv2.imshow("frame", rgb_frame) | |||||
| if cv2.waitKey(1) & 0xFF == ord('q'): | |||||
| break | |||||
| # When everything done, release the capture | |||||
| cap.release() | |||||
| cv2.destroyAllWindows() | |||||
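
    # The result is reported through the exit status so a calling script can
    # branch on it: 0 means the camera saw a matching face, 1 means it did not.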
    if matching:
        print("Matches")
        exit(0)
    else:
        exit(1)
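
# record: show a live preview; Enter freezes the current frame and shows any
# detected faces, Enter again saves it to <path>, Esc returns to the preview,
# and q quits without saving.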
def record(path, cap):
    # Show the given frame with a box drawn around each detected face.
    def draw_face_rec(name, frame):
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        frameencs = face_recognition.face_encodings(rgb_frame, framelocs)
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.imshow(name, rgb_frame)
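
    # Preview loop: read frames until the user picks one to save or quits.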
    while True:
        ret, frame = cap.read()
        # Skip failed reads and frames that are almost completely dark.
        if not ret or cv2.mean(frame)[0] < 30:
            continue

        cv2.imshow("frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    cv2.imwrite(path, frame)
                    return
                elif key == 27:  # esc
                    break

def usage(argv0):
    print("Usage: " + argv0 + " match <path> [cam ID]")
    print("       " + argv0 + " record <path> [cam ID]")
if len(sys.argv) < 2:
    usage(sys.argv[0])
    exit(1)

if sys.argv[1] == "match":
    if len(sys.argv) == 3:
        capid = 2
    elif len(sys.argv) == 4:
        capid = int(sys.argv[3])
    else:
        usage(sys.argv[0])
        exit(1)
    match(sys.argv[2], cv2.VideoCapture(capid))
elif sys.argv[1] == "record":
    if len(sys.argv) == 3:
        capid = 2
    elif len(sys.argv) == 4:
        capid = int(sys.argv[3])
    else:
        usage(sys.argv[0])
        exit(1)
    record(sys.argv[2], cv2.VideoCapture(capid))
else:
    usage(sys.argv[0])
    exit(1)