123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165 |
- #!/usr/bin/env python3
-
- # Match camera against existing faces, or record a new face.
-
- import cv2
- import numpy as np
- import face_recognition
- import os
- import time
- import argparse
-
def match(paths, cap, show, delay):
    """Compare live camera frames against known reference face images.

    Args:
        paths: image file paths of known/reference faces.
        cap: an opened cv2.VideoCapture to read frames from.
        show: if True, display each processed frame with face boxes drawn.
        delay: minimum number of milliseconds between processed frames.

    Exits the process with status 0 on a match, 1 otherwise (including
    when no reference image contains a usable face).
    """
    faceencs = []
    for path in paths:
        image = face_recognition.load_image_file(path)
        encs = face_recognition.face_encodings(image)
        if not encs:
            print("Warning: " + path + " has no face!")
            continue
        faceencs.append(encs[0])

    if not faceencs:
        # Without any reference encodings a match is impossible, and
        # face_distance() would return an empty array below (the original
        # code then crashed on an empty-index).  Bail out cleanly instead.
        print("Warning: No valid faces!")
        cap.release()
        exit(1)

    matching = False

    # Start the accumulator "full" so the very first frame is processed.
    tacc = delay
    then = 0
    # Running average brightness; frames darker than the average are
    # skipped (e.g. an IR illuminator that only lights alternate frames).
    avg = 128
    while not matching:
        ret, frame = cap.read()
        if not ret:
            # Failed read: don't pass None into cv2.mean below.
            continue
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        # Rate-limit: only process a frame once `delay` ms have elapsed.
        now = time.time() * 1000
        if tacc < delay:
            tacc += now - then
            then = now
            continue
        tacc = 0
        then = now

        scale = 1
        # NOTE(review): GRAY2RGB assumes the capture delivers single-channel
        # (IR/grayscale) frames; it would fail on a 3-channel BGR frame.
        # Confirm against the target camera.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Check every face in this frame against the reference encodings.
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # Distance to the closest known face.
            dist = min(face_recognition.face_distance(faceencs, frameenc))
            print("Distance: " + str(dist))

            if show:
                # Coordinates were found on the scaled-down frame; map them
                # back onto the full-size preview.
                cv2.rectangle(
                    rgb_frame,
                    (int(left / scale), int(top / scale)),
                    (int(right / scale), int(bottom / scale)),
                    (0, 0, 255), 2)

            # Empirical threshold: distances at or below 0.4 count as the
            # same person.
            if dist <= 0.4:
                matching = True

        if show:
            cv2.imshow("frame", rgb_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture.
    cap.release()
    cv2.destroyAllWindows()

    if matching:
        print("Matches")
        exit(0)
    exit(1)
-
def record(path, cap):
    """Interactively capture a reference face image from the camera.

    Shows a live preview; pressing Enter freezes the current frame and
    overlays detected face boxes.  Pressing Enter again saves the frozen
    frame to `path` and returns; Esc resumes the live preview; 'q' quits
    without saving.

    Args:
        path: destination image file path.
        cap: an opened cv2.VideoCapture to read frames from.
    """
    def draw_face_rec(name, frame):
        # Show `frame` in window `name` with a box around each detected face.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        # Only face *locations* are needed to draw boxes; the original also
        # computed encodings, which is a slow model pass whose result was
        # never used.
        for top, right, bottom, left in face_recognition.face_locations(rgb_frame):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)

        cv2.imshow(name, rgb_frame)

    # Running average brightness; skip frames darker than the average
    # (mirrors the frame filtering done in match()).
    avg = 128
    while True:
        ret, frame = cap.read()
        if not ret:
            # Failed read: don't pass None into cv2.mean below.
            continue
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        cv2.imshow("frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            # Freeze: show the raw frame immediately for fast feedback,
            # then the annotated version once face detection finishes.
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    # Confirm: save the frozen frame and finish.
                    cv2.imwrite(path, frame)
                    return
                elif key == 27:  # Esc: discard and resume live preview
                    break
-
# Command-line interface: `match` compares the camera against one or more
# reference images; `record` captures a new reference image.
parser = argparse.ArgumentParser()
subs = parser.add_subparsers(dest="command", required=True)

sub_match = subs.add_parser("match")
sub_match.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_match.add_argument(
    "-s", "--show", default=False, action="store_true",
    help="show what the camera sees")
# NOTE(review): --wait is used as a boolean flag below but is declared
# type=str, so it requires a (ignored) value on the command line.  Kept
# as-is so existing invocations don't break; consider action="store_true"
# in a breaking release.
sub_match.add_argument(
    "-w", "--wait", type=str, default=None,
    help="wait for newline on stdin")
sub_match.add_argument(
    "-t", "--delay", type=int, default=0,
    help="wait n milliseconds between each frame")
sub_match.add_argument(
    "faces", type=str, nargs="+",
    help="the source image file(s)")

sub_record = subs.add_parser("record")
sub_record.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_record.add_argument(
    "face", type=str,
    help="the destination image file")

args = parser.parse_args()

if args.command == "match":
    if args.wait:
        # Block until the controlling process sends a newline; lets a
        # wrapper script decide when matching should start.
        input("Waiting for newline...")
        print("Got newline.")

    match(args.faces, cv2.VideoCapture(args.device), args.show, args.delay)
elif args.command == "record":
    record(args.face, cv2.VideoCapture(args.device))
|