
facematcher.py

#!/usr/bin/env python3
# Match camera against existing faces, or record a new face.
import cv2
import numpy as np
import face_recognition
import os
import time
import argparse


def match(paths, cap, show, delay):
    # Load the reference images and compute one encoding per image.
    faceencs = []
    for path in paths:
        face = face_recognition.load_image_file(path)
        encs = face_recognition.face_encodings(face)
        if len(encs) == 0:
            print("Warning: " + path + " has no face!")
            continue
        faceencs.append(encs[0])
    if len(faceencs) == 0:
        print("Warning: No valid faces!")
        exit(1)

    matching = False
    tacc = 0
    then = time.time() * 1000
    while not matching:
        ret, frame = cap.read()
        if not ret:
            continue
        # Skip frames that are too dark to contain a usable face.
        if cv2.mean(frame)[0] < 30:
            continue

        # Enforce the requested delay between processed frames.
        now = time.time() * 1000
        if tacc < delay:
            tacc += now - then
            then = now
            continue
        else:
            tacc = 0
            then = now

        # face_recognition expects RGB, while OpenCV captures BGR frames.
        scale = 1
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)
        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Loop through each face in this frame of video.
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # Distance to the closest known face.
            dists = face_recognition.face_distance(faceencs, frameenc)
            dist = min(dists)
            print("Distance: " + str(dist))

            if show:
                cv2.rectangle(
                    frame,
                    (int(left / scale), int(top / scale)),
                    (int(right / scale), int(bottom / scale)),
                    (0, 0, 255), 2)

            # Accept the face if it is close enough to any known face.
            if dist <= 0.4:
                matching = True

        if show:
            cv2.imshow("frame", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # When everything is done, release the capture.
    cap.release()
    cv2.destroyAllWindows()

    if matching:
        print("Matches")
        exit(0)
    else:
        exit(1)


def record(path, cap):
    def draw_face_rec(name, frame):
        # Detect faces and preview them with rectangles so the user can confirm.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        preview = frame.copy()
        for top, right, bottom, left in framelocs:
            cv2.rectangle(preview, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.imshow(name, preview)

    while True:
        ret, frame = cap.read()
        if not ret:
            continue
        # Skip frames that are too dark to contain a usable face.
        if cv2.mean(frame)[0] < 30:
            continue
        cv2.imshow("frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            # Freeze the current frame, outline the detected face,
            # then wait for the user to confirm or reject it.
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):  # enter: save the frame and exit
                    cv2.imwrite(path, frame)
                    return
                elif key == 27:  # esc: return to the live view
                    break


parser = argparse.ArgumentParser()
subs = parser.add_subparsers(dest="command", required=True)

sub_match = subs.add_parser("match")
sub_match.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_match.add_argument(
    "-s", "--show", default=False, action="store_true",
    help="show what the camera sees")
sub_match.add_argument(
    "-w", "--waitfor", type=str, default=None,
    help="wait for this exact line on stdin before matching")
sub_match.add_argument(
    "-t", "--delay", type=int, default=0,
    help="wait n milliseconds between each frame")
sub_match.add_argument(
    "faces", type=str, nargs="+",
    help="the source image file(s)")

sub_record = subs.add_parser("record")
sub_record.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_record.add_argument(
    "face", type=str,
    help="the destination image file")

args = parser.parse_args()

if args.command == "match":
    print(args)
    if args.waitfor is not None:
        s = input("Waiting for '" + args.waitfor + "'...")
        if s != args.waitfor:
            print("Exiting because stdin was '" + s + "' and expected '" + args.waitfor + "'.")
            exit(1)
        else:
            print("Got '" + s + "'.")
    match(args.faces, cv2.VideoCapture(args.device), args.show, args.delay)
elif args.command == "record":
    record(args.face, cv2.VideoCapture(args.device))
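
For reference, a usage sketch based on the argument parser above, assuming the script is executable; the image path face.jpg and the flag values are only examples:

    ./facematcher.py record -d 0 face.jpg
    ./facematcher.py match -s -t 100 face.jpg

The match subcommand prints "Matches" and exits with status 0 once a camera face comes within distance 0.4 of one of the reference images, and exits with status 1 otherwise.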