
facematcher.py

#!/usr/bin/env python3
# Match camera against existing faces, or record a new face.
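#
# Usage (see the argument parser at the bottom of the file):
#   facematcher.py record [-d DEVICE] FACE
#     capture a reference frame from the camera and save it to FACE
#   facematcher.py match [-d DEVICE] [-s] [-w] [-t DELAY] FACES...
#     watch the camera and exit with status 0 as soon as a face matches one
#     of FACES; exits with status 1 if the preview is quit with 'q'
#     (requires -s) before a match is found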

import cv2
import numpy as np
import face_recognition
import os
import time
import argparse

def match(paths, dev, show, delay, wait):
    # Load every reference image and compute a face encoding for it.
    faces = []
    faceencs = []
    for path in paths:
        print(f"reading {path}")
        face = face_recognition.load_image_file(path)
        faces.append(face)
        encs = face_recognition.face_encodings(face)
        if len(encs) == 0:
            print("Warning: " + path + " has no face!")
            continue
        faceencs.append(encs[0])
    if len(faceencs) == 0:
        # Nothing to match against, so bail out instead of crashing later.
        print("Warning: No valid faces!")
        exit(1)

    if wait:
        input("Waiting for newline...")
        print("Got newline.")

    cap = cv2.VideoCapture(dev)

    matching = False
    tacc = delay
    then = 0
    avg = 128

    while not matching:
        ret, frame = cap.read()

        # Skip frames that are darker than the running average brightness
        # (e.g. the unlit frames produced by a pulsed IR emitter).
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        # Only process a frame once `delay` milliseconds have accumulated.
        now = time.time() * 1000
        if tacc < delay:
            tacc += now - then
            then = now
            continue
        else:
            tacc = 0
            then = now

        # face_recognition expects RGB input; the capture device is assumed
        # to deliver single-channel (grayscale) frames here.
        scale = 1
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # Find the closest known face for this detection.
            dists = face_recognition.face_distance(faceencs, frameenc)
            dist = dists[0]
            distidx = 0
            for i, d in enumerate(dists):
                if d < dist:
                    dist = d
                    distidx = i
            print(f"Distance: {dist} ({paths[distidx]})")

            if show:
                cv2.rectangle(
                    rgb_frame,
                    (int(left / scale), int(top / scale)),
                    (int(right / scale), int(bottom / scale)),
                    (0, 0, 255), 2)

            # Accept the face if the best distance is within the threshold.
            if dist <= 0.4:
                matching = True

        if show:
            cv2.imshow("frame", rgb_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()

    if matching:
        print("Matches")
        exit(0)
    else:
        exit(1)

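# record(): show a live camera preview.  Pressing Enter freezes the current
# frame and overlays boxes around any detected faces; Enter again writes the
# (unannotated) frame to `path`, Esc discards it and resumes the preview,
# and 'q' in the live preview quits without saving.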
def record(path, dev):
    def draw_face_rec(name, frame):
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        frameencs = face_recognition.face_encodings(rgb_frame, framelocs)
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.imshow(name, rgb_frame)

    cap = cv2.VideoCapture(dev)

    avg = 128

    while True:
        ret, frame = cap.read()

        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        cv2.imshow("frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    cv2.imwrite(path, frame)
                    return
                elif key == 27:  # esc
                    break

parser = argparse.ArgumentParser()
subs = parser.add_subparsers(dest="command", required=True)

sub_match = subs.add_parser("match")
sub_match.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_match.add_argument(
    "-s", "--show", default=False, action="store_true",
    help="show what the camera sees")
sub_match.add_argument(
    "-w", "--wait", default=False, action="store_true",
    help="wait for newline on stdin")
sub_match.add_argument(
    "-t", "--delay", type=int, default=0,
    help="wait n milliseconds between each frame")
sub_match.add_argument(
    "faces", type=str, nargs="+",
    help="the source image file(s)")

sub_record = subs.add_parser("record")
sub_record.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_record.add_argument(
    "face", type=str,
    help="the destination image file")

args = parser.parse_args()

if args.command == "match":
    match(args.faces, args.device, args.show, args.delay, args.wait)
elif args.command == "record":
    record(args.face, args.device)
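
# Example invocations (the filenames below are hypothetical; the default
# device index is 0):
#   ./facematcher.py record me.jpg
#   ./facematcher.py match -s -t 200 me.jpg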