
facematcher.py

#!/usr/bin/env python3
# Match camera against existing faces, or record a new face.
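#
# Usage (see the argparse setup below):
#   facematcher.py match [-d DEVICE] [-s] [-w WAIT] [-t DELAY] FACE_IMAGE...
#   facematcher.py record [-d DEVICE] FACE_IMAGE
#
# `match` exits with status 0 when a captured frame matches one of the given
# face images (and 1 otherwise), so it can be used from scripts, e.g.:
#   ./facematcher.py match -t 100 known.jpg && echo "face recognized"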
import cv2
import numpy as np
import face_recognition
import os
import time
import argparse

def match(paths, cap, show, delay):
    faces = []
    faceencs = []
    for path in paths:
        face = face_recognition.load_image_file(path)
        faces.append(face)
        encs = face_recognition.face_encodings(face)
        if len(encs) == 0:
            print("Warning: " + path + " has no face!")
            continue
        faceencs.append(encs[0])

    if len(faceencs) == 0:
        print("Warning: No valid faces!")

    matching = False
    tacc = delay
    then = 0
    avg = 128

    while not matching:
        ret, frame = cap.read()
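
        # Track the running average brightness and skip frames darker than that
        # average; with a pulsed-IR camera, for example, every other frame is
        # unlit and carries no usable face.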
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        # Throttle processing: only handle a frame every `delay` milliseconds.
        now = time.time() * 1000
        if tacc < delay:
            tacc += now - then
            then = now
            continue
        else:
            tacc = 0
            then = now

        # The capture is assumed to be single-channel (e.g. a grayscale IR
        # camera), so convert it to RGB for face_recognition. Setting `scale`
        # below 1 would run detection on a downscaled copy of the frame.
        scale = 1
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Loop through each face in this frame of video.
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # See if the face is a match for the known face(s).
            dists = face_recognition.face_distance(faceencs, frameenc)
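            # face_distance() gives one distance per known encoding; keep the
            # smallest one, i.e. the closest known face.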
            dist = dists[0]
            for d in dists:
                if d < dist:
                    dist = d
            print("Distance: " + str(dist))

            if show:
                cv2.rectangle(
                    rgb_frame,
                    (int(left / scale), int(top / scale)),
                    (int(right / scale), int(bottom / scale)),
                    (0, 0, 255), 2)

            # Consider it a match if the closest known face is within the
            # distance threshold.
            if dist <= 0.4:
                matching = True

        if show:
            cv2.imshow("frame", rgb_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # When everything is done, release the capture.
    cap.release()
    cv2.destroyAllWindows()

    if matching:
        print("Matches")
        exit(0)
    else:
        exit(1)

def record(path, cap):
    # Show the frame in the named window with a rectangle drawn around each
    # detected face.
    def draw_face_rec(name, frame):
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        frameencs = face_recognition.face_encodings(rgb_frame, framelocs)
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.imshow(name, rgb_frame)

    avg = 128
    while True:
        ret, frame = cap.read()

        # Same brightness filter as in match(): skip darker-than-average frames.
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        cv2.imshow("frame", frame)
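
        # Keys: 'q' quits without saving; Enter freezes the frame and previews
        # the detected face; Enter again writes it to `path`, Esc resumes capture.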
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    cv2.imwrite(path, frame)
                    return
                elif key == 27:  # esc
                    break

parser = argparse.ArgumentParser()
subs = parser.add_subparsers(dest="command", required=True)

sub_match = subs.add_parser("match")
sub_match.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_match.add_argument(
    "-s", "--show", default=False, action="store_true",
    help="show what the camera sees")
sub_match.add_argument(
    "-w", "--wait", type=str, default=None,
    help="wait for newline on stdin")
sub_match.add_argument(
    "-t", "--delay", type=int, default=0,
    help="wait n milliseconds between each frame")
sub_match.add_argument(
    "faces", type=str, nargs="+",
    help="the source image file(s)")

sub_record = subs.add_parser("record")
sub_record.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_record.add_argument(
    "face", type=str,
    help="the destination image file")

args = parser.parse_args()

if args.command == "match":
    if args.wait:
        input("Waiting for newline...")
        print("Got newline.")
    match(args.faces, cv2.VideoCapture(args.device), args.show, args.delay)
elif args.command == "record":
    record(args.face, cv2.VideoCapture(args.device))