facematcher.py 4.2KB

#!/usr/bin/env python3
# Match camera against existing faces, or record a new face.
import cv2
import numpy as np
import face_recognition
import os
import time
import argparse


def match(paths, cap, show):
    # Load the reference images and compute one face encoding per image.
    faces = []
    faceencs = []
    for path in paths:
        face = face_recognition.load_image_file(path)
        faces.append(face)
        encs = face_recognition.face_encodings(face)
        if len(encs) == 0:
            print("Warning: "+path+" has no face!")
            continue
        faceencs.append(encs[0])
    if len(faceencs) == 0:
        # With no reference encodings there is nothing to match against,
        # so report failure instead of crashing in the loop below.
        print("Warning: No valid faces!")
        exit(1)

    matching = False
    while not matching:
        ret, frame = cap.read()
        # Skip frames that are too dark to contain a usable face.
        if cv2.mean(frame)[0] < 30:
            continue

        # Lowering scale speeds up detection; the face locations are scaled
        # back up when drawing below.
        scale = 1
        # face_recognition expects RGB; the capture is expected to yield
        # single-channel (grayscale) frames here.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # See if the face is a match for the known face(s)
            dists = face_recognition.face_distance(faceencs, frameenc)
            dist = min(dists)
            print("Distance: "+str(dist))

            if show:
                cv2.rectangle(
                    rgb_frame,
                    (int(left / scale), int(top / scale)),
                    (int(right / scale), int(bottom / scale)),
                    (0, 0, 255), 2)

            # Accept the frame if the closest known face is within the
            # match threshold.
            if dist <= 0.4:
                matching = True

        if show:
            cv2.imshow("frame", rgb_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()

    # Exit 0 on a match, 1 otherwise.
    if matching:
        print("Matches")
        exit(0)
    else:
        exit(1)


def record(path, cap):
    def draw_face_rec(name, frame):
        # Show the frame with a rectangle around every detected face.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        frameencs = face_recognition.face_encodings(rgb_frame, framelocs)
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.imshow(name, rgb_frame)

    while True:
        ret, frame = cap.read()
        # Skip frames that are too dark to contain a usable face.
        if cv2.mean(frame)[0] < 30:
            continue

        cv2.imshow("frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            # Freeze the current frame and highlight the detected faces,
            # then wait: enter saves the frame, escape resumes the preview.
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    cv2.imwrite(path, frame)
                    return
                elif key == 27:  # esc
                    break


parser = argparse.ArgumentParser()
subs = parser.add_subparsers(dest="command", required=True)

sub_match = subs.add_parser("match")
sub_match.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_match.add_argument(
    "-s", "--show", default=False, action="store_true",
    help="show what the camera sees")
sub_match.add_argument(
    "-w", "--wait", default=False, action="store_true",
    help="wait for newline on stdin")
sub_match.add_argument(
    "faces", type=str, nargs="+",
    help="the source image file(s)")

sub_record = subs.add_parser("record")
sub_record.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_record.add_argument(
    "face", type=str,
    help="the destination image file")

args = parser.parse_args()

if args.command == "match":
    if args.wait:
        input("Waiting for newline...")
    match(args.faces, cv2.VideoCapture(args.device), args.show)
elif args.command == "record":
    record(args.face, cv2.VideoCapture(args.device))
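
# Example usage (a sketch: face.jpg is an arbitrary path, and the default
# camera at index 0 is assumed):
#   ./facematcher.py record face.jpg    # enter freezes a frame, enter again saves it
#   ./facematcher.py match -s face.jpg  # prints distances; exits 0 on match, 1 otherwise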