
yay

Martin Dørum committed 5 years ago
commit b90f6b3699 (branch: master)
2 changed files with 92 additions and 45 deletions:
  1. facelock.sh (+36, -0)
  2. facematcher.py (+56, -45)

facelock.sh (+36, -0)

@@ -0,0 +1,36 @@
#!/bin/sh

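# Run the locker; if mlock exits with an error, restart it after a short delay.
# The function returns once mlock exits successfully.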
locker() {
    until mlock; do
        echo "mlock died!!! $?"
        sleep 0.5
    done
}

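# Run the face matcher against the reference images; restart it on failure,
# return once it exits successfully.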
matcher() {
    until ./facematcher.py match -d 2 faces/martin/*; do
        echo "facematcher died!!! $?"
        sleep 2
    done
}

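# Kill any background jobs that are still running when this script exits.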
trap 'kill $(jobs -p)' EXIT

locker &
lockerpid=$!
matcher &
matcherpid=$!

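# Watchdog: poll both jobs; when either one exits, kill the other
# (plus any running i3lock once the matcher is done) and quit.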
while :; do
    if ! kill -0 $lockerpid; then
        kill $matcherpid
        wait
        exit 0
    elif ! kill -0 $matcherpid; then
        kill $lockerpid
        pkill i3lock
        wait
        exit 0
    fi
    sleep 1
done

facematcher.py (+56, -45)

@@ -3,16 +3,23 @@ import cv2
 import numpy as np
 import face_recognition
 import os
 import sys
 import time
+import argparse
 
-def match(path, cap):
-    face = face_recognition.load_image_file(path)
-    faceencs = face_recognition.face_encodings(face)
+def match(paths, cap, show):
+    faces = []
+    faceencs = []
+    for path in paths:
+        face = face_recognition.load_image_file(path)
+        faces.append(face)
+        encs = face_recognition.face_encodings(face)
+        if len(encs) == 0:
+            print("Warning: "+path+" has no face!")
+            continue
+        faceencs.append(encs[0])
     if len(faceencs) == 0:
-        print("Oh noes, bad face!")
-        exit(1)
-    faceenc = faceencs[0]
+        print("Warning: No valid faces!")
 
     matching = False

@@ -31,20 +38,26 @@ def match(path, cap):
         # Loop through each face in this frame of video
         for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
             # See if the face is a match for the known face(s)
-            dist = face_recognition.face_distance([faceenc], frameenc)[0]
+            dists = face_recognition.face_distance(faceencs, frameenc)
+            dist = dists[0]
+            for d in dists:
+                if d < dist:
+                    dist = d
             print("Distance: "+str(dist))
 
-            cv2.rectangle(
-                rgb_frame,
-                (int(left / scale), int(top / scale)),
-                (int(right / scale), int(bottom / scale)),
-                (0, 0, 255), 2)
+            if show:
+                cv2.rectangle(
+                    rgb_frame,
+                    (int(left / scale), int(top / scale)),
+                    (int(right / scale), int(bottom / scale)),
+                    (0, 0, 255), 2)
 
             # If a match was found in known_face_encodings, just use the first one.
-            if dist <= 0.35:
+            if dist <= 0.4:
                 matching = True
 
-        cv2.imshow("frame", rgb_frame)
+        if show:
+            cv2.imshow("frame", rgb_frame)
 
         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
@@ -91,33 +104,31 @@ def record(path, cap):
         elif key == 27: # esc
             break
 
-
-def usage(argv0):
-    print("Usage: "+argv0+" match <path> [cam ID]")
-    print("       "+argv0+" record <path> [cam ID]")
-
-if len(sys.argv) < 2:
-    usage(sys.argv[0])
-    exit(1)
-
-if sys.argv[1] == "match":
-    if len(sys.argv) == 3:
-        capid = 2
-    elif len(sys.argv) == 4:
-        capid = int(sys.argv[3])
-    else:
-        usage(sys.argv[0])
-        exit(1)
-    match(sys.argv[2], cv2.VideoCapture(capid))
-elif sys.argv[1] == "record":
-    if len(sys.argv) == 3:
-        capid = 2
-    elif len(sys.argv) == 4:
-        capid = int(sys.argv[3])
-    else:
-        usage(sys.argv[0])
-        exit(1)
-    record(sys.argv[2], cv2.VideoCapture(capid))
-else:
-    usage(sys.argv[0])
-    exit(1)
+parser = argparse.ArgumentParser()
+subs = parser.add_subparsers(dest="command", required=True)
+
+sub_match = subs.add_parser("match")
+sub_match.add_argument(
+    "-d", "--device", type=int, default=0,
+    help="the index of the video device")
+sub_match.add_argument(
+    "-s", "--show", default=False, action="store_true",
+    help="show what the camera sees")
+sub_match.add_argument(
+    "faces", type=str, nargs="+",
+    help="the source image file(s)")
+
+sub_record = subs.add_parser("record")
+sub_record.add_argument(
+    "-d", "--device", type=int, default=0,
+    help="the index of the video device")
+sub_record.add_argument(
+    "face", type=str,
+    help="the destination image file")
+
+args = parser.parse_args()
+
+if args.command == "match":
+    match(args.faces, cv2.VideoCapture(args.device), args.show)
+elif args.command == "record":
+    record(args.face, cv2.VideoCapture(args.device))
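For reference, a rough usage sketch of the new command-line interface (the record path below is illustrative; facelock.sh above invokes the matcher as ./facematcher.py match -d 2 faces/martin/*):

./facematcher.py record -d 2 faces/martin/face1.jpg   # save a reference image to an (illustrative) path
./facematcher.py match -d 2 -s faces/martin/*         # match the camera feed against the references, with preview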
