
a lot nicer ok

master
Martin Dørum, 5 years ago
commit 6bba375356
6 changed files with 203 additions and 239 deletions
  1. .gitignore      +1    -0
  2. facelock.py     +0    -35
  3. facematcher.py  +0    -172
  4. facewaiter.py   +0    -32
  5. lock.py         +155  -0
  6. record.py       +47   -0

.gitignore  +1  -0  View File

@@ -0,0 +1 @@
/faces

facelock.py  +0  -35  View File

@@ -1,35 +0,0 @@
#!/usr/bin/env python

# Lock with mlock, unlock when face is detected

import subprocess
import threading

locker = subprocess.Popen([ "mlock" ])
print("locker "+str(locker.pid)+" spawned")
matcher = subprocess.Popen([ "./facewaiter.py" ])
print("matcher "+str(matcher.pid)+" spawned")

def waitfor(x, others):
    code = x.wait()
    if code != 0:
        print(str(x.pid)+" died with code "+str(code)+"!")
    else:
        print(str(x.pid)+" died.")
    for other in others:
        if other.poll() == None:
            print("killing "+str(other.pid))
            other.terminate()

threads = [
    threading.Thread(target=waitfor, args=(locker, [ matcher ])),
    threading.Thread(target=waitfor, args=(matcher, [ locker ])),
]

for th in threads:
    th.start()

for th in threads:
    th.join()

subprocess.call([ "pkill", "i3lock" ])

facematcher.py  +0  -172  View File

@@ -1,172 +0,0 @@
#!/usr/bin/env python3

# Match camera against existing faces, or record a new face.

import cv2
import numpy as np
import face_recognition
import os
import time
import argparse

def match(paths, dev, show, delay, wait):
    faces = []
    faceencs = []
    for path in paths:
        print(f"reading {path}")
        face = face_recognition.load_image_file(path)
        faces.append(face)
        encs = face_recognition.face_encodings(face)
        if len(encs) == 0:
            print("Warning: "+path+" has no face!")
            continue
        faceencs.append(encs[0])

    if len(faceencs) == 0:
        print("Warning: No valid faces!")

    if args.wait:
        input("Waiting for newline...")
        print("Got newline.")

    cap = cv2.VideoCapture(dev)
    matching = False

    tacc = delay
    then = 0
    avg = 128
    while not matching:
        ret, frame = cap.read()
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        # delay
        now = time.time() * 1000
        if tacc < delay:
            tacc += now - then
            then = now
            continue
        else:
            tacc = 0
            then = now

        scale = 1
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

        print("hey")
        framelocs = face_recognition.face_locations(small_rgb_frame)
        frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            # See if the face is a match for the known face(s)
            dists = face_recognition.face_distance(faceencs, frameenc)
            dist = dists[0]
            distidx = 0
            for i, d in enumerate(dists):
                if d < dist:
                    dist = d
                    distidx = i
            print(f"Distance: {dist} ({paths[distidx]})")

            if show:
                cv2.rectangle(
                    rgb_frame,
                    (int(left / scale), int(top / scale)),
                    (int(right / scale), int(bottom / scale)),
                    (0, 0, 255), 2)

            # If a match was found in known_face_encodings, just use the first one.
            if dist <= 0.4:
                matching = True

        if show:
            cv2.imshow("frame", rgb_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()

    if matching:
        print("Matches")
        exit(0)
    else:
        exit(1)

def record(path, dev):
    def draw_face_rec(name, frame):
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        frameencs = face_recognition.face_encodings(rgb_frame, framelocs)
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)

        cv2.imshow(name, rgb_frame)

    cap = cv2.VideoCapture(dev)

    avg = 128
    while True:
        ret, frame = cap.read()
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        cv2.imshow("frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    cv2.imwrite(path, frame)
                    return
                elif key == 27: # esc
                    break

parser = argparse.ArgumentParser()
subs = parser.add_subparsers(dest="command", required=True)

sub_match = subs.add_parser("match")
sub_match.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_match.add_argument(
    "-s", "--show", default=False, action="store_true",
    help="show what the camera sees")
sub_match.add_argument(
    "-w", "--wait", default=False, action="store_true",
    help="wait for newline on stdin")
sub_match.add_argument(
    "-t", "--delay", type=int, default=0,
    help="wait n milliseconds between each frame")
sub_match.add_argument(
    "faces", type=str, nargs="+",
    help="the source image file(s)")

sub_record = subs.add_parser("record")
sub_record.add_argument(
    "-d", "--device", type=int, default=0,
    help="the index of the video device")
sub_record.add_argument(
    "face", type=str,
    help="the destination image file")

args = parser.parse_args()

if args.command == "match":
    match(args.faces, args.device, args.show, args.delay, args.wait)
elif args.command == "record":
    record(args.face, args.device)
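
One detail worth calling out in the capture loop above: avg = (avg + mean) / 2 keeps a running average of frame brightness, and any frame whose mean is below that average is skipped before face detection runs. The commit does not say why; it reads like a guard against unusually dark frames (for instance, frames grabbed while an IR emitter is off), but that interpretation is an assumption. A small self-contained sketch of the same gate, with invented brightness values:

# Sketch of the brightness gate used in facematcher.py and lock.py.
# The frame means below are invented for illustration.
avg = 128  # same starting value the scripts use
for mean in [10, 200, 12, 210, 11, 205]:
    avg = (avg + mean) / 2
    if mean < avg:
        print(f"skip frame: mean={mean}, avg={avg:.1f}")
        continue
    print(f"use frame:  mean={mean}, avg={avg:.1f}")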

facewaiter.py  +0  -32  View File

@@ -1,32 +0,0 @@
#!/usr/bin/env python

import subprocess

keyboard = "AT Translated Set 2 keyboard"
battery = "/sys/class/power_supply/BAT0"
device = "2"
faces = "faces"
key = "36"

bat = False
if battery is not None:
    with open(f"{battery}/status", "r") as f:
        s = f.read().strip()
        if s == "Discharging" or s == "Unknown":
            bat = True

print("starting facematcher")
proc = subprocess.Popen(
    f"./facematcher.py match --wait --delay 500 --device {device} {faces}/$USER/*",
    stdin=subprocess.PIPE, shell=True)

if bat:
    print(f"On battery, so waiting for {key}")
    subprocess.check_output(
        f"xinput test '{keyboard}' | grep --line-buffered 'key press {key}' | exit",
        shell=True)
    print(f"Got {key}.")

proc.stdin.write(b"hello\n")
proc.stdin.flush()
exit(proc.wait())

lock.py  +155  -0  View File

@@ -0,0 +1,155 @@
#!/usr/bin/env python

import subprocess
import threading
import getpass
import os
import time

class I3Locker:
    def run(self):
        self.proc = subprocess.Popen([ "mlock" ])
        code = self.proc.wait()
        if code == 0 or self.killed:
            return 0
        else:
            print("mlock exited with code "+str(code))
            return -1

    def kill(self):
        self.killed = True
        self.proc.terminate()

class FaceLocker:
    def run(self):
        self.delay = 200
        self.dev = 2
        self.running = True
        self.waitingProc = None

        # Import here because it's sloow
        import numpy as np
        import face_recognition
        import cv2

        # Read all face files
        faces = []
        faceencs = []
        path = f"./faces/{getpass.getuser()}"
        paths = []
        for f in os.listdir(path):
            p = f"{path}/{f}"
            print(f"reading {p}")
            face = face_recognition.load_image_file(p)
            faces.append(face)
            encs = face_recognition.face_encodings(face)
            if len(encs) == 0:
                print("Warning: "+path+" has no face!")
                continue
            faceencs.append(encs[0])
            paths.append(p)

        # Wait here if we're on battery
        battery = "/sys/class/power_supply/BAT0"
        keyboard = "AT Translated Set 2 keyboard"
        key = 36
        bat = False
        with open(f"{battery}/status", "r") as f:
            s = f.read().strip()
            if s == "Discharging" or s == "Unknown":
                bat = True
        if bat:
            print("Waiting for enter before starting face recognition")
            self.waitForKey(keyboard, key)

        # Match faces, blocks until a match is found or we're killed
        self.runFaces(faceencs, paths, np, face_recognition, cv2)

        if self.matching or self.killed:
            return 0
        else:
            return -1

    def runFaces(self, faceencs, paths, np, face_recognition, cv2):
        self.matching = False
        cap = cv2.VideoCapture(self.dev)
        tacc = self.delay
        then = 0
        avg = 128
        while not self.matching and self.running:
            ret, frame = cap.read()
            mean = cv2.mean(frame)[0]
            avg = (avg + mean) / 2
            if mean < avg:
                continue

            # delay
            now = time.time() * 1000
            if tacc < self.delay:
                tacc += now - then
                then = now
                continue
            else:
                tacc = 0
                then = now

            scale = 1
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
            small_rgb_frame = cv2.resize(rgb_frame, (0, 0), fx=scale, fy=scale)

            framelocs = face_recognition.face_locations(small_rgb_frame)
            frameencs = face_recognition.face_encodings(small_rgb_frame, framelocs)

            # Loop through each face in this frame of video
            for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
                # See if the face is a match for the known face(s)
                dists = face_recognition.face_distance(faceencs, frameenc)
                dist = dists[0]
                distidx = 0
                for i, d in enumerate(dists):
                    if d < dist:
                        dist = d
                        distidx = i
                print(f"Distance: {dist} ({paths[distidx]})")

                # If a match was found in known_face_encodings, just use the first one.
                if dist <= 0.4:
                    self.matching = True

    def waitForKey(self, keyboard, key):
        self.waitingProc = subprocess.Popen(
            f"xinput test '{keyboard}' | grep --line-buffered 'key press {key}' | exit",
            shell=True)
        self.waitingProc.wait()

    def kill(self):
        self.killed = True
        self.running = False
        if self.waitingProc:
            self.waitingProc.terminate()

lockers = [
    I3Locker(),
    FaceLocker(),
]

def runLocker(locker):
    ret = locker.run()
    if ret == 0:
        print(locker.__class__.__name__+" unlocked.")
        for l in lockers:
            if l == locker:
                continue
            l.kill()
    else:
        print(locker.__class__.__name__+" failed.")

threads = []
for locker in lockers:
    th = threading.Thread(target=runLocker, args=(locker,))
    th.start()
    threads.append(th)

for th in threads:
    th.join()
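
A note on the matching loop shared by facematcher.py and lock.py: the inner for loop that tracks dist and distidx is a hand-rolled argmin over the array returned by face_recognition.face_distance(), and the scripts accept a match at dist <= 0.4, stricter than the 0.6 tolerance face_recognition uses by default. Since numpy is already imported, the same selection could be written more compactly; the snippet below is a sketch with invented distances, not part of the commit:

import numpy as np

# dists = face_recognition.face_distance(faceencs, frameenc) in the real code
dists = np.array([0.62, 0.38, 0.55])  # example distances, invented
distidx = int(np.argmin(dists))       # index of the closest known face
dist = dists[distidx]
print(f"Distance: {dist}")            # a match is accepted when dist <= 0.4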

record.py  +47  -0  View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python

import sys
import cv2
import numpy as np
import face_recognition

def record(path, dev):
    def draw_face_rec(name, frame):
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        framelocs = face_recognition.face_locations(rgb_frame)
        frameencs = face_recognition.face_encodings(rgb_frame, framelocs)
        for (top, right, bottom, left), frameenc in zip(framelocs, frameencs):
            cv2.rectangle(rgb_frame, (left, top), (right, bottom), (0, 0, 255), 2)

        cv2.imshow(name, rgb_frame)

    cap = cv2.VideoCapture(dev)

    avg = 128
    while True:
        ret, frame = cap.read()
        mean = cv2.mean(frame)[0]
        avg = (avg + mean) / 2
        if mean < avg:
            continue

        cv2.imshow("frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('\r'):
            cv2.imshow("frame", frame)
            cv2.waitKey(1)
            draw_face_rec("frame", frame)
            while True:
                key = cv2.waitKey(0) & 0xFF
                if key == ord('\r'):
                    cv2.imwrite(path, frame)
                    return
                elif key == 27: # esc
                    break

if len(sys.argv) != 2:
    print(f"Usage: {sys.argv[0]} <path>")
    exit(1)

record(sys.argv[1], 2)
