import sys
import json
import os

import numpy as np

# Generic deepface
from deepface import DeepFace
from deepface.detectors import FaceDetector

# dlib face detector
import dlib

# facenet512 (only needed by the commented-out model loading in init() below)
from keras.models import model_from_json

from faceclass import FaceClass
#from inception_resnet_v1 import *

#
# Use "deepface" to perform face matching.
# The first time it runs it will download a LOT of stuff.
# metrics  = [ "cosine", "euclidean", "euclidean_l2" ]
# backends = [ 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe', 'yolov8', 'yunet', 'fastmtcnn' ]
# models   = [ "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace" ]
#

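# A minimal standalone sketch (not used by the class or tests below; the
# function name and the backend/model chosen here are just illustrative):
# shows how one entry from each of the lists above plugs into DeepFace.verify.
def verify_example(img1, img2):
    # DeepFace.verify returns a dict; the "distance" and "threshold" fields are
    # what facematch() reports further down, and "verified" is True when the
    # distance falls within the model's threshold.
    result = DeepFace.verify(img1_path=img1, img2_path=img2,
                             model_name="SFace", detector_backend="dlib",
                             distance_metric="euclidean", enforce_detection=False)
    return result["verified"], result["distance"], result["threshold"]

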
class Deepfacex(FaceClass):
    img1_path = ""
    img2_path = ""
    backend = "ssd"
    fn512_model = None
    resnet_model = None
    visual = 0
    # Summary of the last match; serialised into the JSON replies built below
    tree = { "img1_faces":0, "img1_qual":0, "img2_faces":0, "img2_qual":0, "threshold":0, "score":0 }

    def init(self, backend, model):
        print("Loading models...")
        self.backend = backend
        self.model = model
        #self.fn512_model = model_from_json(open("modules/facenet512/facenet_model.json", "r").read())
        #self.fn512_model.load_weights('modules/facenet512/facenet_weights.h5')
        #self.resnet_model = InceptionResNetV1()
        #model.summary()
        print("Loaded models ;)")

    def clear(self):
        self.imgs = {}
        self.faces = {}

    @staticmethod
    def l2_normalize(x):
        return x / np.sqrt(np.sum(np.multiply(x, x)))

    def dlib_detector(self, name):
        UPSAMPLE = 1
        detector = dlib.get_frontal_face_detector()
        #img = dlib.load_rgb_image(fname)
        dets = detector(self.imgs[name], UPSAMPLE)
        self.tree["img1_faces"] = len(dets)
        print("Number of faces detected in %s: %d" % (name, len(dets)))
        for i, d in enumerate(dets):
            print("Detection %d: Left: %d Top: %d Right: %d Bottom: %d" % (i, d.left(), d.top(), d.right(), d.bottom()))

    def dlib_detector2(self, name):
        UPSAMPLE = 1
        detector = dlib.get_frontal_face_detector()
        #img = dlib.load_rgb_image(fname)
        # The third argument lowers the detection threshold so weaker detections
        # are returned too; the per-face scores are used as a quality measure.
        dets, scores, idx = detector.run(self.imgs[name], UPSAMPLE, -1)
        print("Number of faces detected: %d" % len(dets))
        if len(dets) > 0:
            print("Face scores = ", scores[0])
        return dets, scores

    # Detect all the faces in one image using the configured backend
    def detect_all(self, name, fname):
        print("Finding faces in %s: %s" % (name, fname))
        detector = FaceDetector.build_model(self.backend)   # opencv, ssd, dlib, mtcnn, retinaface, ...
        self.faces[name] = FaceDetector.detect_faces(detector, self.backend, fname)
        print(" Found %d faces for %s" % (len(self.faces[name]), name))
        return len(self.faces[name])

    # Compare two pics. With no arguments the pair loaded via load() is used;
    # explicit file paths can also be passed in (as the tests below do).
    def process(self, img1_path=None, img2_path=None):
        img1 = img1_path if img1_path else self.imfiles[0]
        img2 = img2_path if img2_path else self.imfiles[1]
        print("Matching %s vs %s" % (img1, img2))
        verification = DeepFace.verify(img1_path=img1, img2_path=img2, model_name=self.model,
                                       detector_backend=self.backend, distance_metric="euclidean",
                                       enforce_detection=False, align=True, normalization="base")
        return verification

    def facematch(self):
        dets1, scores1 = self.dlib_detector2("localcam")
        self.tree["img1_faces"] = len(dets1)
        if len(dets1) > 0:
            self.tree["img1_qual"] = scores1[0]

        dets2, scores2 = self.dlib_detector2("regula")
        self.tree["img2_faces"] = len(dets2)
        if len(dets2) > 0:
            self.tree["img2_qual"] = scores2[0]

        if len(dets1) < 1:
            return '{ "status":787101, "remark":"no faces in cam image", "data":%s }' % (json.dumps(self.tree))
        if len(dets2) < 1:
            return '{ "status":787102, "remark":"no faces in ID image", "data":%s }' % (json.dumps(self.tree))

        verif = self.process()
        self.tree["score"] = verif["distance"]
        self.tree["threshold"] = verif["threshold"]
        return '{ "status":0, "remark":"OK", "data":%s }' % (json.dumps(self.tree))

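    # Hypothetical helper, not called anywhere in this file: a sketch of how a
    # caller could consume the JSON string returned by facematch(). Status 0
    # means both faces were found and verification ran; the pair counts as a
    # match when the distance score is at or below the model threshold.
    def match_ok(self, reply):
        result = json.loads(reply)
        if result["status"] != 0:
            return False
        return result["data"]["score"] <= result["data"]["threshold"]
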
    # Run deepface attribute analysis (age, gender, emotion, race) on img1_path
    def analyse(self):
        analysis = DeepFace.analyze(img_path = self.img1_path, actions = ["age", "gender", "emotion", "race"])
        return json.dumps(analysis)

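    # Hypothetical usage sketch, not called in this file (the exact result keys
    # depend on the deepface version): pull a couple of headline attributes out
    # of the analyse() reply.
    def analyse_summary(self):
        result = json.loads(self.analyse())
        # Newer deepface versions return a list with one entry per detected face
        face = result[0] if isinstance(result, list) else result
        return face.get("age"), face.get("dominant_emotion")

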
if __name__ == '__main__':
    # tests
    # big/clear big/clear small/side small/clear obscured ID card IR cam some rotated
    picfiles = [ "gerrard.jpg", "ronaldo.jpg", "messi1.jpg", "messi2.jpg", "ox_matt.png", "ox_govid.jpg", "cg_ir.jpg", "fivepeople.jpg" ]

    if len(sys.argv) < 2:
        print("Usage: %s kiosk|quick|detect|algo1a|algo1b|algo2a|algo2b|stats" % sys.argv[0])
        sys.exit(1)

    # Test the dlib image detector
    d = Deepfacex()
    d.init("dlib","SFace")

    # kiosk test
    if sys.argv[1]=="kiosk":
        print(d.load("localcam","regula","/tmp/localcam.png","/tmp/regula/Portrait_0.jpg"))
        print(d.facematch())

    # quick test
    if sys.argv[1]=="quick":
        d.visual = 1
        d.load("messi1","messi2","testimg/messi1.jpg","testimg/messi2.jpg")
        sys.exit(0)

    # 99.4% detection
    if sys.argv[1]=="detect":
        # lfw
        n=0
        print("LFW Testing")
        for lfwdir in sorted(os.listdir("/home/carl/Downloads/lfw")):
            for lfw in sorted(os.listdir("/home/carl/Downloads/lfw/" + lfwdir)):
                d.load(lfw, "/home/carl/Downloads/lfw/" + lfwdir + "/" + lfw)
                d.dlib_detector(lfw)
                d.clear()
                n+=1
                if n > 100:
                    sys.exit(0)

    # Three difficult faces; retinaface,yolov8 find all 3; dlib,mtcnn find 2; opencv,ssd,mediapipe,yunet find <=1
    if sys.argv[1]=="algo1a":
        mod = 'Dlib'
        d.load("ox_matt","testimg/ox_matt.png")
        for back in [ 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe', 'yolov8', 'yunet', 'fastmtcnn' ]:
            d.init(back,mod)
            print(back,mod,d.detect_all("ox_matt","testimg/ox_matt.png"))
        sys.exit(0)

    # Five clear but rotated faces; dlib,mtcnn,retinaface,yolov8 find all 5; ssd finds 4; opencv,mediapipe,yunet find <=3
    if sys.argv[1]=="algo1b":
        mod = 'Dlib'
        d.load("5people","testimg/fivepeople.jpg")
        for back in [ 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe', 'yolov8', 'yunet', 'fastmtcnn' ]:
            d.init(back,mod)
            print(back,mod,d.detect_all("5people","testimg/fivepeople.jpg"))
        sys.exit(0)

    # Try and find difficult messi matches;
    # Best performers: Dlib, SFace
    if sys.argv[1]=="algo2a":
        back = "yolov8"
        for mod in [ "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace" ]:
            d.init(back,mod)
            print(d.process("testimg/messi1.jpg", "testimg/messi2.jpg"))
        sys.exit(0)

    # Try and find more difficult matches;
    # Best performers: VGG(6/7), Facenet512(5/7), Dlib(6/7), SFace(7/7)
    if sys.argv[1]=="algo2b":
        difficult = [ "Abdullatif_Sener", "Adam_Scott", "Alex_Barros", "Alison_Lohman", "Amelie_Mauresmo", "Andy_Roddick", "Anna_Nicole_Smith" ]
        back = "yolov8"
        for dif in difficult:
            for mod in [ "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace" ]:
                d.init(back,mod)
                print(d.process("/home/carl/Downloads/lfw/0001/%s.jpg" % dif, "/home/carl/Downloads/lfw/0002/%s.jpg" % dif))
        sys.exit(0)

    # Get stats on a particular algorithm. dlib+Dlib=99/100|971/1000=97.1%; dlib+Facenet512=94/100; dlib+VGG=87/100;
    # dlib+SFace=100/100|199/200|991/1000=99.1%; 1662/1680=98.9%
    if sys.argv[1]=="stats":
        # lfw
        n=0
        print("LFW Stats")
        d.init("dlib","SFace")
        for lfw in sorted(os.listdir("/home/carl/Downloads/lfw/0002")):
            print(d.process("/home/carl/Downloads/lfw/0001/" + lfw, "/home/carl/Downloads/lfw/0002/" + lfw))
            d.clear()
            n+=1
            if n > 4000:
                sys.exit(0)