paravision alternative

This commit is contained in:
carl 2024-01-22 10:40:19 -04:00
parent caf8315162
commit 88ddc44313
3 changed files with 182 additions and 69 deletions

View File

@ -27,6 +27,7 @@
#
# sudo pip3 install openvino opencv-python yolov5 yolov8
. awesome_venv/bin/activate
HERE=$PWD
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

View File

@ -1,50 +1,179 @@
import sys
import json
import os

import numpy as np

# Generic deepface
from deepface import DeepFace
from deepface.detectors import FaceDetector
# dlib face detector
import dlib
# facenet512
from keras.models import model_from_json

from faceclass import FaceClass
#from inception_resnet_v1 import *
# NOTE(review): stale definition — this `object`-based class header is
# immediately shadowed by `class Deepfacex(FaceClass)` below and has no body
# of its own; it looks like interleaved diff residue and can likely be removed.
class Deepfacex(object):
#
# Use "deepface" to perform face matching.
# The first time it runs it will download a LOT of stuff.
# metrics = ["cosine", "euclidean", "euclidean_l2"]
# backends = [ 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe', 'yolov8', 'yunet', 'fastmtcnn'
# models = [ "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace" ]
# Face matching/analysis wrapper around the "deepface" library, with extra
# dlib-based detection helpers. See the __main__ harness below for usage.
class Deepfacex(FaceClass):
# Paths (or loaded image data) of the two pictures to compare.
img1_path = ""
img2_path = ""
# Default recognition model and detector backend; init() overrides these.
modelname = "facenet512"
backend = "ssd"
# Caches keyed by image label: raw images and their detected faces.
# NOTE(review): mutable class-level defaults — shared by all instances
# until clear() rebinds them per instance; confirm single-instance use.
imgs = {}
faces = {}
# Optional pre-loaded model handles (currently unused; see init()).
fn512_model = None
resnet_model = None
# Non-zero enables visual/debug behaviour in the test harness.
visual = 0
def init(self, backend, model):
    """Select the face-detector backend and recognition model.

    Args:
        backend: detector backend name, e.g. "opencv", "ssd", "dlib", "yolov8".
        model:   recognition model name, e.g. "Facenet512", "SFace".
    """
    print("Loading models...")
    self.backend = backend
    self.model = model
    # Consistency fix: keep the legacy class attribute in sync — the old
    # process() read self.modelname while the current one reads self.model.
    self.modelname = model
    print("Loaded models ;)")
# Load the pics and device labels
def load(self, dev1, dev2, id_image_filepath, photo_image_filepath):
    """Load the two images to compare and remember their device labels.

    Returns True on success, None on failure (callers test for None).

    NOTE(review): load_image() is neither defined nor imported in this
    module — confirm where it is meant to come from; as written the calls
    below raise NameError.
    """
    print("load1")
    self.dev1 = dev1
    self.dev2 = dev2
    try:
        # Load images
        print("load2")
        self.img1_path = load_image(id_image_filepath)
        self.img2_path = load_image(photo_image_filepath)
        # Bug fix: the original printed self.id_image, an attribute that is
        # never set anywhere — the resulting AttributeError was swallowed by
        # a bare `except:` so load() silently returned None every time.
        print("++++++++++++++++ ", self.img1_path)
        return True
    except Exception as exc:
        # Report the failure instead of silently swallowing every exception.
        print("load failed: %s" % exc)
        return None
def clear(self):
    """Reset the per-label image and face caches to fresh empty dicts."""
    self.imgs = dict()
    self.faces = dict()
def l2_normalize(x):
    """Return x scaled to unit Euclidean (L2) norm.

    Bug fix: this function uses `np` but numpy was never imported in this
    module; `import numpy as np` has been added to the imports.

    NOTE(review): defined inside the class without `self`, so it only works
    when called unbound (Deepfacex.l2_normalize(v)); calling it as
    self.l2_normalize(v) would pass the instance as x — confirm intent.
    """
    return x / np.sqrt(np.sum(np.multiply(x, x)))
def dlib_detector(self, name):
    """Run dlib's frontal face detector over the cached image `name`
    and print the bounding box of every detection."""
    upsample_times = 1
    face_finder = dlib.get_frontal_face_detector()
    boxes = face_finder(self.imgs[name], upsample_times)
    print("Number of faces detected in %s: %d" % (name, len(boxes)))
    for box_no, box in enumerate(boxes):
        print("Detection %d: Left: %d Top: %d Right: %d Bottom: %d" % (box_no, box.left(), box.top(), box.right(), box.bottom() ) )
def dlib_detector2(self, name):
    """Like dlib_detector, but uses detector.run() (threshold -1) so each
    detection also reports its confidence score and sub-detector index."""
    upsample_times = 1
    face_finder = dlib.get_frontal_face_detector()
    boxes, confidences, subdetectors = face_finder.run(self.imgs[name], upsample_times, -1)
    print("Number of faces detected: %d" % len(boxes))
    for box_no, box in enumerate(boxes):
        print("Detection %d: Score: %d Type: %d Left: %d Top: %d Right: %d Bottom: %d" % (box_no, confidences[box_no], subdetectors[box_no], box.left(), box.top(), box.right(), box.bottom() ) )
# Load two pics using their device labels
def loads(self, dev1, dev2, id_image_filepath, photo_image_filepath):
    """Load both images for comparison; returns load()'s status.

    Bug fix: the original issued two self.load(...) calls with only two
    arguments each, but load() takes four (dev1, dev2, id path, photo
    path), so both calls raised TypeError. Since load() already stores
    both images, a single delegating call is sufficient.
    """
    return self.load(dev1, dev2, id_image_filepath, photo_image_filepath)
# Detects all the faces
def detect_all(self, name):
    """Detect every face in the cached image `name` with the configured
    backend, cache the detections under `name`, and return the count."""
    face_detector = FaceDetector.build_model(self.backend) #set opencv, ssd, dlib, mtcnn or retinaface
    detections = FaceDetector.detect_faces(face_detector, self.backend, self.imgs[name])
    self.faces[name] = detections
    print(" Found %d faces for %s" % (len(detections), name))
    return len(detections)
# Compare the two pics
# NOTE(review): dead code — shadowed by the two-argument process() defined
# just below, so this version is never the one callers get. It is also
# buggy as written: self.img1_filepath / self.img2_filepath are never set
# (the attributes are img1_path / img2_path), "euclidian" is a typo for
# "euclidean", and the verification result is discarded (no return).
def process(self):
print("proc1")
verification = DeepFace.verify(img1_path = self.img1_filepath, img2_path = self.img2_filepath, model_name=self.modelname, detector_backend="opencv", distance_metric="euclidian", enforce_detection=True, align=True, normalization="base")
print("proc2")
# Fields:
# boolean verified; float distance; float max_threshold_to_verify; string model; string similarity_metric
def process(self, impath1, impath2):
    """Verify whether the faces in the two image files match, using the
    model/backend selected by init(); returns DeepFace's result as JSON."""
    print("Matching %s vs %s" % (impath1, impath2))
    result = DeepFace.verify(
        img1_path=impath1,
        img2_path=impath2,
        model_name=self.model,
        detector_backend=self.backend,
        distance_metric="euclidean",
        enforce_detection=False,
        align=True,
        normalization="base",
    )
    return json.dumps(result)
def analyse(self):
    """Run DeepFace attribute analysis (age, gender, emotion, race) on the
    first loaded image and return the result as a JSON string.

    Bug fix: removed a stale duplicate analyze() call (interleaved diff
    residue) that read self.img1_filepath — an attribute that is never
    set, so it raised AttributeError — and whose result was immediately
    overwritten anyway.
    """
    analysis = DeepFace.analyze(img_path=self.img1_path, actions=["age", "gender", "emotion", "race"])
    return json.dumps(analysis)
# Manual test harness: run with one of the sub-command arguments handled
# below ("quick", "detect", "algo1a", "algo1b", "algo2a", "algo2b", "stats").
# NOTE(review): every branch indexes sys.argv[1] without checking argc, so
# running with no argument raises IndexError.
if __name__ == '__main__':
print("main1")
# tests
# big/clear big/clear small/side small/clear obscured ig card IR cam some rotated
picfiles = [ "gerrard.jpg", "ronaldo.jpg", "messi1.jpg", "messi2.jpg", "ox_matt.png", "ox_govid.jpg", "cg_ir.jpg", "fivepeople.jpg" ]
# Test the dlib image detector
d = Deepfacex()
print("main2")
d.load("file1","file2","testimg/test1.jpg","testimg/test2.jpg")
print("main3")
d.init("dlib","Facenet512")
# NOTE(review): process() is called here with no arguments, but the
# surviving definition of process() takes two image paths — this line
# raises TypeError and looks like residue of the old zero-arg process().
print (d.process())
# quick test
if sys.argv[1]=="quick":
d.visual = 1
d.load("messi1","messi2","testimg/messi1.jpg","testimg/messi2.jpg")
sys.exit(0)
# 99.4% detection
if sys.argv[1]=="detect":
# lfw
n=0
print("LFW Testing")
for lfwdir in sorted(os.listdir("/home/carl/Downloads/lfw")):
for lfw in sorted(os.listdir("/home/carl/Downloads/lfw/" + lfwdir)):
# NOTE(review): this and the 2-argument d.load(...) calls below do not
# match load()'s 4-parameter signature (dev1, dev2, id path, photo path)
# — apparently residue of an older load(name, path) API; confirm intent.
d.load(lfw, "/home/carl/Downloads/lfw/" + lfwdir + "/" + lfw)
d.dlib_detector(lfw)
d.clear()
n+=1
if n > 100:
sys.exit(0)
# Three difficult faces; retinaface,yolov8 find all 3; dlib,mtcnn find 2; opencv,ssd,mediapipe,yunet find <=1
if sys.argv[1]=="algo1a":
mod = 'Dlib'
d.load("ox_matt","testimg/ox_matt.png")
for back in [ 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe', 'yolov8', 'yunet', 'fastmtcnn' ]:
d.init(back,mod)
print (back,mod,d.detect_all("ox_matt"))
sys.exit(0)
# Five clear but rotated faces; dlib,mtcnn,retinaface,yolov8 find all 5; ssd find 4; opencv,mediapipe,yunet find <=3
if sys.argv[1]=="algo1b":
mod = 'Dlib'
d.load("5people","testimg/fivepeople.jpg")
for back in [ 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe', 'yolov8', 'yunet', 'fastmtcnn' ]:
d.init(back,mod)
print (back,mod,d.detect_all("5people"))
sys.exit(0)
# Try and find difficult messi matches;
# Best performers: Dlib, Sface
if sys.argv[1]=="algo2a":
back = "yolov8"
for mod in [ "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace" ]:
d.init(back,mod)
print (d.process("testimg/messi1.jpg", "testimg/messi2.jpg"))
sys.exit(0)
# Try and find more difficult matches;
# Best performers: VGG(6/7), Facenet512(5/7), Dlib(6/7), Sface(7/7)
if sys.argv[1]=="algo2b":
difficult = [ "Abdullatif_Sener", "Adam_Scott", "Alex_Barros", "Alison_Lohman", "Amelie_Mauresmo", "Andy_Roddick", "Anna_Nicole_Smith" ]
back = "yolov8"
for dif in difficult:
for mod in [ "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace" ]:
d.init(back,mod)
print (d.process("/home/carl/Downloads/lfw/0001/%s.jpg" % dif, "/home/carl/Downloads/lfw/0002/%s.jpg" % dif, ))
sys.exit(0)
# Get stats on a particular algorithm. dlib+DLib=99/100|971/1000=97.1%; dlib+Facenet512=94/100; dlib+VGG=87/100;
# dlib+SFace=100/100|199/200|991/1000==99.1; 1662/1680=98.9%
if sys.argv[1]=="stats":
# lfw
n=0
print("LFW Stats")
d.init("dlib","SFace")
for lfw in sorted(os.listdir("/home/carl/Downloads/lfw/0002")):
print (d.process("/home/carl/Downloads/lfw/0001/" + lfw, "/home/carl/Downloads/lfw/0002/" + lfw))
d.clear()
n+=1
if n > 4000:
sys.exit(0)

View File

@ -59,6 +59,16 @@ class yoloserv(object):
from para_facematch import Facematch
self.facematcher = Facematch()
self.facematcher.init()
if "deepface" in self.devices:
print("Loading deepface facematch...")
from deepfacex import Deepfacex
self.facematcher = Deepfacex()
self.facematcher.init()
if "face_recognition" in self.devices:
print("Loading deepface facematch...")
from face_recognition import FaceRecognition
self.facematcher = FaceRecognition()
self.facematcher.init()
# Image acquisition
if "camera" in self.devices:
@ -135,13 +145,16 @@ class yoloserv(object):
# Find faces - the algorithm used depends on the device list and what actual file was loaded as self.camera
# Simply load an image
# HTTP endpoint: load the image `infile` into the configured face matcher.
# NOTE(review): Deepfacex.load() takes four arguments (dev1, dev2, id path,
# photo path); this one-argument call matches a different facematcher
# interface — confirm which backend this endpoint is meant to serve.
@cherrypy.expose
def svc_load_face(self,infile):
self.facematcher.load(infile)
# HTTP endpoint: run face detection on `infile` and return a JSON count.
@cherrypy.expose
def svc_detect_faces(self,infile):
# NOTE(review): the next two lines look like interleaved old/new diff
# residue — the first detect_all() call's result is discarded and the
# image is detected twice (once without the self.imgdir prefix).
self.facematcher.detect_all(infile)
nfaces = self.facematcher.detect_all(self.imgdir + infile)
return '{ "status":0, "remark":"found faces", "count":%d }' % (nfaces)
# Find the most prominent object
@cherrypy.expose
@ -183,51 +196,21 @@ class yoloserv(object):
if status is not None:
return '{ "status":777242, "remark":"face loading failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
status = self.facematcher.get_faces()
status = self.facematcher.analyse()
if status is not None:
return '{ "status":777245, "remark":"face finding failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
return '{ "status":777242, "remark":"face loading failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
status = self.facematcher.compute_scores()
if status is not None:
return '{ "status":777243, "remark":"face scoring failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
#status = self.facematcher.get_faces()
#if status is not None:
# return '{ "status":777245, "remark":"face finding failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
jsonstr = self.facematcher.get_scores()
#status = self.facematcher.compute_scores()
#if status is not None:
# return '{ "status":777243, "remark":"face scoring failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
jsonstr = self.facematcher.process()
return '{ "status":0, "remark":"OK", "data":%s }' % (jsonstr)
################
if "facematch" in self.device_list and self.conf["emulate_facematch"]==0:
from facematch import Facematch
self.devices["facematch"] = Facematch()
@cherrypy.expose
def facematch(self,dev1,dev2):
    """HTTP endpoint: match the faces captured by two devices.

    dev1/dev2 name the image sources ("regula" or "localcam"); any other
    value is rejected with an error JSON.

    Bug fixes: the original left img1/img2 unbound (NameError) when a
    device name was not recognised, and carried a second, unreachable
    duplicate of the emulate_facematch check.
    """
    if self.conf["emulate_facematch"]:
        return '{ "status":0, "remark":"OK(Emulated)", "data":{"device1":"%s","device2":"%s","device1_qual":123,"device2_qual":234,"match_score":600} }' % (dev1,dev2)
    # Map each known device label to the image file that device produces.
    dev_paths = {
        "regula": "/tmp/regula/Portrait_0.jpg",
        "localcam": "/tmp/localcam.png",
    }
    img1 = dev_paths.get(dev1)
    img2 = dev_paths.get(dev2)
    if img1 is None or img2 is None:
        # Reject unknown device names instead of raising NameError below.
        return '{ "status":777241, "remark":"unknown device", "guilty_param":"facematch", "guilty_value":"%s/%s" }' % (dev1, dev2)
    self.devices["facematch"].load(dev1, dev2, img1, img2)
    status = self.devices["facematch"].get_faces()
    if status is not None:
        return '{ "status":777242, "remark":"face matching failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
    status = self.devices["facematch"].compute_scores()
    if status is not None:
        return '{ "status":777243, "remark":"face scoring failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
    jsonstr = self.devices["facematch"].get_scores()
    return '{ "status":0, "remark":"OK", "data":%s }' % (jsonstr)
@cherrypy.expose