get around weird face_recognition issue re BGR vs RGB images for encodings

carl 2024-09-05 11:57:04 -03:00
parent cb8fd60779
commit 4276a46e25
4 changed files with 119 additions and 85 deletions
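
Background for the change below: OpenCV loads and captures images in BGR channel order, while face_recognition (dlib) expects RGB, so face_encodings() can come back empty for an otherwise usable frame. A minimal sketch of the conversion this commit applies in compare(), with an illustrative image path:

    import cv2
    import face_recognition

    img_bgr = cv2.imread("/tmp/localcam.png")             # OpenCV gives BGR
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)    # face_recognition wants RGB
    encodings = face_recognition.face_encodings(img_rgb)  # [] means no face could be encoded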

View File

@@ -100,6 +100,7 @@ do
         "face_recognition") LLP="$LLP:$LIB_DEEPFACE"
                             PYP="$PYP:$PYP_DEEPFACE"
                             ;;
+        "regula") ;;
         "camera") ;;
         *) echo "yoloserv does not implement backend $i. Edit /etc/ukdi.json::yolo_devices and try again."
            exit 1

View File

@@ -56,12 +56,13 @@ class FaceRecognition(FaceClass):
     face_match_json = None
     conf = []

-    def init(self,backend=None,model=None):
-        model = load_model("./emotion_detector_models/model.hdf5")
+    #def init():
+        #model = load_model("./emotion_detector_models/model.hdf5")

     # @doc find all the faces in the named image
     def detect(self, name):
+        self.tree["detectms"] = time.time()
         boxes = []
         self.boxes = []
         print("** face_recognition::detect ... %s" % name)
@@ -77,7 +78,8 @@ class FaceRecognition(FaceClass):
         except Exception as ex:
             self.errstr = "image processing exception at get_faces: "+str(ex)
             return '{ "status":222310, "remark":"image processing exception", "guilty_param":"error", "guilty_value":"%s" }' % str(ex)
-        return '{ "status":0, "remark":"OK", "faces":%d, "boxes":%s }' % (len(self.boxes), json.dumps(self.boxes))
+        self.tree["detectms"] = time.time() - self.tree["detectms"]
+        return '{ "status":0, "remark":"OK", "faces":%d, "boxes":%s, "time":%d }' % (len(self.boxes), json.dumps(self.boxes), self.tree["detectms"] )

     # @doc find the landmarks of the given face
@@ -102,9 +104,23 @@ class FaceRecognition(FaceClass):
     # @doc compare two named images, previously loaded
     def compare(self, name1, name2):
+        self.tree["comparems"] = time.time()
         print("** face_recognition::compare ... %s vs %s" % (name1,name2))
-        self.encs[name1] = face_recognition.face_encodings(self.imgs[name1])
-        self.encs[name2] = face_recognition.face_encodings(self.imgs[name2])
+        print("images available = ",self.imgs.keys())
+        print(">>>>>>>>>>>>>>>> "+name1, self.imgs[name1])
+        # CCG 2024-09-05 Convert images to RGB so face_recognition can make encodings.
+        # Oddly, this is only needed on the NUC (SMRT08); it works OK at home with the two commented lines below.
+        # Not sure why. Could be a lib thing. Worth keeping an eye on.
+        rgb = cv2.cvtColor(self.imgs[name1], cv2.COLOR_BGR2RGB)
+        self.encs[name1] = face_recognition.face_encodings(rgb)
+        rgb = cv2.cvtColor(self.imgs[name2], cv2.COLOR_BGR2RGB)
+        self.encs[name2] = face_recognition.face_encodings(rgb)
+        #self.encs[name1] = face_recognition.face_encodings(self.imgs[name1])
+        #self.encs[name2] = face_recognition.face_encodings(self.imgs[name2])
+        print("encodings available = ",self.encs.keys())
         if self.encs[name1]==[]:
             return '{ "status":14330, "remark":"could not encode image", "guilty_param":"img1", "guilty_value":"%s" }' % name1
         if self.encs[name2]==[]:
@@ -124,7 +140,8 @@ class FaceRecognition(FaceClass):
             print("** face_recognition::compare exception ... " + str(ex) )
             self.errstr = "image comparison exception at compute_scores: "+str(ex)
             return '{ "status":332410, "remark":"%s" }' % self.errstr
-        return '{ "status":0, "remark":"OK", "score":%d }' % self.match_score[0]
+        self.tree["comparems"] = time.time() - self.tree["comparems"]
+        return '{ "status":0, "remark":"OK", "score":%d, "detectms":%d, "comparems":%d }' % (self.match_score[0], self.tree["detectms"]*1000, self.tree["comparems"]*1000 )
@@ -138,6 +155,26 @@ if __name__ == '__main__':
         print(jsonstr)
     if sys.argv[1]=="messi":
+        d.load2("messi4","testimg/messi4.jpg","messi2","testimg/messi2.jpg")
+        d.detect("messi4")
+        d.boxscale("messi4",0.3)
+        d.ideal("messi4","messi4_rect","messi4_crop")
+        d.detect("messi2")
+        d.boxscale("messi2",0.3)
+        d.ideal("messi2","messi2_rect","messi2_crop")
+        d.save("messi4","/tmp")
+        d.save("messi4_rect","/tmp")
+        d.save("messi4_crop","/tmp")
+        d.save("messi2","/tmp")
+        d.save("messi2_rect","/tmp")
+        d.save("messi2_crop","/tmp")
+        print("XXXXXXXXXXXXXXXXXXXXXXXX")
+        d.stats("messi2_rect")
+        d.stats("messi2_crop")
+        d.compare("messi4_crop","messi2_rect")
+    if sys.argv[1]=="crowd":
         jsonstr = d.crowd_vs_govid("pic1", "testimg/messi4.jpg", 0, "pic2", "testimg/messi2.jpg", 0)
         print(jsonstr)

View File

@@ -4,12 +4,15 @@ import cv2
 import json
 import base64
 import time
+import numpy as np
 from copy import copy, deepcopy
 from matplotlib import pyplot as plt

 class FaceClass(object):
+    backend = None
+    model = None
     imgs = {}
     encs = {}
     faces = {}
@@ -17,7 +20,7 @@ class FaceClass(object):
     files = {}
     boxes = []
     errstr = ""
-    tree = { "device1":"NA", "device2":"NA", "threshold":380, "device1_qual":0.5, "device2_qual":0.5, "score":0 }
+    tree = { "device1":"NA", "device2":"NA", "threshold":380, "device1_qual":0.5, "device2_qual":0.5, "score":0, "detectms":0, "comparems":0 }

     # (ASCII-art section banner)
@@ -29,6 +32,8 @@ class FaceClass(object):
     # Prep tasks
     def init(self, backend=None, mod=None):
+        self.backend = backend
+        self.model = mod
         return None

     def clear(self):
@@ -82,7 +87,7 @@ class FaceClass(object):
     def ideal(self, name, rectname, cropname):
         found = -1
         biggest = -1
-        print("** faceclass::ideal ... %s with %d boxes" % (name, len(self.boxes) ))
+        print("** faceclass::ideal ... %s with %d boxes => %s + %s" % (name, len(self.boxes), rectname, cropname ))
         # resize boxes
         for i in range(len(self.boxes)):
             b = self.boxes[i]
@@ -125,21 +130,14 @@ class FaceClass(object):
     def compare(self,name1,name2):
         return '{ "status":88245, "remark":"override this!" }'

-    def detect_all(self):
-        n=0
-        self.load("localcam", "/tmp/localcam.png", "regula", "/tmp/regula/Portrait_0.jpg")
-        self.get_faces("localcam")
-        self.get_faces("regula")
-        self.get_base_face()
-        print("computed ... ", d.compute_scores("regula","localcam"))
-        print("scores ... " , d.get_scores())

-    # @doc This does everything for you. If you are smartserv, "crowd" means cam and "govid" means regula pic
-    def crowd_vs_govid(self, name1,file1,scale1, name2,file2,scale2):
+    # @doc This does everything for you.
+    # If you are smartserv, "crowd" means cam and "govid" means regula pic
+    def crowd_vs_govid(self, name1,file1,scale1str, name2,file2,scale2str):
         print("##1##")
-        t = time.time()
-        print ("///t = ", time.time() - t)
+        scale1 = float(scale1str)
+        scale2 = float(scale2str)
         self.load1(name1, file1)
         if scale1 !=0:
             self.shrink(name1,scale1)
@@ -147,15 +145,13 @@ class FaceClass(object):
         if json.loads(jsonstr)["status"]!=0:
             return jsonstr
         self.boxscale(name1,0.3)
-        self.ideal(name1,"pic1_rect","pic1_crop")
+        self.ideal(name1,name1+"_rect",name1+"_crop")
         self.save(name1,"/tmp")
-        self.save("pic1_rect","/tmp")
-        self.save("pic1_crop","/tmp")
+        self.save(name1+"_rect","/tmp")
+        self.save(name1+"_crop","/tmp")
         print(self.imgs.keys())
-        print ("///t = ", time.time() - t)

         print("##2##")
-        print ("///t = ", time.time() - t)
         self.load1(name2, file2)
         if scale2 !=0:
             self.shrink(name2,scale2)
@@ -164,17 +160,15 @@ class FaceClass(object):
         if json.loads(jsonstr)["status"]!=0:
             return jsonstr
         self.boxscale(name2,0.3)
-        self.ideal(name2,"pic2_rect","pic2_crop")
+        self.ideal(name2,name2+"_rect",name2+"_crop")
         self.save(name2,"/tmp")
-        self.save("pic2_rect","/tmp")
-        self.save("pic2_crop","/tmp")
+        self.save(name2+"_rect","/tmp")
+        self.save(name2+"_crop","/tmp")
         print(self.imgs.keys())
-        print ("///t = ", time.time() - t)

         print("##R##")
-        jsonstr = self.compare("pic1_crop","pic2_crop")
+        jsonstr = self.compare(name1+"_crop",name2+"_crop")
         print(jsonstr)
-        print ("///t = ", time.time() - t)

     # (ASCII-art section banner)
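
After the hunks above, crowd_vs_govid() derives its intermediate image names from the caller-supplied names (name+"_rect", name+"_crop") and takes the scale factors as strings, since they arrive as HTTP query parameters. A hedged usage sketch; the wrapper module name is an assumption, while the paths and scales match the regula/localcam values used in yoloserv below:

    from face_recognition_class import FaceRecognition  # module name is illustrative

    fr = FaceRecognition()
    fr.init()
    jsonstr = fr.crowd_vs_govid("localcam", "/tmp/localcam.png", "0.5",
                                "regula", "/tmp/regula/Portrait_0.jpg", "0.25")
    print(jsonstr)
    # intermediate images are saved to /tmp as localcam_rect.png, localcam_crop.png, regula_rect.png, ...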
@@ -210,7 +204,7 @@ class FaceClass(object):
     # @doc draw a box on an image
     def rect(self, name, x1, y1, x2, y2, color):
-        #print ("recting ",x1,y1,x2,y2)
+        print ("recting ",x1,y1,x2,y2)
         cv2.rectangle(self.imgs[name],(x1,y1),(x2,y2), color, 4)

     # @doc crop an image, allowing a gutter.
@@ -223,6 +217,8 @@ class FaceClass(object):
         print ("shrinking ",name)
         self.imgs[name] = cv2.resize(self.imgs[name],None,fx=skale,fy=skale)

     # (ASCII-art section banner)
@@ -242,22 +238,26 @@ class FaceClass(object):
     def save(self,which,dir,format="png"):
         cv2.imwrite(dir + "/" + which + "." + format, self.imgs[which])

+    def stats(self,which):
+        print(which + " stats....")
+        print(type(self.imgs[which]))
+        print(np.shape(self.imgs[which]))

 if __name__ == '__main__':
-    # tests
-    # big/clear big/clear small/side small/clear obscured ig card IR cam some rotated
-    picfiles = [ "gerrard.jpg", "ronaldo.jpg", "messi1.jpg", "messi2.jpg", "ox_matt.png", "ox_govid.jpg", "cg_ir.jpg", "fivepeople.jpg" ]
-    # Test the dlib image detector
     d = FaceClass()
-    # quick test
-    if sys.argv[1]=="quick":
-        d.visual = 1
-        d.load("messi1","messi2","testimg/messi1.jpg","testimg/messi2.jpg")
-        sys.exit(0)
+    d.load1("messi1","testimg/messi1.jpg")
+    d.load2("messi2","testimg/messi2.jpg", "messi3","testimg/messi3.jpg")
+    # look for pics in /tmp
+    d.save("messi1", "/tmp")
+    d.save("messi2", "/tmp")
+    d.save("messi3", "/tmp")

View File

@@ -59,15 +59,15 @@ class yoloserv(object):
         # Object (face) matching
         if "paravision" in self.devices:
             print("Loading paravision facematch...")
-            from para_facematch import Facematch
-            self.facematcher = Facematch()
+            from paravisionx import Paravision
+            self.facematcher = Paravision()
             self.facematcher.init()
         if "deepface" in self.devices:
             print("Loading deepface facematch...")
             from deepfacex import Deepfacex
             self.facematcher = Deepfacex()
-            self.facematcher.init("dlib","SFace")
+            self.facematcher.init("dlib","Facenet512")
         if "face_recognition" in self.devices:
             print("Loading face_recognition facematch...")
@@ -76,7 +76,7 @@ class yoloserv(object):
             self.facematcher.init()
         # Object (face) matching
-        if "facematch_open" in self.devices:
+        if "facematch" in self.devices:
             print("Loading paravision facematch...")
             from facematch_open import Facematch
             self.facematcher = Facematch()
@@ -130,7 +130,6 @@ class yoloserv(object):
     # @doc clear memory of all image data
     @cherrypy.expose
     def svc_init(self):
-        self.facematcher.init()
         return '{ "status":0, "remark":"OK" }'

     # @doc Acquire image -
@@ -186,9 +185,8 @@ class yoloserv(object):
         return self.facematcher.load2(name1, self.indir + infile1, name2, self.indir + infile2)

     @cherrypy.expose
-    def svc_detect_faces(self,infile):
-        nfaces = self.facematcher.detect_all(self.indir + infile)
-        return '{ "status":0, "remark":"found faces", "count":%d }' % (nfaces)
+    def svc_detect_faces(self,name):
+        return self.facematcher.detect(name)

     # @doc find all the faces in the named image that was loaded using the above calls (test OK CG 2024-0724)
     @cherrypy.expose
@@ -196,10 +194,10 @@ class yoloserv(object):
         return self.facematcher.get_faces(which)

     # @doc find the most prominent face in the named image that was loaded using the above calls (test OK CG 2024-0724)
-    # you can access the new ideal face (if present) with the image name "_ideal"
+    # you can access the new ideal face (if present) with the image name "_crop"
     @cherrypy.expose
-    def svc_ideal(self,which,rectname,cropname):
-        return self.facematcher.get_ideal(which,rectname,cropname)
+    def svc_ideal(self,which):
+        return self.facematcher.ideal(which,which+"_rect",which+"_crop")

     # @doc dumps the named image as an <img> tag for straight to HTML output (test OK CG 2024-0724)
     @cherrypy.expose
@@ -207,18 +205,15 @@ class yoloserv(object):
         buf = self.facematcher.dump64(which).decode()
         return "<html><img src='%s'/></html>" % buf

+    @cherrypy.expose
+    def svc_save_face(self,which):
+        return self.facematcher.save(which, self.conf["yolo_outdir"]);

     # Match faces together
     @cherrypy.expose
     def svc_compare(self,name1,name2):
         return self.facematcher.compare(name1,name2)

-    @cherrypy.expose
-    def svc_facematch(self,name1,infile1,name2,infile2):
-        self.facematcher.init()
-        return self.facematcher.get_faces(name1)
-        return self.facematcher.get_faces(name2)
-        self.facematcher.load2(name1, self.indir + infile1, name2, self.indir + infile2)
-        return self.facematcher.compare(name1,name2)

     @cherrypy.expose
@@ -237,14 +232,19 @@ class yoloserv(object):
     # Match faces together
     @cherrypy.expose
-    def svc_match_faces(self,infile1,infile2):
-        return self.facematcher.detect(infile1,infile2)
+    def svc_match_faces(self,dev1,fil1,scl1,dev2,fil2,scl2):
+        jsonstr = self.facematcher.crowd_vs_govid(dev1,self.conf["yolo_indir"]+fil1,scl1, dev2,self.conf["yolo_outdir"]+fil2,scl2)
+        obj = self.json2obj(jsonstr)
+        if obj["status"] > 0:
+            return jsonstr
+        return jsonstr

     def json2obj(self,jsonx):
         return json.loads(jsonx)

     # @doc put all the steps for a retail facematch into one convenient function
     @cherrypy.expose
-    def retail_facematch(self,dev1,dev2):
+    def svc_facematch(self,dev1,dev2):
         if self.facematcher is None:
             return '{ "status":777244, "remark":"no suitable yolo_device" }'
@@ -252,29 +252,25 @@ class yoloserv(object):
             return '{ "status":0, "remark":"OK(Emulated)", "data":{"device1":"%s","device2":"%s","device1_qual":123,"device2_qual":234,"score":600} }' % (dev1,dev2)

         if dev1 == "regula":
-            img1 = "/tmp/regula/Portrait_0.jpg"
+            fil1 = "/tmp/regula/Portrait_0.jpg"
+            scl1 = 0.25
         if dev1 == "localcam":
-            img1 = "/tmp/localcam.png"
+            fil1 = "/tmp/localcam.png"
+            scl1 = 0.5
         if dev2 == "regula":
-            img2 = "/tmp/regula/Portrait_0.jpg"
+            fil2 = "/tmp/regula/Portrait_0.jpg"
+            scl2 = 0.25
         if dev2 == "localcam":
-            img2 = "/tmp/localcam.png"
+            fil2 = "/tmp/localcam.png"
+            scl2 = 0.5
         if self.conf["emulate_facematch"]:
             return '{ "status":0, "remark":"OK", "data":{} }'

-        obj = self.json2obj(self.facematcher.load_faces(dev1, img1, dev2, img2))
-        if obj["status"] > 0:
-            return jsonx
-        obj = self.json2obj(self.facematcher.get_faces(dev1))
-        if obj["status"] > 0:
-            return jsonx
-        obj = self.json2obj(self.facematcher.get_faces(dev2))
-        if obj["status"] > 0:
-            return jsonx
-        obj = self.json2obj(self.facematcher.compute_scores(dev1,dev2))
-        if obj["status"] > 0:
-            return jsonx
-        jsonstr = self.facematcher.get_scores()
+        jsonstr = self.facematcher.crowd_vs_govid(dev1,fil1,scl1, dev2,fil2,scl2)
+        obj = self.json2obj(jsonstr)
+        if obj["status"] > 0:
+            return jsonstr
+        jsonstr = self.facematcher.scores()
         return '{ "status":0, "remark":"OK", "data": %s }' % (jsonstr)