lock in open source yoloserv
This commit is contained in:
parent 10798e3162
commit 6dc3331120
@@ -1,6 +1,7 @@
 import sys
 import json
 import os
+import cv2
 # Generic deepface
 from deepface import DeepFace
 from deepface.detectors import FaceDetector
@@ -26,7 +27,7 @@ class Deepfacex(FaceClass):
     fn512_model = None
     resnet_model = None
     visual = 0
-    tree = { "img1_faces":0, "img1_qual":0, "img2_faces":0, "img2_qual":0, "threshold":0, "score":0 }
+    tree = { "img1_faces":0, "img1_best":0, "img1_qual":0, "img2_faces":0, "img2_best": 0, "img2_qual":0, "threshold":0, "score":0, "matched":0 }

     def init(self, backend, model):
         print("Loading models...")
@@ -43,7 +44,7 @@ class Deepfacex(FaceClass):
         self.faces = {}

     def l2_normalize(x):
         return x / np.sqrt(np.sum(np.multiply(x, x)))


     def dlib_detector(self, name):
@@ -54,7 +55,12 @@ class Deepfacex(FaceClass):
         self.tree["img1_faces"] = len(dets)
         print("Number of faces detected in %s: %d" % (name, len(dets)))
         for i, d in enumerate(dets):
-            print("Detection %d: Left: %d Top: %d Right: %d Bottom: %d" % (i, d.left(), d.top(), d.right(), d.bottom() ) )
+            print("Detection %d: Left: %d Top: %d Right: %d Bottom: %d Confidence: %f" % (i, d.left(), d.top(), d.right(), d.bottom(), d.confidence() ) )
+        print("Biggest face was %d" % maxi )
+        return dets, scores, 0
+
+        #def crop(self,img,topleft,botright):
+


     def dlib_detector2(self, name):
@@ -63,10 +69,20 @@ class Deepfacex(FaceClass):
         #img = dlib.load_rgb_image(fname)
         dets, scores, idx = detector.run(self.imgs[name], UPSAMPLE, -1)
         print("Number of faces detected: %d" % len(dets))
-        if len(dets)>0:
-            print("Face scores = ",scores[0])
-        return dets, scores
+        for i, d in enumerate(dets):
+            #print(d.__dir__())
+            print("Detection %d: Left: %d Top: %d Right: %d Bottom: %d Confidence: %f" % (i, d.left(), d.top(), d.right(), d.bottom(), scores[i] ) )
+            cropped = self.crop(self.imgs[name], d.left(), d.top(), d.right(), d.bottom())
+            cv2.imwrite("/tmp/%s_%d.png" % (name,i), cropped)
+        return dets, scores, 0

+    def crop(self, img, x1,y1,x2,y2):
+        #shape(img)
+        if x1<0 or x2<0 or y1<0 or y2<0:
+            return img
+        print("cropping %d/%d to %d/%d" % (x1,y1, x2,y2) )
+        cropped = img[ x1:x2, y1:y2 ]
+        return cropped

     # Detects all the faces
     def detect_all(self, name, fname):
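For orientation, a minimal sketch of the dlib calls that dlib_detector2() leans on, written against dlib's stock frontal face detector. The image path, upsample count, and threshold below are illustrative assumptions, not values taken from this repository.

import cv2
import dlib

# Hypothetical test image; any face photo will do.
img = cv2.cvtColor(cv2.imread("/tmp/example.png"), cv2.COLOR_BGR2RGB)

detector = dlib.get_frontal_face_detector()
# run() returns rectangles, per-detection confidence scores, and sub-detector indices.
dets, scores, idx = detector.run(img, 1, -1)

for i, d in enumerate(dets):
    print("face %d: l=%d t=%d r=%d b=%d conf=%.3f" % (i, d.left(), d.top(), d.right(), d.bottom(), scores[i]))
    # numpy images index rows (y) first, then columns (x), when cropping a detection
    face = img[d.top():d.bottom(), d.left():d.right()]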
@@ -74,6 +90,7 @@ class Deepfacex(FaceClass):
         detector = FaceDetector.build_model(self.backend) #set opencv, ssd, dlib, mtcnn or retinaface
         self.faces[name] = FaceDetector.detect_faces(detector, self.backend, fname)
         print(" Found %d faces for %s" % (len(self.faces), name))
+        cv2.imwrite("/tmp/%s_det.png" % (name,i), self.faces[name])
         return len(self.faces[name])


@@ -84,16 +101,20 @@ class Deepfacex(FaceClass):
             detector_backend=self.backend, distance_metric="euclidean", enforce_detection=False, align=True, normalization="base")
         return verification

-    def facematch(self):
-        dets1, scores1 = self.dlib_detector2("localcam")
+    def facematch(self, name1, name2):
+        dets1, scores1, best1 = self.dlib_detector2(name1)
+        self.tree["img1_name"] = name1
         self.tree["img1_faces"] = len(dets1)
         if len(dets1) > 0:
-            self.tree["img1_qual"] = scores1[0]
+            self.tree["img1_best"] = best1
+            self.tree["img1_qual"] = scores1[best1]

-        dets2, scores2 = self.dlib_detector2("regula")
+        dets2, scores2, best2 = self.dlib_detector2(name2)
+        self.tree["imgs_name"] = name2
         self.tree["img2_faces"] = len(dets2)
         if len(dets2) > 0:
-            self.tree["img2_qual"] = scores2[0]
+            self.tree["img2_best"] = best2
+            self.tree["img2_qual"] = scores2[best2]

         if len(dets1) < 1:
             return '{ "status":787101, "remark":"no faces in cam image", "data":%s }' % (json.dumps(self.tree))
@@ -103,6 +124,9 @@ class Deepfacex(FaceClass):
         verif = d.process()
         self.tree["score"] = verif["distance"]
         self.tree["threshold"] = verif["threshold"]
+        if verif["distance"] > verif["threshold"]:
+            self.tree["matched"] = 1
+
         return '{ "status":0, "remark":"OK", "data":%s }' % (json.dumps(self.tree))

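The distance/threshold pair read out of process() here matches what DeepFace.verify() reports; below is a minimal sketch of that call, using the keyword arguments visible in the hunk above. The image paths are placeholders, and in deepface a pair is normally treated as verified when distance <= threshold.

from deepface import DeepFace

# Placeholder image paths; substitute any two portraits.
result = DeepFace.verify(img1_path="/tmp/a.png", img2_path="/tmp/b.png",
                         model_name="SFace", distance_metric="euclidean",
                         enforce_detection=False, align=True, normalization="base")

# The returned dict carries the measured distance, the model's decision
# threshold, and the boolean verdict derived from them.
print(result["distance"], result["threshold"], result["verified"])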
@@ -119,12 +143,14 @@ if __name__ == '__main__':

     # Test the dlib image detector
     d = Deepfacex()
-    d.init("dlib","SFace")
+    d.init("yolov8","SFace")

     # kiosk test
     if sys.argv[1]=="kiosk":
         print(d.load("localcam","regula","/tmp/localcam.png","/tmp/regula/Portrait_0.jpg"))
-        print (d.facematch())
+        d.detect_all("localcam", "/tmp/localcam.png")
+        d.detect_all("regula", "/tmp/regula/Portrait_0.jpg")
+        #print (d.facematch("localcam","regula"))

     # quick test
     if sys.argv[1]=="quick":
@@ -39,131 +39,98 @@ import os
 import face_recognition
 import json
 import urllib.request
+from faceclass import FaceClass

 import time


-class FaceRecognition(object):
+class FaceRecognition(FaceClass):

     name = "face_recognition_face_match"
     IN_ENGLISH = "A class for checking the similarity between two faces."
-    dev1 = "some_device"
-    dev2 = "some_other_device"
     id_qual = 0.7
     photo_qual = 0.7
     face_match_json = None
     conf = []
-    imgs = {}

-    # Load the config
-    def init(self):
-        with open("/etc/ukdi.json","r") as f:
-            self.conf = json.loads(f.read())


-    #id_image_filepath = '/home/lucas-acm/Dispension/UKDI_testdata/LW_cardscan/Portrait_0.jpg'
-    #photo_image_filepath = '/home/lucas-acm/Dispension/UKDI_testdata/LW.jpg'
-    #id_image_filepath = '/tmp/regula/Portrait_0.jpg'
-    #photo_image_filepath = '/home/disp/Pictures/realsense_test.jpg'
-
-    def load(self, dev, image_filepath):
-        self.dev1 = dev1
-        self.dev2 = dev2
-        print("id_image_filepath: " + id_image_filepath)
-        try:
-            # load/encode pic1
-            img1 = cv2.imread(id_image_filepath)
-            self.id_image = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
-            # load/encode pic2
-            img2 = cv2.imread(photo_image_filepath)
-            self.photo_image = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
-            #cv2.imshow("id..",self.id_image)
-            #cv2.imshow("id..",self.photo_image)
-            #cv2.waitKey(0)
-            return True
-        except:
-            return None
-
-
-    def load2(self, dev1, dev2, id_image_filepath, photo_image_filepath):
-        self.dev1 = dev1
-        self.dev2 = dev2
-        print("id_image_filepath: " + id_image_filepath)
-        try:
-            # load/encode pic1
-            img1 = cv2.imread(id_image_filepath)
-            self.id_image = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
-            # load/encode pic2
-            img2 = cv2.imread(photo_image_filepath)
-            self.photo_image = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
-            #cv2.imshow("id..",self.id_image)
-            #cv2.imshow("id..",self.photo_image)
-            #cv2.waitKey(0)
-            return True
-        except:
-            return None
-
-
-    def get_faces(self):
-        print("** get_faces ...")
+    def get_faces(self, name):
+        print("** get_faces ... %s" % name)
         try:
             # Get all faces from images with qualities, landmarks, and embeddings
-            boxes1 = face_recognition.face_locations(self.photo_image, number_of_times_to_upsample=2, model='hog')
-            boxes2 = face_recognition.face_locations(self.id_image, number_of_times_to_upsample=2, model='hog')
-            #print("n boxes = ", boxes)
-            print("found %d + %d boxes" % (len(boxes1), len(boxes2)))
+            boxes = face_recognition.face_locations(self.imgs[name], number_of_times_to_upsample=2, model='hog')
+            print("found %d boxes for %s" % (len(boxes), name) )
             # Get numerical representation of faces (required for face match)
-            self.id_enc = face_recognition.face_encodings(self.id_image)[0]
-            self.photo_enc = face_recognition.face_encodings(self.photo_image)[0]
-            #print(self.id_enc)
-            #print(self.photo_enc)
+            self.encs[name] = face_recognition.face_encodings(self.imgs[name])[0]
+            #print("encoding for %s : " % name, self.encs[name])
         except Exception as ex:
-            return "image processing exception "+str(ex)
-        return len(boxes1)+len(boxes2)
+            self.errstr = "image processing exception at get_faces: "+str(ex)
+            return -1
+        return len(boxes)


     # return " id=%d photo=%d result=%d " % (self.id_face, self.photo_face, len(self.image_inference_result))


-    def get_landmarks(self):
-        self.landmarks = face_recognition.face_landmarks(self.photo_image)
+    def get_landmarks(self, name):
+        self.landmarks = face_recognition.face_landmarks(self.imgs[name])
         print(self.landmarks)
+        return self.landmarks


-    def compute_scores(self):
-        print("** computing...")
+    def compute_scores(self, name1, name2):
+        print("** computing ... %s vs %s" % (name1,name2))
         try:
-            res = face_recognition.compare_faces([self.id_enc], self.photo_enc)
+            res = face_recognition.compare_faces([self.encs[name1]], self.encs[name2])
             print("Match is ",res)
-            self.match_score = 1000 * (1 - face_recognition.face_distance([self.id_enc], self.photo_enc))
+            self.match_score = 1000 * (1 - face_recognition.face_distance([self.encs[name1]], self.encs[name2]))
             print("Score is ",self.match_score)

             # Create .json
-            self.face_match_json = {"device1":self.dev1,
-                                    "device2":self.dev2,
-                                    "passmark":380,
-                                    "device1_qual":0.5,
-                                    "device2_qual":0.5,
-                                    "match_score":self.match_score[0]}
+            self.tree["score"] = self.match_score[0]

         except Exception as ex:
-            return "image comparison exception "+str(ex)
+            self.errstr = "image comparison exception at compute_scores: "+str(ex)
+            return -1
+        return self.match_score

     def get_scores(self):
-        return json.dumps(self.face_match_json)
+        return json.dumps(self.tree)


+    def detect_all(self):
+        n=0
+        self.load("localcam","regula", "/tmp/localcam.png", "/tmp/regula/Portrait_0.jpg")
+        self.get_faces("localcam")
+        self.get_faces("regula")
+        print("computed ... ", d.compute_scores("regula","localcam"))
+        print("scores ... " , d.get_scores())




 if __name__ == '__main__':

     d = FaceRecognition()

+    if sys.argv[1]=="kiosk":
+        # lfw
+        n=0
+        d.load("localcam","regula", "/tmp/localcam.png", "/tmp/regula/Portrait_0.jpg")
+        d.get_faces("localcam")
+        d.get_faces("regula")
+        print("computed ... ", d.compute_scores("regula","localcam"))
+        print("scores ... " , d.get_scores())
+        #print(d.get_landmarks("localcam"))
+        #print(d.get_landmarks("regula"))
+        sys.exit(0)


     if sys.argv[1]=="match":
         # lfw
         n=0
         print("LFW Matching")
         for lfw in sorted(os.listdir("/home/carl/Downloads/lfw/0002")):
-            d.load2("0001","0002", "/home/carl/Downloads/lfw/0001/" + lfw, "/home/carl/Downloads/lfw/0002/" + lfw)
+            d.load2("localcam","regula", "/home/carl/" + lfw, "/home/carl/Downloads/lfw/0002/" + lfw)
             d.get_faces()
             d.compute_scores()
             print(d.get_scores())
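For context, a minimal sketch of the face_recognition calls this class wraps, including the 1000 * (1 - distance) rescaling used in compute_scores(); the file paths are placeholder assumptions.

import face_recognition

# Placeholder inputs; any two portrait files will do.
img1 = face_recognition.load_image_file("/tmp/a.jpg")
img2 = face_recognition.load_image_file("/tmp/b.jpg")

boxes1 = face_recognition.face_locations(img1, number_of_times_to_upsample=2, model="hog")
boxes2 = face_recognition.face_locations(img2, number_of_times_to_upsample=2, model="hog")

# 128-d embeddings for the first face found in each image.
enc1 = face_recognition.face_encodings(img1)[0]
enc2 = face_recognition.face_encodings(img2)[0]

# face_distance returns one euclidean distance per known encoding (smaller = more similar).
dist = face_recognition.face_distance([enc1], enc2)[0]
score = 1000 * (1 - dist)
print(len(boxes1), len(boxes2), dist, score)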
@@ -1,17 +1,25 @@
 import sys
 import os
 import cv2
+import json
 from matplotlib import pyplot as plt

 class FaceClass(object):

     imgs = {}
+    encs = {}
     faces = {}
     visual = 0
     imfiles = []
     imnames = {}
+    errstr = ""
+    tree = { "device1":"NA", "device2":"NA", "threshold":380, "device1_qual":0.5, "device2_qual":0.5, "score":0 }


     # Prep tasks
+    def init(self):
+        return None
+
+
     # Load a pics using the device label
     def load1(self, name,fname):
@@ -26,10 +34,10 @@ class FaceClass(object):
         print("FaceClass loading files ....................... ")
         if not os.path.isfile(fname1):
             print("Cant access file ",fname1)
-            return False
+            return -1
         if not os.path.isfile(fname2):
             print("Cant access file ",fname2)
-            return False
+            return -1
         self.imfiles.append(fname1)
         self.imfiles.append(fname2)
         self.imnames[name1] = fname1
@@ -55,14 +63,24 @@ class FaceClass(object):


     # Find all faces
+    def get_faces(self,name):
+        return -1

     # Find best face
+    def get_best_face(self):
+        return None

     # Find landmarks
+    def get_landmarks(self):
+        return '{ "status": 9999 }'

     # Find metadata (age etc)
+    def get_metadata(self,name):
+        return None

     # Match two faces
+    def process(self,name1,name2):
+        return None



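A minimal sketch of how the FaceClass stubs above are presumably meant to be specialised; the subclass name and its trivial bodies are illustrative assumptions, not code from this commit.

from faceclass import FaceClass

class DummyMatcher(FaceClass):
    # Report a fixed face count instead of the base class sentinel of -1.
    def get_faces(self, name):
        return 1

    # Fill the shared result tree and hand it back.
    def process(self, name1, name2):
        self.tree["device1"] = name1
        self.tree["device2"] = name2
        self.tree["score"] = 500
        return self.tree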
@@ -66,7 +66,7 @@ class yoloserv(object):
             self.facematcher.init("dlib","SFace")
         if "face_recognition" in self.devices:
             print("Loading deepface facematch...")
-            from face_recognition import FaceRecognition
+            from face_recognitionx import FaceRecognition
             self.facematcher = FaceRecognition()
             self.facematcher.init()

@@ -179,7 +179,7 @@ class yoloserv(object):
             return '{ "status":777244, "remark":"suitable yolo_device" }'

         if self.conf["emulate_facematch"]:
-            return '{ "status":0, "remark":"OK(Emulated)", "data":{"device1":"%s","device2":"%s","device1_qual":123,"device2_qual":234,"match_score":600} }' % (dev1,dev2)
+            return '{ "status":0, "remark":"OK(Emulated)", "data":{"device1":"%s","device2":"%s","device1_qual":123,"device2_qual":234,"score":600} }' % (dev1,dev2)

         if dev1 == "regula":
             img1 = "/tmp/regula/Portrait_0.jpg"
@@ -195,21 +195,14 @@ class yoloserv(object):
         status = self.facematcher.load(dev1, dev2, img1, img2)
         if not status:
             return '{ "status":777242, "remark":"face loading failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
-        #status = self.facematcher.analyse()
-        #if status is not None:
-        #    return '{ "status":777242, "remark":"face loading failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
-        #status = self.facematcher.get_faces()
-        #if status is not None:
-        #    return '{ "status":777245, "remark":"face finding failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
+        if self.facematcher.get_faces(dev1) < 1:
+            return '{ "status":777243, "remark":"face loading failed", "guilty_param":"image", "guilty_value":"%s" }' % (dev1)
+        if self.facematcher.get_faces(dev2) < 1:
+            return '{ "status":777244, "remark":"face loading failed", "guilty_param":"image", "guilty_value":"%s" }' % (dev2)
+        if self.facematcher.compute_scores(dev1,dev2) < 1:
+            return '{ "status":777245, "remark":"face matching failed", "guilty_param":"image", "guilty_value":"%s" }' % (self.facematcher.errstr)
+        jsonstr = self.facematcher.get_scores()
+        return '{ "status":0, "remark":"OK", "data": %s }' % (jsonstr)

-        #status = self.facematcher.compute_scores()
-        #if status is not None:
-        #    return '{ "status":777243, "remark":"face scoring failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)

-        jsonstr = self.facematcher.process()
-        return '{ "status":0, "remark":"OK", "data":%s }' % (jsonstr)


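The facematch path now answers with a JSON string carrying a numeric status and the matcher's tree under "data". Below is a minimal sketch of consuming such a reply; the sample string is modelled on the emulated response above and all values are illustrative.

import json

reply = '{ "status":0, "remark":"OK", "data":{"device1":"regula","device2":"localcam","device1_qual":123,"device2_qual":234,"score":600} }'

msg = json.loads(reply)
if msg["status"] == 0:
    print("match score:", msg["data"]["score"])
else:
    print("facematch failed:", msg["remark"])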