#!/usr/bin/python
# --- src/face_recognitionx.py (new file, index 000000000..8f7b6e2cd) ---
"""
#
# DISPENSION CONFIDENTIAL
#
# [2020] - [2021] Dispension Industries Limited.
# Portions Copyright © 2014-2020 Atlantean Technical Solutions Limited
# with full rights granted to Dispension & Successors.
#
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Dispension Industries Limited.
# The intellectual and technical concepts contained
# herein are proprietary to Dispension Industries Limited
# and its suppliers and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Dispension Industries Limited.
#

Old:
Use Paravision recognition SDK to compare two images of faces
Returns face quality scores of both images and a match score
Quality score range from 0 to 1, where 1 is highest quality for face recognition / matching
Match score ranges from 400 to 700, where higher score is higher chance of match

New:
As of Dec 2022, use an open source equivalent to paravision
"""
# Build prerequisites:
#   sudo apt install cmake            (needed to build dlib for face_recognition)
#   sudo pip3 install opencv-python
#   sudo pip3 install face_recognition

import cv2
import sys
import os
import face_recognition
import json
import urllib.request

import time


class FaceRecognition(object):
    """Compare two face images with the open-source face_recognition library.

    Produces a JSON report containing a match score scaled to ~0-1000
    (1000 * (1 - face_distance), higher is a better match) plus placeholder
    per-device quality values.
    """

    name = "face_recognition_face_match"
    IN_ENGLISH = "A class for checking the similarity between two faces."
    # NOTE(review): these are class attributes shared by every instance;
    # the mutable ones (conf, imgs) alias across instances -- confirm the
    # class is only ever used as a single instance.
    dev1 = "some_device"
    dev2 = "some_other_device"
    id_qual = 0.7          # placeholder quality for the ID-document image
    photo_qual = 0.7       # placeholder quality for the live photo
    face_match_json = None  # last report built by compute_scores()
    conf = []
    imgs = {}

    def init(self):
        """Load the site configuration from /etc/ukdi.json."""
        with open("/etc/ukdi.json", "r") as f:
            self.conf = json.loads(f.read())

    def load(self, dev, image_filepath):
        """Load a single image for device ``dev`` as the live photo.

        BUG FIX: the original body referenced undefined names (dev1, dev2,
        id_image_filepath, photo_image_filepath) copy-pasted from load2(),
        so it always raised NameError into the bare except and returned
        None.  It now loads the one image it was given.

        Returns True on success, None on failure (kept for compatibility).
        """
        self.dev2 = dev
        print("image_filepath: " + image_filepath)
        try:
            img = cv2.imread(image_filepath)
            # face_recognition expects RGB; cv2.imread yields BGR
            self.photo_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return True
        except Exception:
            return None

    def load2(self, dev1, dev2, id_image_filepath, photo_image_filepath):
        """Load the ID image and the live photo for the two named devices.

        Returns True on success, None if either image cannot be read or
        converted (cv2.imread returns None for unreadable files, which
        makes cvtColor raise).
        """
        self.dev1 = dev1
        self.dev2 = dev2
        print("id_image_filepath: " + id_image_filepath)
        try:
            img1 = cv2.imread(id_image_filepath)
            self.id_image = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img2 = cv2.imread(photo_image_filepath)
            self.photo_image = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
            return True
        except Exception:  # narrowed from a bare except: don't mask SystemExit/KeyboardInterrupt
            return None

    def get_faces(self):
        """Locate faces in both images and compute their encodings.

        Returns the total number of face boxes found across both images,
        or an error string on failure (face_encodings()[0] raises
        IndexError when no face was found).
        """
        print("** get_faces ...")
        try:
            boxes1 = face_recognition.face_locations(self.photo_image, number_of_times_to_upsample=2, model='hog')
            boxes2 = face_recognition.face_locations(self.id_image, number_of_times_to_upsample=2, model='hog')
            print("found %d + %d boxes" % (len(boxes1), len(boxes2)))
            # Numerical embeddings are required for the match step
            self.id_enc = face_recognition.face_encodings(self.id_image)[0]
            self.photo_enc = face_recognition.face_encodings(self.photo_image)[0]
        except Exception as ex:
            return "image processing exception " + str(ex)
        return len(boxes1) + len(boxes2)

    def get_landmarks(self):
        """Find facial landmarks in the live photo and return them.

        BUG FIX: originally printed the landmarks but returned None, so the
        caller's ``print(d.get_landmarks())`` always printed "None".
        """
        self.landmarks = face_recognition.face_landmarks(self.photo_image)
        print(self.landmarks)
        return self.landmarks

    def compute_scores(self):
        """Compare the two encodings and build the face_match_json report.

        match_score = 1000 * (1 - face_distance); face_distance returns a
        numpy array, hence the [0] when storing the scalar in the report.
        Returns None on success, an error string on failure.
        """
        print("** computing...")
        try:
            res = face_recognition.compare_faces([self.id_enc], self.photo_enc)
            print("Match is ", res)
            self.match_score = 1000 * (1 - face_recognition.face_distance([self.id_enc], self.photo_enc))
            print("Score is ", self.match_score)

            self.face_match_json = {"device1": self.dev1,
                                    "device2": self.dev2,
                                    "passmark": 380,      # score a pair must beat to count as a match
                                    "device1_qual": 0.5,  # this backend does not compute quality
                                    "device2_qual": 0.5,
                                    "match_score": self.match_score[0]}
        except Exception as ex:
            return "image comparison exception " + str(ex)

    def get_scores(self):
        """Return the face_match_json report serialised as a JSON string."""
        return json.dumps(self.face_match_json)


if __name__ == '__main__':

    d = FaceRecognition()

    # Guard argv access: running with no arguments used to raise IndexError.
    if len(sys.argv) > 1 and sys.argv[1] == "match":
        # LFW pairs: the same filename exists under .../0001 and .../0002
        n = 0
        print("LFW Matching")
        for lfw in sorted(os.listdir("/home/carl/Downloads/lfw/0002")):
            d.load2("0001", "0002",
                    "/home/carl/Downloads/lfw/0001/" + lfw,
                    "/home/carl/Downloads/lfw/0002/" + lfw)
            d.get_faces()
            d.compute_scores()
            print(d.get_scores())
            print(d.get_landmarks())
            n += 1
            if n > 1:
                sys.exit(0)

# --- src/faceclass.py (new file, index 000000000..35918891f) ---
import sys
import os
import json  # BUG FIX: FaceClass.init() uses json but the module never imported it
import cv2
from matplotlib import pyplot as plt


class FaceClass(object):

    imgs = {}   # device label -> loaded BGR image
    faces = {}
    visual = 0  # when truthy, loaded image pairs are displayed

    # Prep tasks

    # Load a pic
using the device label + def load1(self, name,fname): + if not os.path.isfile(fname): + return False + self.imgs[name] = cv2.imread(fname) + #print(" Loaded %s from file %s" % (name, fname)) + return True + + + def load(self, name1,name2,fname1,fname2): + if not os.path.isfile(fname1): + return False + if not os.path.isfile(fname2): + return False + self.imgs[name1] = cv2.imread(fname1) + self.imgs[name2] = cv2.imread(fname2) + if self.visual: + p1 = plt.imshow(name1, self.imgs[name1]) + p2.imshow(name2, self.imgs[name2]) + p1.show() + #print(" Loaded %s from file %s" % (name, fname)) + return True + + + def box(self, name, x, y, w, h): + cv2.rectangle(self.imgs[name],(x,y),(x+w,y+h), (0,255,0), 4) + + + # Load the config + def init(self): + with open("/etc/ukdi.json","r") as f: + self.conf = json.loads(f.read()) + + + # Find all faces + + # Find best face + + # Find landmarks + + # Find metadata (age etc) + + # Match two faces + + + + +if __name__ == '__main__': + # tests + # big/clear big/clear small/side small/clear obscured ig card IR cam some rotated + picfiles = [ "gerrard.jpg", "ronaldo.jpg", "messi1.jpg", "messi2.jpg", "ox_matt.png", "ox_govid.jpg", "cg_ir.jpg", "fivepeople.jpg" ] + + # Test the dlib image detector + d = FaceClass() + + # quick test + if sys.argv[1]=="quick": + d.visual = 1 + d.load("messi1","messi2","testimg/messi1.jpg","testimg/messi2.jpg") + sys.exit(0) diff --git a/src/insightfacex.py b/src/insightfacex.py new file mode 100644 index 000000000..8d1d2cd95 --- /dev/null +++ b/src/insightfacex.py @@ -0,0 +1,15 @@ +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + + +class InsightFace(object): + + app = None + + def init(self): + # Method-1, use FaceAnalysis + self.app = FaceAnalysis(allowed_modules=['detection']) # enable detection model only + self.app.prepare(ctx_id=0, det_size=(640, 640)) diff --git a/src/paravisionx.py 
# --- src/paravisionx.py (new file, index 000000000..8df497625) ---
#
# Paravision based face matcher
#
import json
import os  # BUG FIX: loadiii() uses os.path but os was never imported

from paravision.recognition.exceptions import ParavisionException
from paravision.recognition.engine import Engine
from paravision.recognition.sdk import SDK
import paravision.recognition.utils as pru
#from openvino.inference_engine import Engineq

#from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID


class Facematch(object):
    """Face matcher backed by the Paravision recognition SDK.

    Quality scores range 0..1; match scores roughly 400..700 with higher
    meaning a more likely match (per the project's original notes).
    """

    def init(self):
        """Create the SDK with automatic engine selection.

        BUG FIX: a failed SDK construction was silently swallowed, so later
        calls died with an unexplained AttributeError.  The failure is now
        logged and self.sdk is set to None so callers can detect it.
        """
        print("@@@ initialising paravision")
        try:
            self.sdk = SDK(engine=Engine.AUTO)
        except ParavisionException as ex:
            print("paravision SDK initialisation failed:", ex)
            self.sdk = None

    def loadiii(self, imgpath):
        """Read one image from disk; returns False if the path is missing."""
        if not os.path.exists(imgpath):
            print("File not found ", imgpath)
            return False
        self.imgpath = imgpath
        self.image = pru.load_image(imgpath)
        print(self.image)
        return True

    def processiii(self):
        """Score the most prominent face of the loaded image.

        Stores the face embedding in self.model and returns the quality
        score scaled to 0..1000.
        """
        # Get all faces metadata
        print("Finding faces in %s" % (self.imgpath))
        faces = self.sdk.get_faces([self.image], qualities=True, landmarks=True, embeddings=True)
        print("Getting metadata")
        inferences = faces.image_inferences
        print("Getting best face")
        ix = inferences[0].most_prominent_face_index()
        print("Getting a mathematical mode of that best face")
        self.model = inferences[0].faces[ix].embedding
        print("Getting image quality scores..")
        self.score = round(1000 * inferences[0].faces[ix].quality)
        print("Score was %d" % (self.score))
        return self.score

    def compare(self, other):
        """Match score between this face model and another Facematch's model."""
        return self.sdk.get_match_score(self.model, other.model)

    # ---- two-image (ID document vs live photo) pipeline ----

    def load(self, dev1, dev2, id_image_filepath, photo_image_filepath):
        """Load the ID image and the live photo for the two named devices.

        Returns None on success, an error string on failure.
        BUG FIX: the error paths returned a (message, exception) tuple
        where every other error path in this class returns a string.
        """
        print("## loading images", dev1, dev2)
        self.dev1 = dev1
        self.dev2 = dev2
        try:
            self.id_image = pru.load_image(id_image_filepath)
        except Exception as e:
            return "id image loading failed " + str(e)
        try:
            self.photo_image = pru.load_image(photo_image_filepath)
        except Exception as e:
            return "client image loading failed " + str(e)
        return None

    def get_faces(self):
        """Find the most prominent face in each image and keep its embedding.

        Returns None on success, a diagnostic string on failure.
        """
        try:
            # Get all faces from images with qualities, landmarks, and embeddings
            print("Finding faces...")
            self.inference_result = self.sdk.get_faces([self.id_image, self.photo_image], qualities=True, landmarks=True, embeddings=True)
            print("Inferences...")
            self.image_inference_result = self.inference_result.image_inferences
            # BUG FIX: the <2 check originally ran only after indexing [1],
            # which would have raised IndexError first; check length up front.
            if len(self.image_inference_result) < 2:
                return "ID or human face could not be recognised"

            # Get most prominent face in each image
            print("Most prominent...")
            self.id_face = self.image_inference_result[0].most_prominent_face_index()
            self.photo_face = self.image_inference_result[1].most_prominent_face_index()
            if self.id_face < 0:
                return "no id face found"
            if self.photo_face < 0:
                return "no live face found"

            # Numerical representation of each face (required for the match)
            print("stats...")
            self.id_emb = self.image_inference_result[0].faces[self.id_face].embedding
            self.photo_emb = self.image_inference_result[1].faces[self.photo_face].embedding

        except Exception as ex:
            return "image processing exception " + str(ex)

        return None

    def compute_scores(self):
        """Compute both quality scores and the match score; build face_match_json.

        Returns None on success, the exception text on failure.
        """
        try:
            # Image quality scores (how 'good' each face is), rounded to 3 dp
            self.id_qual = round(self.image_inference_result[0].faces[self.id_face].quality, 3)
            self.photo_qual = round(self.image_inference_result[1].faces[self.photo_face].quality, 3)

            # Face match score
            self.match_score = self.sdk.get_match_score(self.id_emb, self.photo_emb)

            self.face_match_json = {"device1": self.dev1,
                                    "device2": self.dev2,
                                    "passmark": 500,  # score a pair must beat to count as a match
                                    "device1_qual": self.id_qual,
                                    "device2_qual": self.photo_qual,
                                    "match_score": self.match_score}

            # NOTE(review): the commented-out urllib notification to core was
            # left out intentionally; re-add it behind config when needed.

        except Exception as ex:
            return str(ex)

    def get_scores(self):
        """Return the face_match_json report serialised as a JSON string."""
        return json.dumps(self.face_match_json)