new dev towards ad traffic recognition

This commit is contained in:
carl 2024-09-08 22:24:43 -03:00
parent 7f2f669e29
commit b69c4f9ecc
8 changed files with 94 additions and 144 deletions

BIN
models/emotion_model.hdf5 Normal file

Binary file not shown.

View File

@@ -101,6 +101,7 @@ do
PYP="$PYP:$PYP_DEEPFACE"
;;
"regula") ;;
"traffic") ;;
"camera") ;;
*) echo "yoloserv does not implement backend $i. Edit /etc/ukdi.json::yolo_devices and try again."
exit 1
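The case statement above dispatches on the backend names read from /etc/ukdi.json::yolo_devices, so turning the new backend on is a config edit. A hypothetical fragment (this commit does not show the real file layout, so the shape is an assumption):

{
    "yolo_devices": [ "regula", "traffic", "camera" ]
}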

View File

@@ -80,10 +80,9 @@ class Deepfacex(FaceClass):
return json.dumps(verification)
def metadata(self,name):
f1 = "/tmp/%s.png" % (name)
metadata = DeepFace.analyze(img_path = f1, actions = ["age", "gender", "emotion", "race"])
return json.dumps(metadata)
def analyze(self,name):
result = DeepFace.analyze(self.imgs[name], actions=['age', 'gender', 'emotion', 'race'], enforce_detection=False)
return result
#
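For context on the metadata/analyze pair above: DeepFace.analyze accepts a file path (img_path) or an in-memory image, and depending on the deepface version returns either a single dict or a list of per-face dicts. A minimal sketch, with a hypothetical input path:

from deepface import DeepFace

result = DeepFace.analyze(img_path="/tmp/sample.png",
                          actions=["age", "gender", "emotion", "race"],
                          enforce_detection=False)
faces = result if isinstance(result, list) else [result]  # shape varies by version
for f in faces:
    print(f["age"], f["dominant_emotion"], f["dominant_race"])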

View File

@@ -38,8 +38,11 @@ import sys
import os
import face_recognition
import json
import numpy as np
import urllib.request
from faceclass import FaceClass
from keras.models import load_model
import time
#
@@ -59,8 +62,12 @@ class FaceRecognition(FaceClass):
#def init():
#model = load_model("./emotion_detector_models/model.hdf5")
#def prep_detectors(self):
# @doc find all the faces in the named image
# @doc find all the faces in the named image.
# The detectors tend to return proprietary formats, so this "detect" method
# is going to be new for each implementation of FaceClass
def detect(self, name):
self.tree["detectms"] = time.time()
boxes = []
@@ -82,24 +89,25 @@ class FaceRecognition(FaceClass):
return '{ "status":0, "remark":"OK", "faces":%d, "boxes":%s, "time":%d }' % (len(self.boxes), json.dumps(self.boxes), self.tree["detectms"] )
# @doc find the landmarks of the given face
# @doc find the landmarks of the given face (eyes, mouth, nose etc)
def landmarks(self, name):
landmarks = face_recognition.face_landmarks(self.imgs[name])
return '{ "status":0, "remark":"OK", "landmarks":%s }' % json.dumps(landmarks)
# @doc find the metadata of the given face
def metadata(self, name):
# @doc find the metadata of the given face (emotion, age, gender, race)
def metadata(self, npimg):
model = load_model("models/emotion_model.hdf5")
print(time.time())
emotion_dict= {'Angry': 0, 'Sad': 5, 'Neutral': 4, 'Disgust': 1, 'Surprise': 6, 'Fear': 2, 'Happy': 3}
face_image = cv2.imread("../test_images/39.jpg")
face_image = cv2.resize(face_image, (48,48))
face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
face_image = np.reshape(face_image, [1, face_image.shape[0], face_image.shape[1], 1])
model = load_model("./emotion_detector_models/model.hdf5")
predicted_class = np.argmax(model.predict(face_image))
im = cv2.resize(npimg, (64, 64))
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im = np.reshape(im, [1, im.shape[0], im.shape[1], 1])
predicted_class = np.argmax(model.predict(im))
label_map = dict((v,k) for k,v in emotion_dict.items())
predicted_label = label_map[predicted_class]
return '{ "status":88324, "remark":"override this", "landmarks":%s }' % json.dumps(landmarks)
print(time.time())
return predicted_label
# @doc compare two named images, previously loaded
@@ -173,8 +181,11 @@ if __name__ == '__main__':
d.stats("messi2_crop")
d.compare("messi4_crop","messi2_rect")
if sys.argv[1]=="traffic":
jsonstr = d.traffic("traffic4", "testimg/messi4.jpg")
print(jsonstr)
if sys.argv[1]=="crowd":
if sys.argv[1]=="group":
jsonstr = d.crowd_vs_govid("pic1", "testimg/messi4.jpg", 0, "pic2", "testimg/messi2.jpg", 0)
print(jsonstr)
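The __main__ harness dispatches on sys.argv[1], so the new traffic path can be smoke-tested from the shell; the script name below is a placeholder, since the file name is not shown in this view:

python3 facerecognition.py traffic   # prints the per-face demographic JSON for testimg/messi4.jpg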

View File

@@ -15,17 +15,19 @@ class FaceClass(object):
model = None
imgs = {}
encs = {}
faces = {}
crops = {}
visual = 0
files = {}
boxes = []
jsonx = ""
errstr = ""
vidcap = None
tree = { "device1":"NA", "device2":"NA", "threshold":380, "device1_qual":0.5, "device2_qual":0.5, "score":0, "detectms":0, "comparems":0 }
def json2obj(self,jsonx):
print(jsonx)
self.jsonx = jsonx
return json.loads(jsonx)
@@ -91,11 +93,14 @@ class FaceClass(object):
return '{ "status":88241, "remark":"override this!" }'
# @doc find the biggest face in the named image
# Image has origin at top left and values are b = [ X1, Y1, X2, Y2 ]
def ideal(self, name, rectname, cropname):
found = -1
biggest = -1
self.crops[name] = []
print("** faceclass::ideal ... %s with %d boxes => %s + %s" % (name, len(self.boxes), rectname, cropname ))
# resize boxes
for i in range(len(self.boxes)):
@@ -110,6 +115,7 @@ class FaceClass(object):
b = self.boxes[found]
# extract crops and highlights - colours are BGR
self.imgs[cropname] = self.crop(name,b[0],b[1],b[2],b[3])
self.crops[name].append(self.imgs[cropname])
self.imgs[rectname] = deepcopy(self.imgs[name])
#print(self.imgs[name])
#print(self.imgs[rectname])
@@ -140,13 +146,10 @@ class FaceClass(object):
return '{ "status":88245, "remark":"override this!" }'
# @doc This does everything for you.
# If you are smartserv, "crowd" means cam and "govid" means regula pic
def crowd_vs_govid(self, name1,file1,scale1str, name2,file2,scale2str):
def crowd_vs_govid(self, name1,file1,scale1, name2,file2,scale2):
print("##1##")
scale1 = float(scale1str)
scale2 = float(scale2str)
if self.json2obj(self.load1(name1, file1))["status"] != 0:
return self.jsonx
if scale1 !=0:
@@ -184,6 +187,28 @@ class FaceClass(object):
return jsonstr
# @doc This does demographic examination on a pic.
# If you are smartserv, "crowd" means cam and "govid" means regula pic
def traffic(self, name, file, scale=0):
print("##1##")
jsons = []
if self.json2obj(self.load1(name, file))["status"] != 0:
return self.jsonx
if scale !=0:
self.shrink(name,scale)
if self.json2obj(self.detect(name))["status"] != 0:
return self.jsonx
for i in range(len(self.boxes)):
b = self.boxes[i]
print(">>>>" , b)
analysis = self.metadata(self.imgs[name][b[1]:b[3], b[0]:b[2]])  # boxes are (x1,y1,x2,y2); numpy slices as [y1:y2, x1:x2]
jsons.append(analysis)
print(json.dumps(jsons))
return json.dumps(jsons)
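One convention worth spelling out for traffic(): detect() stores boxes as (x1, y1, x2, y2) with the origin at top left, while numpy images index as [row, column], i.e. y before x, so a face crop has to slice rows first. A minimal sketch of the crop used in the loop above:

import numpy as np

def crop_box(img: np.ndarray, box):
    # box = (x1, y1, x2, y2), origin at top left
    x1, y1, x2, y2 = box
    return img[y1:y2, x1:x2]   # rows are y, columns are x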
###### ##### # #####
# # # # #
##### # # # #
@@ -232,6 +257,7 @@ class FaceClass(object):
##### ###### ##### # # ##### # #
# # # # # # # # ## #
# # ##### # # # # # # # #

View File

@@ -26,7 +26,6 @@ class Paravision(FaceClass):
pass
# @doc find all the faces in the named image
def detect(self, name):
boxes = []
@@ -34,16 +33,12 @@ class Paravision(FaceClass):
print("** face_recognition::detect ... %s" % name)
try:
# Get all faces from images with qualities, landmarks, and embeddings
res = self.sdk.get_faces([self.imgs[name]], qualities=True, landmarks=True, embeddings=True)
res = self.sdk.get_faces([self.imgs[name]])
boxes = res.faces
print(boxes)
for a in boxes:
print(box)
# box is somehow = y1 / x2 / y2 / x1 for face_recognition.
# That's crazy lol. We need to fix that to x1, y1, x2, y2 with origin at top left
for b in boxes:
self.boxes.append((b[3],b[0],b[1],b[2]))
b = b.bounding_box
#print(b)
self.boxes.append((int(b.origin.x),int(b.origin.y),int(b.origin.x+b.width),int(b.origin.y+b.height)))
print("found %d boxes for %s" % (len(self.boxes), name) )
except Exception as ex:
self.errstr = "image processing exception at get_faces: "+str(ex)
@@ -52,123 +47,33 @@ class Paravision(FaceClass):
# Assess the face that was read in
def processiii(self):
# Get all faces metadata
print("Finding faces in %s" %(self.imgpath))
faces = self.sdk.get_faces([self.image], qualities=True, landmarks=True, embeddings=True)
print("Getting metadata")
inferences = faces.image_inferences
print("Getting best face")
ix = inferences[0].most_prominent_face_index()
print("Getting a mathematical mode of that best face")
self.model = inferences[0].faces[ix].embedding
print("Getting image quality scores..")
self.score = round(1000*inferences[0].faces[ix].quality)
print("Score was %d" %(self.score))
return self.score
# Compare to a face in another Facematch instance
def compare(self,other):
# Get face match score
return self.sdk.get_match_score(self.model, other.model)
#mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
#
def load(self, dev1, dev2, id_image_filepath, photo_image_filepath):
print("## loading images", dev1, dev2)
self.dev1 = dev1
self.dev2 = dev2
# @doc compare two named images, previously loaded
def compare(self, name1, name2):
print("** face_recognition::compare ... %s vs %s" % (name1,name2))
self.encs[name1] = self.sdk.get_embedding_from_prepared_image(self.imgs[name1])
self.encs[name2] = self.sdk.get_embedding_from_prepared_image(self.imgs[name2])
#if self.encs[name1]==[]:
# return '{ "status":14330, "remark":"could not encode image", "guilty_param":"img1", "guilty_value":"%s" }' % name1
#if self.encs[name2]==[]:
# return '{ "status":14331, "remark":"could not encode image", "guilty_param":"img2", "guilty_value":"%s" }' % name2
print(self.encs.keys())
print(self.encs[name1])
print(self.encs[name2])
try:
self.id_image = pru.load_image(id_image_filepath)
except Exception as e:
return "id image loading failed ", e
try:
self.photo_image = pru.load_image(photo_image_filepath)
except Exception as e:
return "client image loading failed ", e
return None
def get_faces(self):
try:
# Get all faces from images with qualities, landmarks, and embeddings
print("Finding faces...")
self.inference_result = self.sdk.get_faces([self.id_image, self.photo_image], qualities=True, landmarks=True, embeddings=True)
print("Inferences...")
self.image_inference_result = self.inference_result.image_inferences
if len(self.image_inference_result)==0:
return "no inferences found"
# Get most prominent face
print("Most prominent...")
self.id_face = self.image_inference_result[0].most_prominent_face_index()
self.photo_face = self.image_inference_result[1].most_prominent_face_index()
if self.id_face<0:
return "no id face found"
if self.photo_face<0:
return "no live face found"
# Get numerical representation of faces (required for face match)
print("stats...")
if (len(self.image_inference_result)<2):
return "ID or human face could not be recognised"
self.id_emb = self.image_inference_result[0].faces[self.id_face].embedding
self.photo_emb = self.image_inference_result[1].faces[self.photo_face].embedding
except Exception as ex:
return "image processing exception "+str(ex)
return None
def compute_scores(self):
try:
# Get image quality scores (how 'good' a face is)
self.id_qual = self.image_inference_result[0].faces[self.id_face].quality
self.photo_qual = self.image_inference_result[1].faces[self.photo_face].quality
self.id_qual = round(self.id_qual, 3)
self.photo_qual = round(self.photo_qual, 3)
# Get face match score
self.match_score = self.sdk.get_match_score(self.id_emb, self.photo_emb)
res = self.sdk.get_match_score(self.model, other.model)
print("Match is ",res)
#self.match_score = 1000 * (1 - face_recognition.face_distance([self.encs[name1][0]], self.encs[name2][0]))
#print("Score is ",self.match_score)
# Create .json
self.face_match_json = {"device1":self.dev1,
"device2":self.dev2,
"passmark":500,
"device1_qual":self.id_qual,
"device2_qual":self.photo_qual,
"match_score":self.match_score}
#return json.dumps(self.face_match_json)
#print(self.face_match_json)
# Send to core
#url = "%s/notify/%s/%s" % (self.conf["core"], self.conf["identity"], face_match_json)
#url = url.replace(" ", "%20") # Remove spaces
#buf = []
#req = urllib.request.Request( url )
#with urllib.request.urlopen(req) as response:
#print(response.read())
self.tree["score"] = self.match_score[0]
except Exception as ex:
return str(ex)
def get_scores(self):
return json.dumps(self.face_match_json)
print("** paravision::compare exception ... " + str(ex) )
self.errstr = "image comparison exception at compute_scores: "+str(ex)
return '{ "status":332410, "remark":"%s" }' % self.errstr
return '{ "status":0, "remark":"OK", "score":%d }' % self.match_score[0]
if __name__ == '__main__':

View File

@@ -32,6 +32,7 @@ class yoloserv(object):
palmdetector = None
facematcher = None
palmmatcher = None
traffic = None
ir_camera = None
devices = []
points = []
@@ -215,6 +216,13 @@ class yoloserv(object):
return self.facematcher.compare(name1,name2)
# Traffic analysis
@cherrypy.expose
def svc_traffic(self,infile=None):
return self.facematcher.traffic("traffic", infile)  # traffic(name, file): register the upload under a fixed name
@cherrypy.expose
def shutdown(self):

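Because svc_traffic is exposed via @cherrypy.expose, CherryPy maps query parameters onto its keyword arguments, so the endpoint can be exercised with a plain GET; host and port below are placeholders for whatever yoloserv binds to:

import urllib.request

# hits svc_traffic with infile=testimg/crowd.jpg (the test image added in this commit)
with urllib.request.urlopen("http://localhost:8080/svc_traffic?infile=testimg/crowd.jpg") as r:
    print(r.read().decode())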
BIN
testimg/crowd.jpg Normal file

Binary file not shown.
