new dev towards ad traffic recognition
commit b69c4f9ecc (parent 7f2f669e29)
BIN  models/emotion_model.hdf5 (new binary file, not shown)
@@ -101,6 +101,7 @@ do
 PYP="$PYP:$PYP_DEEPFACE"
 ;;
 "regula") ;;
+"traffic") ;;
 "camera") ;;
 *) echo "yoloserv does not implement backend $i. Edit /etc/ukdi.json::yolo_devices and try again."
 exit 1
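A guessed sketch (the schema is not shown in this commit) of how the new backend might be enabled, assuming /etc/ukdi.json::yolo_devices is the list of backend names this loop iterates over:

    {
      "yolo_devices": [ "regula", "traffic", "camera" ]
    }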
@@ -80,10 +80,9 @@ class Deepfacex(FaceClass):
 return json.dumps(verification)


-def metadata(self,name):
-f1 = "/tmp/%s.png" % (name)
-metadata = DeepFace.analyze(img_path = f1, actions = ["age", "gender", "emotion", "race"])
-return json.dumps(metadata)
+def analyze(self,name):
+result = DeepFace.analyze(self.imgs[name], actions=['age', 'gender', 'emotion', 'race'], enforce_detection=False)
+return result


 #
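For context, the reworked analyze() is a thin wrapper over DeepFace.analyze. A minimal standalone sketch (not part of this commit), assuming a recent deepface release where the first argument may be an in-memory BGR array and the result is a per-face list of dicts:

    # sketch of the call Deepfacex.analyze now makes (assumptions noted above)
    from deepface import DeepFace
    import cv2

    img = cv2.imread("testimg/messi4.jpg")   # BGR array, standing in for self.imgs[name]
    result = DeepFace.analyze(img, actions=['age', 'gender', 'emotion', 'race'],
                              enforce_detection=False)   # don't raise when no face is found
    print(result)   # fields such as age / dominant_emotion; exact keys vary by deepface version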
@@ -38,8 +38,11 @@ import sys
 import os
 import face_recognition
 import json
+import numpy as np
 import urllib.request
 from faceclass import FaceClass
+from keras.models import load_model
+

 import time
 #
@@ -59,8 +62,12 @@ class FaceRecognition(FaceClass):
 #def init():
 #model = load_model("./emotion_detector_models/model.hdf5")

+#def prep_detectors(self):
+
-# @doc find all the faces in the named image
+# @doc find all the faces in the named image.
+# The detectors tend to return proprietary formats, so this "detect" method
+# is going to be new for each implementation of FaceClass
 def detect(self, name):
 self.tree["detectms"] = time.time()
 boxes = []
@@ -82,24 +89,25 @@ class FaceRecognition(FaceClass):
 return '{ "status":0, "remark":"OK", "faces":%d, "boxes":%s, "time":%d }' % (len(self.boxes), json.dumps(self.boxes), self.tree["detectms"] )


-# @doc find the landmarks of the given face
+# @doc find the landmarks of the given face (eyes, mouth, nose etc)
 def landmarks(self, name):
 landmarks = face_recognition.face_landmarks(self.imgs[name])
 return '{ "status":0, "remark":"OK", "landmarks":%s }' % json.dumps(landmarks)


-# @doc find the metadata of the given face
-def metadata(self, name):
+# @doc find the metadata of the given face (emotion, age, gender, race)
+def metadata(self, npimg):
+model = load_model("models/emotion_model.hdf5")
+print(time.time())
 emotion_dict= {'Angry': 0, 'Sad': 5, 'Neutral': 4, 'Disgust': 1, 'Surprise': 6, 'Fear': 2, 'Happy': 3}
-face_image = cv2.imread("..test_images/39.jpg")
-face_image = cv2.resize(face_image, (48,48))
-face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
-face_image = np.reshape(face_image, [1, face_image.shape[0], face_image.shape[1], 1])
-model = load_model("./emotion_detector_models/model.hdf5")
-predicted_class = np.argmax(model.predict(face_image))
+im = cv2.resize(npimg, (64, 64))
+im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
+im = np.reshape(im, [1, im.shape[0], im.shape[1], 1])
+predicted_class = np.argmax(model.predict(im))
 label_map = dict((v,k) for k,v in emotion_dict.items())
 predicted_label = label_map[predicted_class]
-return '{ "status":88324, "remark":"override this", "landmarks":%s }' % json.dumps(landmarks)
+print(time.time())
+return predicted_label


 # @doc compare two named images, previously loaded
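The new metadata() path replaces the DeepFace call with the bundled Keras model: resize the face crop to 64x64, convert to grayscale, reshape to a (1, 64, 64, 1) batch, then argmax over the prediction. A self-contained sketch of the same steps, assuming models/emotion_model.hdf5 takes that input shape:

    import cv2
    import numpy as np
    from keras.models import load_model

    model = load_model("models/emotion_model.hdf5")   # shipped as a binary in this commit
    emotion_dict = {'Angry': 0, 'Sad': 5, 'Neutral': 4, 'Disgust': 1, 'Surprise': 6, 'Fear': 2, 'Happy': 3}
    label_map = dict((v, k) for k, v in emotion_dict.items())   # class index -> label

    crop = cv2.imread("testimg/messi4.jpg")      # stand-in for one detected face crop
    im = cv2.resize(crop, (64, 64))              # model input size used above
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)    # single grayscale channel
    im = np.reshape(im, [1, 64, 64, 1])          # batch of one
    print(label_map[int(np.argmax(model.predict(im)))])   # e.g. "Happy"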
@@ -173,8 +181,11 @@ if __name__ == '__main__':
 d.stats("messi2_crop")
 d.compare("messi4_crop","messi2_rect")

+if sys.argv[1]=="traffic":
+jsonstr = d.traffic("traffic4", "testimg/messi4.jpg")
+print(jsonstr)

-if sys.argv[1]=="crowd":
+if sys.argv[1]=="group":
 jsonstr = d.crowd_vs_govid("pic1", "testimg/messi4.jpg", 0, "pic2", "testimg/messi2.jpg", 0)
 print(jsonstr)

@@ -15,17 +15,19 @@ class FaceClass(object):
 model = None
 imgs = {}
 encs = {}
-faces = {}
+crops = {}
 visual = 0
 files = {}
 boxes = []
 jsonx = ""
 errstr = ""
+vidcap = None
 tree = { "device1":"NA", "device2":"NA", "threshold":380, "device1_qual":0.5, "device2_qual":0.5, "score":0, "detectms":0, "comparems":0 }



 def json2obj(self,jsonx):
+print(jsonx)
 self.jsonx = jsonx
 return json.loads(jsonx)

@@ -91,11 +93,14 @@ class FaceClass(object):
 return '{ "status":88241, "remark":"override this!" }'

+
+
 # @doc find the biggest face in the named image
 # Image has origin at top left and values are b = [ X1, Y1, X2, Y2 ]
 def ideal(self, name, rectname, cropname):
 found = -1
 biggest = -1
+self.crops[name] = []
 print("** faceclass::ideal ... %s with %d boxes => %s + %s" % (name, len(self.boxes), rectname, cropname ))
 # resize boxes
 for i in range(len(self.boxes)):
@@ -110,6 +115,7 @@ class FaceClass(object):
 b = self.boxes[found]
 # extract crops and highlights - colours are BGR
 self.imgs[cropname] = self.crop(name,b[0],b[1],b[2],b[3])
+self.crops[name].append(self.imgs[cropname])
 self.imgs[rectname] = deepcopy(self.imgs[name])
 #print(self.imgs[name])
 #print(self.imgs[rectname])
@@ -140,13 +146,10 @@ class FaceClass(object):
 return '{ "status":88245, "remark":"override this!" }'

-
 # @doc This does everything for you.
 # If you are smartserv, "crowd" means cam and "govid" means regula pic
-def crowd_vs_govid(self, name1,file1,scale1str, name2,file2,scale2str):
+def crowd_vs_govid(self, name1,file1,scale1, name2,file2,scale2):
 print("##1##")
-scale1 = float(scale1str)
-scale2 = float(scale2str)
 if self.json2obj(self.load1(name1, file1))["status"] != 0:
 return self.jsonx
 if scale1 !=0:
@@ -184,6 +187,28 @@ class FaceClass(object):
 return jsonstr


+# @doc This does demographic examination on a pic.
+# If you are smartserv, "crowd" means cam and "govid" means regula pic
+def traffic(self, name, file, scale=0):
+print("##1##")
+jsons = []
+if self.json2obj(self.load1(name, file))["status"] != 0:
+return self.jsonx
+if scale !=0:
+self.shrink(name,scale)
+if self.json2obj(self.detect(name))["status"] != 0:
+return self.jsonx
+for i in range(len(self.boxes)):
+b = self.boxes[i]
+print(">>>>" , b)
+analysis = self.metadata(self.imgs[name][b[1]:b[3],b[0]:b[2]])
+jsons.append(analysis)
+print(json.dumps(jsons))
+return json.dumps(jsons)
+
+
+
+
 ###### ##### # #####
 # # # # #
 ##### # # # #
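Since the detect() implementations store boxes as (x1, y1, x2, y2) with the origin at top left, the per-face crop inside traffic() is a row-first numpy slice. A small sketch of that convention:

    import numpy as np

    img = np.zeros((480, 640, 3), dtype=np.uint8)   # rows (y) x columns (x) x BGR
    b = (100, 50, 260, 210)                          # (x1, y1, x2, y2), as in self.boxes
    face = img[b[1]:b[3], b[0]:b[2]]                 # slice rows by y first, then columns by x
    print(face.shape)                                # (160, 160, 3)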
@@ -232,6 +257,7 @@ class FaceClass(object):


+

 ##### ###### ##### # # ##### # #
 # # # # # # # # ## #
 # # ##### # # # # # # # #
@@ -26,7 +26,6 @@ class Paravision(FaceClass):
 pass


-
 # @doc find all the faces in the named image
 def detect(self, name):
 boxes = []
@@ -34,16 +33,12 @@ class Paravision(FaceClass):
 print("** face_recognition::detect ... %s" % name)
 try:
 # Get all faces from images with qualities, landmarks, and embeddings
-res = self.sdk.get_faces([self.imgs[name]], qualities=True, landmarks=True, embeddings=True)
+res = self.sdk.get_faces([self.imgs[name]])
 boxes = res.faces
-print(boxes)
-for a in boxes:
-print(box)
-# box is somehow = y1 / x2 / y2 / x1 for face_recognition .
-# =
-# Thats crazy lol. We need to fix that to x1, y1, x2, y2 with origin at top left
 for b in boxes:
-self.boxes.append((b[3],b[0],b[1],b[2]))
+b = b.bounding_box
+#print(b)
+self.boxes.append((int(b.origin.x),int(b.origin.y),int(b.origin.x+b.width),int(b.origin.y+b.height)))
 print("found %d boxes for %s" % (len(self.boxes), name) )
 except Exception as ex:
 self.errstr = "image processing exception at get_faces: "+str(ex)
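The Paravision SDK returns each face with an origin point plus width and height; the new detect() flattens that into the shared (x1, y1, x2, y2) tuple. A toy sketch of the conversion, using a hypothetical stand-in for the SDK's bounding_box object:

    # hypothetical stand-in for the SDK's bounding_box (origin + width/height)
    class Point:
        def __init__(self, x, y): self.x, self.y = x, y
    class BoundingBox:
        def __init__(self, x, y, w, h):
            self.origin, self.width, self.height = Point(x, y), w, h

    b = BoundingBox(10.0, 20.0, 64.0, 64.0)
    xyxy = (int(b.origin.x), int(b.origin.y), int(b.origin.x + b.width), int(b.origin.y + b.height))
    print(xyxy)   # (10, 20, 74, 84)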
@@ -52,123 +47,33 @@ class Paravision(FaceClass):



-# Assess the face that was read in
-def processiii(self):
-# Get all faces metadata
-print("Finding faces in %s" %(self.imgpath))
-faces = self.sdk.get_faces([self.image], qualities=True, landmarks=True, embeddings=True)
-print("Getting metadata")
-inferences = faces.image_inferences
-print("Getting best face")
-ix = inferences[0].most_prominent_face_index()
-print("Getting a mathematical mode of that best face")
-self.model = inferences[0].faces[ix].embedding
-print("Getting image quality scores..")
-self.score = round(1000*inferences[0].faces[ix].quality)
-print("Score was %d" %(self.score))
-return self.score
-
-# Compare to a face in another Facematch instance
-def compare(self,other):
-# Get face match score
-return self.sdk.get_match_score(self.model, other.model)
-
-
-#mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
-
-
-#
-def load(self, dev1, dev2, id_image_filepath, photo_image_filepath):
-print("## loading images", dev1, dev2)
-self.dev1 = dev1
-self.dev2 = dev2
+# @doc compare two named images, previously loaded
+def compare(self, name1, name2):
+print("** face_recognition::compare ... %s vs %s" % (name1,name2))
+self.encs[name1] = self.sdk.get_embedding_from_prepared_image(self.imgs[name1])
+self.encs[name2] = self.sdk.get_embedding_from_prepared_image(self.imgs[name2])
+#if self.encs[name1]==[]:
+# return '{ "status":14330, "remark":"could not encode image", "guilty_param":"img1", "guilty_value":"%s" }' % name1
+#if self.encs[name2]==[]:
+# return '{ "status":14331, "remark":"could not encode image", "guilty_param":"img2", "guilty_value":"%s" }' % name2
+print(self.encs.keys())
+print(self.encs[name1])
+print(self.encs[name2])
 try:
-self.id_image = pru.load_image(id_image_filepath)
-except Exception as e:
-return "id image loading failed ", e
-try:
-self.photo_image = pru.load_image(photo_image_filepath)
-except Exception as e:
-return "client image loading failed ", e
-return None
-
-
-def get_faces(self):
-try:
-# Get all faces from images with qualities, landmarks, and embeddings
-print("Finding faces...")
-self.inference_result = self.sdk.get_faces([self.id_image, self.photo_image], qualities=True, landmarks=True, embeddings=True)
-print("Inferences...")
-self.image_inference_result = self.inference_result.image_inferences
-if len(self.image_inference_result)==0:
-return "no inferences found"
-
-# Get most prominent face
-print("Most prominent...")
-self.id_face = self.image_inference_result[0].most_prominent_face_index()
-self.photo_face = self.image_inference_result[1].most_prominent_face_index()
-if self.id_face<0:
-return "no id face found"
-if self.photo_face<0:
-return "no live face found"
-
-# Get numerical representation of faces (required for face match)
-print("stats...")
-if (len(self.image_inference_result)<2):
-return "ID or human face could not be recognised"
-self.id_emb = self.image_inference_result[0].faces[self.id_face].embedding
-self.photo_emb = self.image_inference_result[1].faces[self.photo_face].embedding
-
-except Exception as ex:
-return "image processing exception "+str(ex)
-
-return None
-
-
-
-def compute_scores(self):
-try:
-# Get image quality scores (how 'good' a face is)
-self.id_qual = self.image_inference_result[0].faces[self.id_face].quality
-self.photo_qual = self.image_inference_result[1].faces[self.photo_face].quality
-
-self.id_qual = round(self.id_qual, 3)
-self.photo_qual = round(self.photo_qual, 3)
-
-# Get face match score
-self.match_score = self.sdk.get_match_score(self.id_emb, self.photo_emb)
-
+res = self.sdk.get_match_score(self.encs[name1], self.encs[name2])
+print("Match is ",res)
+#self.match_score = 1000 * (1 - face_recognition.face_distance([self.encs[name1][0]], self.encs[name2][0]))
+#print("Score is ",self.match_score)
 # Create .json
-self.face_match_json = {"device1":self.dev1,
-"device2":self.dev2,
-"passmark":500,
-"device1_qual":self.id_qual,
-"device2_qual":self.photo_qual,
-"match_score":self.match_score}
-
-#return json.dumps(self.face_match_json)
-
-#print(self.face_match_json)
-
-# Send to core
-#url = "%s/notify/%s/%s" % (self.conf["core"], self.conf["identity"], face_match_json)
-#url = url.replace(" ", "%20") # Remove spaces
-#buf = []
-#req = urllib.request.Request( url )
-#with urllib.request.urlopen(req) as response:
-#print(response.read())
-
+self.tree["score"] = res[0]
 except Exception as ex:
-return str(ex)
-
-def get_scores(self):
-return json.dumps(self.face_match_json)
+print("** paravision::compare exception ... " + str(ex) )
+self.errstr = "image comparison exception at compare: "+str(ex)
+return '{ "status":332410, "remark":"%s" }' % self.errstr
+return '{ "status":0, "remark":"OK", "score":%d }' % res[0]


 if __name__ == '__main__':
@@ -32,6 +32,7 @@ class yoloserv(object):
 palmdetector = None
 facematcher = None
 palmmatcher = None
+traffic = None
 ir_camera = None
 devices = []
 points = []
@@ -215,6 +216,13 @@ class yoloserv(object):
 return self.facematcher.compare(name1,name2)


+# Traffic analysis
+@cherrypy.expose
+def svc_traffic(self,infile=None):
+return self.facematcher.traffic("traffic", infile)   # "traffic" is an assumed image name; traffic() requires (name, file)
+
+
+
 @cherrypy.expose
 def shutdown(self):
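With CherryPy's default routing, the exposed svc_traffic method becomes a /svc_traffic URL whose query parameters map to keyword arguments. A hedged example client, assuming yoloserv listens on localhost:8080 (the bind address is not shown in this commit) and that infile names an image readable by the server:

    import urllib.request

    url = "http://localhost:8080/svc_traffic?infile=testimg/crowd.jpg"   # hypothetical host/port
    with urllib.request.urlopen(url) as response:
        print(response.read().decode())   # per-face demographic results as JSON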
BIN  testimg/crowd.jpg (new binary file, 71 KiB; not shown)