diff --git a/doc/yoloserv.odt b/doc/yoloserv.odt
index b6b53fdd5..22222fa14 100644
Binary files a/doc/yoloserv.odt and b/doc/yoloserv.odt differ
diff --git a/sbin/ctl.sh b/sbin/ctl.sh
index 57b26a46b..bbf6cde2d 100755
--- a/sbin/ctl.sh
+++ b/sbin/ctl.sh
@@ -97,7 +97,13 @@ do
"seek") LLP="$LLP:$LIB_SEEK"
PYP="$PYP:src"
;;
- esac
+ "face_recognition") LLP="$LLP:$LIB_DEEPFACE"
+ PYP="$PYP:$PYP_DEEPFACE"
+ ;;
+ "camera") ;;
+ *) echo "yoloserv does not implement backend $i. Edit /etc/ukdi.json::yolo_devices and try again."
+ exit 1
+ esac
done
echo "PYTHONPATH = $PYP"
export LD_LIBRARY_PATH="$LLP"
@@ -124,37 +130,9 @@ function f_apt(){
pip3 install yolov8
pip3 install deepface
- case $1 in
- "updates") apt update
- apt upgrade
apt install libopencv-dev python3-opencv
- python3 -m pip install --upgrade pip
- ;;
- "regula") f_apt_regula
- ;;
- "yolov5") f_apt_yolov5
- ;;
- "yolov8") f_apt_yolov8
- ;;
- "para_facematch") f_apt_openvino
- f_apt_paravision
- ;;
- "openvino") f_apt_openvino
- ;;
- "deepface") pip3 install deepface
- ;;
- "facerec") f_apt_facerec
- ;;
- "intox") f_apt_yolov5
- f_apt_intox
- ;;
- *) echo "Error. Second parameter must be updates|regula|yolov5|paravision|freeface|paraface|palm|intox"
- ;;
- esac
-}
-
-
-function f_apt_regula(){
+ pip3 install deepface
+ #REGULA..
apt install pcscd libccid
wget -O downloads/regula.deb https://downloads.regulaforensics.com/repo/ubuntu/pool/stable/r/regula-reader/regula-reader_6.7.198393.16246_amd64.deb
wget -O downloads/drivers.deb https://downloads.regulaforensics.com/repo/ubuntu/pool/stable/r/regula-drivers/regula-drivers_2.1.1.12_amd64.deb
@@ -162,17 +140,16 @@ function f_apt_regula(){
sudo pip3 install openvino
- . /usr/local/lib/python3.10/dist-packages/
- ROOTURL="http://824f668f-5c6e-4ffe-8b5b-edb4d0301982:985c5412-5e0d-4d66-8d28-ed32dbb900a3@paravision.mycloudrepo.io"
- pip3 install cmake --upgrade
- pip3 install --no-cache-dir\
- --extra-index-url $ROOTURL/repositories/python-sdk\
- --extra-index-url $ROOTURL/repositories/python-recognition\
- "paravision-recognition" "paravision-models-gen5-balanced-openvino-2022-3" "openvino==2022.3"\
- --trusted-host paravision.mycloudrepo.io
+ # bypass paravision
+ # . /usr/local/lib/python3.10/dist-packages/
+ # ROOTURL="http://824f668f-5c6e-4ffe-8b5b-edb4d0301982:985c5412-5e0d-4d66-8d28-ed32dbb900a3@paravision.mycloudrepo.io"
+ # pip3 install cmake --upgrade
+ # pip3 install --no-cache-dir\
+ # --extra-index-url $ROOTURL/repositories/python-sdk\
+ # --extra-index-url $ROOTURL/repositories/python-recognition\
+ # "paravision-recognition" "paravision-models-gen5-balanced-openvino-2022-3" "openvino==2022.3"\
+ # --trusted-host paravision.mycloudrepo.io
- # Open source Face_Recognition
- pip3 install deepface
wget -O downloads/facerec.zip https://github.com/ageitgey/face_recognition/archive/refs/heads/master.zip
cd downloads
@@ -195,7 +172,10 @@ function f_apt_intox(){
#### #### # # # # # #### ######
function f_test(){
- wget http://localhost:$YOLOPORT/process/
+ mkdir -p $UKDI_yolo_indir
+ cp testimg/* $UKDI_yolo_indir
+ # echo "loading messi 1"
+ wget "http://localhost:$YOLOPORT/svc_load_face/messi1/messi1.jpg"
}
function f_start(){
@@ -215,11 +195,10 @@ function f_stop(){
}
-
echo "Running $0 with option $1 at $DIR"
case $1 in
- "apt") f_apt $2
+ "apt") f_apt
;;
"start") f_start
;;
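# Reviewer sketch (not part of the patch): the backend names that ctl.sh's case
# statement accepts come from /etc/ukdi.json::yolo_devices, the same comma-separated
# string yoloserv.py splits in its init. The sample values below are assumptions;
# only the keys yolo_devices, yolo_indir and yolo_outdir appear in this diff.
import json

sample_conf = {
    "yolo_devices": "camera,face_recognition",  # ctl.sh exits 1 on any other backend name
    "yolo_indir": "/tmp/yolo_in/",
    "yolo_outdir": "/tmp/yolo_out/",
}
print(json.dumps(sample_conf, indent=2))        # shape of the relevant part of /etc/ukdi.json

for backend in sample_conf["yolo_devices"].split(","):
    print("backend:", backend)                  # mirrors `for i in ...` / `case $i in` in ctl.sh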
diff --git a/src/camera.py b/src/camera.py
index 9c843ec91..eb7470480 100644
--- a/src/camera.py
+++ b/src/camera.py
@@ -52,12 +52,15 @@ class Camera(object):
if ret!=True:
return '{ "status":4773, "remark":"Could not grab frame " }'
pixel_avg = self.frame.mean()
- if pixel_avg > 100:
- break
+ if pixel_avg > 150:
+ return '{ "status":4774, "remark":"Frame is too bright " }'
+ if pixel_avg < 50:
+ return '{ "status":4775, "remark":"Frame is too dark " }'
cap.release()
+ return '{ "status":0, "remark":"OK", "pixel_avg":%d }' % pixel_avg
- # show the raw acquired image for a test
+ # show the raw acquired image for a test - only works if OpenCV was built with GUI (highgui) support
def show(self):
cv2.imshow("test_window",self.frame)
cv2.waitKey(0)
@@ -66,19 +69,12 @@ class Camera(object):
# save the image to a named file
def save(self, filename, format="png"):
- ret = 0
- buffer = None
- if format=="png":
- ret, buffer = cv2.imencode('.png', self.frame)
- else:
- ret, buffer = cv2.imencode('.jpg', self.frame)
- if not ret:
- print("Bad ret code ", ret)
- cv2.imwrite(filename + "." + format, buffer)
+ cv2.imwrite(filename + "." + format, self.frame)
+ return 0
def dump(self):
- print (self.frame)
+ #print (self.frame)
return self.frame
@@ -98,5 +94,6 @@ class Camera(object):
if __name__ == '__main__':
m = Camera()
- m.acquire(0,"/tmp/newpic.jpg",True)
+ print(m.acquire("0"))
+ m.save("/tmp/test","png")
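# Reviewer sketch (not part of the patch): how a caller might consume the JSON strings
# that Camera.acquire() now returns instead of breaking out of a loop. Status codes
# 4773 (no frame), 4774 (too bright), 4775 (too dark) and 0 (OK, with "pixel_avg")
# are taken from the hunk above; the retry policy itself is illustrative only.
import json
import time

from camera import Camera

def acquire_with_retry(cam, camidx="0", attempts=3):
    result = {"status": 4773, "remark": "not attempted"}
    for _ in range(attempts):
        result = json.loads(cam.acquire(camidx))
        if result["status"] == 0:
            break                 # good frame, pixel_avg is in range
        if result["status"] == 4773:
            break                 # could not grab a frame at all; retrying will not help
        time.sleep(0.5)           # too bright / too dark: give the exposure a moment
    return result

print(acquire_with_retry(Camera()))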
diff --git a/src/face_recognitionx.py b/src/face_recognitionx.py
index 48f42c927..0cfc567e0 100644
--- a/src/face_recognitionx.py
+++ b/src/face_recognitionx.py
@@ -42,7 +42,9 @@ import urllib.request
from faceclass import FaceClass
import time
-
+#
+# This, together with its superclass, is the reference implementation - CTO
+#
class FaceRecognition(FaceClass):
@@ -53,32 +55,30 @@ class FaceRecognition(FaceClass):
face_match_json = None
conf = []
-
+ # @doc find all the faces in the named image
def get_faces(self, name):
print("** get_faces ... %s" % name)
try:
# Get all faces from images with qualities, landmarks, and embeddings
- boxes = face_recognition.face_locations(self.imgs[name], number_of_times_to_upsample=2, model='hog')
- print("found %d boxes for %s" % (len(boxes), name) )
+ self.boxes = face_recognition.face_locations(self.imgs[name], number_of_times_to_upsample=2, model='hog')
+ print("found %d boxes for %s" % (len(self.boxes), name) )
# Get numerical representation of faces (required for face match)
self.encs[name] = face_recognition.face_encodings(self.imgs[name])[0]
#print("encoding for %s : " % name, self.encs[name])
except Exception as ex:
self.errstr = "image processing exception at get_faces: "+str(ex)
- return -1
- return len(boxes)
-
-
-# return " id=%d photo=%d result=%d " % (self.id_face, self.photo_face, len(self.image_inference_result))
+ return '{ "status":222310, "remark":"image processing exception", "guilty_param":"error", "guilty_value":"%s" }' % str(ex)
+ return '{ "status":0, "remark":"OK", "faces":%d, "boxes":%s }' % (len(self.boxes), json.dumps(self.boxes))
+ # @doc find the landmarks of the given face
def get_landmarks(self, name):
- self.landmarks = face_recognition.face_landmarks(self.imgs[name])
- print(self.landmarks)
- return self.landmarks
+ landmarks = face_recognition.face_landmarks(self.imgs[name])
+ return '{ "status":0, "remark":"OK", "landmarks":%s }' % json.dumps(landmarks)
- def compute_scores(self, name1, name2):
+ # @doc compare two named images, previously loaded
+ def compare(self, name1, name2):
print("** computing ... %s vs %s" % (name1,name2))
try:
res = face_recognition.compare_faces([self.encs[name1]], self.encs[name2])
@@ -90,8 +90,9 @@ class FaceRecognition(FaceClass):
self.tree["score"] = self.match_score[0]
except Exception as ex:
self.errstr = "image comparison exception at compute_scores: "+str(ex)
- return -1
- return self.match_score
+ return '{ "status":332410, "remark":"%s" }' % self.errstr
+ return '{ "status":0, "remark":"OK", "score":%d }' % self.match_score[0]
+
def get_scores(self):
return json.dumps(self.tree)
@@ -99,7 +100,7 @@ class FaceRecognition(FaceClass):
def detect_all(self):
n=0
- self.load("localcam","regula", "/tmp/localcam.png", "/tmp/regula/Portrait_0.jpg")
+ self.load2("localcam", "/tmp/localcam.png", "regula", "/tmp/regula/Portrait_0.jpg")
self.get_faces("localcam")
self.get_faces("regula")
print("computed ... ", d.compute_scores("regula","localcam"))
@@ -115,7 +116,7 @@ if __name__ == '__main__':
if sys.argv[1]=="kiosk":
# lfw
n=0
- d.load("localcam","regula", "/tmp/localcam.png", "/tmp/regula/Portrait_0.jpg")
+ d.load2("localcam", "/tmp/localcam.png", "regula", "/tmp/regula/Portrait_0.jpg")
d.get_faces("localcam")
d.get_faces("regula")
print("computed ... ", d.compute_scores("regula","localcam"))
diff --git a/src/faceclass.py b/src/faceclass.py
index ef2dd1c9a..23f01fc42 100644
--- a/src/faceclass.py
+++ b/src/faceclass.py
@@ -2,6 +2,8 @@ import sys
import os
import cv2
import json
+import base64
+
from matplotlib import pyplot as plt
class FaceClass(object):
@@ -24,20 +26,20 @@ class FaceClass(object):
# Load a pics using the device label
def load1(self, name,fname):
if not os.path.isfile(fname):
- return False
+ return '{ "status":442565, "remark":"file name not found", "guilty_param":"fname", "guilty_value":"%s" }' % (fname)
self.imgs[name] = cv2.imread(fname)
- #print(" Loaded %s from file %s" % (name, fname))
- return True
+ print(" Loaded %s from file %s" % (name, fname))
+ return '{ "status":0, "remark":"OK", "name":"%s", "fname":"%s" }' % (name,fname)
- def load(self, name1,name2,fname1,fname2):
+ def load2(self, name1,fname1,name2,fname2):
print("FaceClass loading files ....................... ")
if not os.path.isfile(fname1):
print("Cant access file ",fname1)
- return -1
+ return '{ "status":442566, "remark":"file not found", "guilty_param":"fname", "guilty_value":"%s" }' % (fname1)
if not os.path.isfile(fname2):
print("Cant access file ",fname2)
- return -1
+ return '{ "status":442567, "remark":"file not found", "guilty_param":"fname", "guilty_value":"%s" }' % (fname2)
self.imfiles.append(fname1)
self.imfiles.append(fname2)
self.imnames[name1] = fname1
@@ -49,38 +51,79 @@ class FaceClass(object):
p2.imshow(name2, self.imgs[name2])
p1.show()
print("FaceClass: Loaded %s from file %s" % (name1, fname1))
- return 1
+ print("FaceClass: Loaded %s from file %s" % (name2, fname2))
+ return '{ "status":0, "remark":"OK", "name1":"%s", "fname1":"%s", "name2":"%s", "fname2":"%s" }' % (name1,fname1,name2,fname2)
- def box(self, name, x, y, w, h):
- cv2.rectangle(self.imgs[name],(x,y),(x+w,y+h), (0,255,0), 4)
+ # @doc draw a box and save with a new name
+ def rect(self, name, x1, y1, x2, y2, newname):
+ self.imgs[newname] = cv2.rectangle(self.imgs[name].copy(), (x1,y1), (x2,y2), (0,255,0), 4)  # copy() so the source image is left untouched
+ # @doc crop an image and save with a new name
+ def crop(self, name, x1, y1, x2, y2, newname):
+ print(x1,y1,x2,y2)
+ self.imgs[newname] = self.imgs[name][y1:y2, x1:x2]  # numpy images are indexed [row, col], i.e. [y, x]
# Load the config
def init(self):
with open("/etc/ukdi.json","r") as f:
self.conf = json.loads(f.read())
-
# Find all faces
def get_faces(self,name):
- return -1
+ return '{ "status":88241, "remark":"override this!" }'
# Find best face
def get_best_face(self):
- return None
+ return '{ "status":88242, "remark":"override this!" }'
+
+ # @doc find the biggest face in the named image
+ def get_ideal(self, name):
+ self.boxes = []
+ self.get_faces(name)
+ found = -1
+ ix = 0
+ biggest = -1
+ for b in self.boxes:
+ print(json.dumps(b))
+ # face_recognition returns boxes as (top, right, bottom, left)
+ area = (b[1] - b[3]) * (b[2] - b[0])
+ print(b, area)
+ if area > biggest:
+ found = ix
+ biggest = area
+ # keep a copy with the winning box drawn; crop stays disabled for now
+ #self.crop(name, b[3], b[0], b[1], b[2], "_crop")
+ # rect expects x1,y1,x2,y2 i.e. (left, top, right, bottom)
+ self.rect(name, b[3], b[0], b[1], b[2], "_rect")
+ ix += 1
+ if found < 0:
+ return '{ "status":8572421, "remark":"no ideal face", "guilty_param":"name", "guilty_value":"%s" }' % name
+ return '{ "status":0, "remark":"OK", "faces":%d, "ideal_ix":%s, "ideal_area":%d, "boxes":%s }' % (len(self.boxes), found, biggest, json.dumps(self.boxes))
+
+
+ # return a base64 version of the pic in memory
+ def dump64(self,which,format="png"):
+ if format=="jpg":
+ _ , im_arr = cv2.imencode('.jpg', self.imgs[which])
+ img_as_txt = base64.b64encode(im_arr)
+ return b'data:image/jpeg;base64,'+img_as_txt
+ _ , im_arr = cv2.imencode('.png', self.imgs[which])
+ img_as_txt = base64.b64encode(im_arr)
+ return b'data:image/png;base64,'+img_as_txt
+
# Find landmarks
def get_landmarks(self):
- return '{ "status": 9999 }'
+ return '{ "status":88243, "remark":"override this!" }'
# Find metadata (age etc)
def get_metadata(self,name):
- return None
+ return '{ "status":88244, "remark":"override this!" }'
# Match two faces
def process(self,name1,name2):
- return None
+ return '{ "status":88245, "remark":"override this!" }'
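# Reviewer sketch (not part of the patch): using the new FaceClass helpers through a
# subclass. get_ideal() keeps a boxed copy of the biggest face under the name "_rect",
# and dump64() returns the image as a data: URI ready for an <img> tag. The input
# file name is a placeholder; FaceClass on its own only returns "override this!".
import json
from face_recognitionx import FaceRecognition

fc = FaceRecognition()
fc.init()
print(fc.load1("group", "/tmp/group.png"))
ideal = json.loads(fc.get_ideal("group"))
if ideal["status"] == 0:
    tag = b'<img src="' + fc.dump64("_rect") + b'">'   # boxed copy, base64 PNG
    print(tag[:60] + b"...")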
diff --git a/src/facematch_open.py b/src/facematch_open.py
index 4a6ec48d1..b80fb8b7b 100644
--- a/src/facematch_open.py
+++ b/src/facematch_open.py
@@ -40,9 +40,8 @@ import urllib.request
import time
-from ukdi import UKDI
-class Facematch(UKDI):
+class Facematch(object):
name = "face_recognition_face_match"
IN_ENGLISH = "A class for checking the similarity between two faces."
diff --git a/src/yoloserv.py b/src/yoloserv.py
index 6ec082623..35064154e 100644
--- a/src/yoloserv.py
+++ b/src/yoloserv.py
@@ -52,6 +52,8 @@ class yoloserv(object):
print("Init yoloserv: %s @ %s %s " % (self.conf["yolo_devices"], self.indir, self.outdir) )
self.devices = self.conf["yolo_devices"].split(",")
+ self.indir = self.conf["yolo_indir"]
+ self.outdir = self.conf["yolo_outdir"]
# Object (face) matching
if "paravision" in self.devices:
@@ -65,7 +67,7 @@ class yoloserv(object):
self.facematcher = Deepfacex()
self.facematcher.init("dlib","SFace")
if "face_recognition" in self.devices:
- print("Loading deepface facematch...")
+ print("Loading face_recognition facematch...")
from face_recognitionx import FaceRecognition
self.facematcher = FaceRecognition()
self.facematcher.init()
@@ -122,55 +124,80 @@ class yoloserv(object):
self.intox_detector.init()
+
# Acquire image - the device used depends on the device list and what actual file was loaded as self.camera
+ # @doc acquires an image from the camera (test OK CG 2024-0724)
@cherrypy.expose
def svc_acquire(self,camidx=0):
self.camera.acquire(camidx)
- return "0 OK"
+ return '{ "status":0, "remark":"OK" }'
- # Test
+ # Test - show the current frame in an OpenCV window (needs GUI support)
@cherrypy.expose
def svc_show(self):
self.camera.show()
- return "0 OK"
+ return '{ "status":0, "remark":"OK" }'
+ # @doc saves the camera image to a file (test OK CG 2024-0724)
@cherrypy.expose
def svc_save(self,filename,extn="png"):
- self.camera.save(filename,extn)
- return "0 OK"
+ self.camera.save(self.outdir + filename,extn)
+ return '{ "status":0, "remark":"OK", "outfile": "%s%s.%s" }' % (self.outdir,filename,extn)
+ # @doc dumps the camera image as an array (test OK CG 2024-0724)
@cherrypy.expose
def svc_dump(self):
buf = self.camera.dump()
return buf
+ # @doc dumps the camera image as a base64 encoded array (test OK CG 2024-0724)
@cherrypy.expose
def svc_dump64(self):
- buf = self.camera.dump64()
+ buf = self.camera.dump64().decode()
return buf
+ # @doc dumps the camera image as an <img> tag for straight to HTML output (test OK CG 2024-0724)
+ @cherrypy.expose
+ def svc_dumphtml(self):
+ buf = self.camera.dump64().decode()
+ return "<img src='%s'>" % buf
+
+
# Find faces - the algorithm used depends on the device list and what actual file was loaded as self.camera
- # Simply load an image
+ # @doc load an image from a file using the specified yoloserv plugin (test OK CG 2024-0724)
@cherrypy.expose
- def svc_load_face(self,infile):
- self.facematcher.load(infile)
+ def svc_load_face(self,name,infile):
+ return self.facematcher.load1(name, self.indir + infile)
+ # @doc detect all the faces in an image file using the specified yoloserv plugin (test OK CG 2024-0724)
@cherrypy.expose
def svc_detect_faces(self,infile):
nfaces = self.facematcher.detect_all(self.indir + infile)
return '{ "status":0, "remark":"found faces", "count":%d }' % (nfaces)
- # Find the most prominent object
+ # @doc find all the faces in the named image that was loaded using the above calls (test OK CG 2024-0724)
@cherrypy.expose
- def svc_ideal_face(self,infile):
- self.facematcher.detect_one(infile)
+ def svc_get_faces(self,which):
+ return self.facematcher.get_faces(which)
+
+ # @doc find the most prominent face in the named image that was loaded using the above calls (test OK CG 2024-0724)
+ # you can access the new ideal face (if present) with the image name "_rect"
+ @cherrypy.expose
+ def svc_get_ideal(self,which):
+ return self.facematcher.get_ideal(which)
+
+ # @doc dumps the named image as an <img> tag for straight to HTML output (test OK CG 2024-0724)
+ @cherrypy.expose
+ def svc_imgtag(self,which):
+ buf = self.facematcher.dump64(which).decode()
+ return "<img src='%s'>" % buf
# Match faces together
@cherrypy.expose
- def svc_match_faces(self,infile1,infile2):
- return self.facematcher.detect(infile1,infile2)
+ def svc_compare(self,name1,name2):
+ return self.facematcher.compare(name1,name2)
@cherrypy.expose
@@ -179,8 +206,24 @@ class yoloserv(object):
os._exit(0)
+
+
+
+ # @doc find the landmarks in the named image (test OK CG 2024-0724)
@cherrypy.expose
- def facematch(self,dev1,dev2):
+ def svc_get_landmarks(self,which):
+ return self.facematcher.get_landmarks(which)
+
+ # Match faces together
+ @cherrypy.expose
+ def svc_match_faces(self,infile1,infile2):
+ return self.facematcher.detect(infile1,infile2)
+ def json2obj(self,jsonx):
+ return json.loads(jsonx)
+
+ # @doc put all the steps for a retail facematch into one convenient function
+ @cherrypy.expose
+ def retail_facematch(self,dev1,dev2):
if self.facematcher is None:
return '{ "status":777244, "remark":"suitable yolo_device" }'
@@ -198,15 +241,18 @@ class yoloserv(object):
if self.conf["emulate_facematch"]:
return '{ "status":0, "remark":"OK", "data":{} }'
- status = self.facematcher.load(dev1, dev2, img1, img2)
- if not status:
- return '{ "status":777242, "remark":"face loading failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
- if self.facematcher.get_faces(dev1) < 1:
- return '{ "status":777243, "remark":"face loading failed", "guilty_param":"image", "guilty_value":"%s" }' % (dev1)
- if self.facematcher.get_faces(dev2) < 1:
- return '{ "status":777244, "remark":"face loading failed", "guilty_param":"image", "guilty_value":"%s" }' % (dev2)
- if self.facematcher.compute_scores(dev1,dev2) < 1:
- return '{ "status":777245, "remark":"face matching failed", "guilty_param":"image", "guilty_value":"%s" }' % (self.facematcher.errstr)
+ jsonx = self.facematcher.load2(dev1, img1, dev2, img2)
+ if self.json2obj(jsonx)["status"] > 0:
+ return jsonx
+ jsonx = self.facematcher.get_faces(dev1)
+ if self.json2obj(jsonx)["status"] > 0:
+ return jsonx
+ jsonx = self.facematcher.get_faces(dev2)
+ if self.json2obj(jsonx)["status"] > 0:
+ return jsonx
+ jsonx = self.facematcher.compare(dev1, dev2)
+ if self.json2obj(jsonx)["status"] > 0:
+ return jsonx
jsonstr = self.facematcher.get_scores()
return '{ "status":0, "remark":"OK", "data": %s }' % (jsonstr)
@@ -366,7 +412,6 @@ if __name__ == '__main__':
s.indir = sys.argv[2]
s.outdir = sys.argv[3]
- #s.initialise(indir,outdir,weightsfile)
cherrypy.config.update({'server.socket_host': '0.0.0.0',
'server.socket_port': servport})
cherrypy.quickstart(s, '/')
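# Reviewer sketch (not part of the patch): driving the renamed HTTP endpoints end to
# end, in the spirit of f_test in ctl.sh. The host, port and image names are
# placeholders; the endpoint names and parameters are the ones defined in this diff.
import json
import urllib.request

BASE = "http://localhost:8080"                     # stand-in for $YOLOPORT

def call(path):
    with urllib.request.urlopen(BASE + path) as resp:
        return resp.read().decode()

print(call("/svc_acquire"))                            # grab a frame from the camera
print(call("/svc_save?filename=localcam&extn=png"))    # write it under yolo_outdir
print(call("/svc_load_face/messi1/messi1.jpg"))        # load an image from yolo_indir
print(json.loads(call("/svc_get_faces?which=messi1"))) # face boxes for that image
print(call("/svc_get_ideal?which=messi1"))             # biggest face; boxed copy saved as "_rect"
print(call("/svc_compare?name1=messi1&name2=messi1"))  # match score between two loaded images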