Merge branch 'master' of ssh://atlantean.ydns.eu:23/home/disp/git/repos/yoloserv
commit c4d2624af5
BIN  doc/yoloserv.odt  (binary file not shown)
67  sbin/ctl.sh
@@ -89,7 +89,13 @@ do
     "deepface") LLP="$LLP:$LIB_DEEPFACE"
         PYP="$PYP:$PYP_DEEPFACE"
         ;;
+    "face_recognition") LLP="$LLP:$LIB_DEEPFACE"
+        PYP="$PYP:$PYP_DEEPFACE"
+        ;;
+    "camera") ;;
+    *) echo "yoloserv does not implement backend $i. Edit /etc/ukdi.json::yolo_devices and try again."
+        exit 1
     esac
 done
 echo "PYTHONPATH = $PYP"
 export LD_LIBRARY_PATH="$LLP"
@@ -116,37 +122,9 @@ function f_apt(){
     pip3 install yolov8
     pip3 install deepface

-    case $1 in
-        "updates") apt update
-            apt upgrade
-            apt install libopencv-dev python3-opencv
-            python3 -m pip install --upgrade pip
-            ;;
-        "regula") f_apt_regula
-            ;;
-        "yolov5") f_apt_yolov5
-            ;;
-        "yolov8") f_apt_yolov8
-            ;;
-        "para_facematch") f_apt_openvino
-            f_apt_paravision
-            ;;
-        "openvino") f_apt_openvino
-            ;;
-        "deepface") pip3 install deepface
-            ;;
-        "facerec") f_apt_facerec
-            ;;
-        "intox") f_apt_yolov5
-            f_apt_intox
-            ;;
-        *) echo "Error. Second parameter must be updates|regula|yolov5|paravision|freeface|paraface|palm|intox"
-            ;;
-    esac
-}
-
-
-function f_apt_regula(){
+    apt install libopencv-dev python3-opencv
+    pip3 install deepface
+    #REGULA..
     apt install pcscd libccid
     wget -O downloads/regula.deb https://downloads.regulaforensics.com/repo/ubuntu/pool/stable/r/regula-reader/regula-reader_6.7.198393.16246_amd64.deb
     wget -O downloads/drivers.deb https://downloads.regulaforensics.com/repo/ubuntu/pool/stable/r/regula-drivers/regula-drivers_2.1.1.12_amd64.deb
@@ -154,17 +132,16 @@ function f_apt_regula(){

     sudo pip3 install openvino

-    . /usr/local/lib/python3.10/dist-packages/
-    ROOTURL="http://824f668f-5c6e-4ffe-8b5b-edb4d0301982:985c5412-5e0d-4d66-8d28-ed32dbb900a3@paravision.mycloudrepo.io"
-    pip3 install cmake --upgrade
-    pip3 install --no-cache-dir\
-        --extra-index-url $ROOTURL/repositories/python-sdk\
-        --extra-index-url $ROOTURL/repositories/python-recognition\
-        "paravision-recognition" "paravision-models-gen5-balanced-openvino-2022-3" "openvino==2022.3"\
-        --trusted-host paravision.mycloudrepo.io
-
-    # Open source Face_Recognition
-    pip3 install deepface
+    # bypass paravision
+    # . /usr/local/lib/python3.10/dist-packages/
+    # ROOTURL="http://824f668f-5c6e-4ffe-8b5b-edb4d0301982:985c5412-5e0d-4d66-8d28-ed32dbb900a3@paravision.mycloudrepo.io"
+    # pip3 install cmake --upgrade
+    # pip3 install --no-cache-dir\
+    #     --extra-index-url $ROOTURL/repositories/python-sdk\
+    #     --extra-index-url $ROOTURL/repositories/python-recognition\
+    #     "paravision-recognition" "paravision-models-gen5-balanced-openvino-2022-3" "openvino==2022.3"\
+    #     --trusted-host paravision.mycloudrepo.io

     wget -O downloads/facerec.zip https://github.com/ageitgey/face_recognition/archive/refs/heads/master.zip
     cd downloads
@@ -187,7 +164,10 @@ function f_apt_intox(){
 #### #### # # # # # #### ######

 function f_test(){
-    wget http://localhost:$YOLOPORT/process/
+    mkdir -p $UKDI_yolo_indir
+    cp testimg/* $UKDI_yolo_indir
+    # echo "loading messi 1"
+    wget "http://localhost:$YOLOPORT/svc_load_face/messi1/messi1.jpg"
 }

 function f_start(){
@@ -207,11 +187,10 @@ function f_stop(){
 }


-
 echo "Running $0 with option $1 at $DIR"
 case $1 in

-    "apt") f_apt $2
+    "apt") f_apt
         ;;
     "start") f_start
         ;;

@@ -52,12 +52,15 @@ class Camera(object):
         if ret!=True:
             return '{ "status":4773, "remark":"Could not grab frame " }'
         pixel_avg = self.frame.mean()
-        if pixel_avg > 100:
-            break
+        if pixel_avg > 150:
+            return '{ "status":4774, "remark":"Frame is too bright " }'
+        if pixel_avg < 50:
+            return '{ "status":4775, "remark":"Frame is too dark " }'
         cap.release()
+        return '{ "status":0, "remark":"OK", "pixel_avg":%d }' % pixel_avg


-    # show the raw acquired image for a test
+    # show the raw acquired image for a test - only works if opencv was compiled a certain way
     def show(self):
         cv2.imshow("test_window",self.frame)
         cv2.waitKey(0)
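Note: acquire() now gates on mean frame brightness instead of breaking out of the grab loop. A minimal standalone sketch of the same exposure check, assuming OpenCV's VideoCapture and the 50/150 thresholds from the hunk (the device index and function name are illustrative):

import cv2

def check_exposure(camidx=0, dark=50, bright=150):
    # grab a single frame and judge its mean pixel value, as acquire() does
    cap = cv2.VideoCapture(camidx)
    ret, frame = cap.read()
    cap.release()
    if not ret:
        return '{ "status":4773, "remark":"Could not grab frame " }'
    pixel_avg = frame.mean()
    if pixel_avg > bright:
        return '{ "status":4774, "remark":"Frame is too bright " }'
    if pixel_avg < dark:
        return '{ "status":4775, "remark":"Frame is too dark " }'
    return '{ "status":0, "remark":"OK", "pixel_avg":%d }' % pixel_avg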
@@ -66,19 +69,12 @@ class Camera(object):

     # save the image to a named file
     def save(self, filename, format="png"):
-        ret = 0
-        buffer = None
-        if format=="png":
-            ret, buffer = cv2.imencode('.png', self.frame)
-        else:
-            ret, buffer = cv2.imencode('.jpg', self.frame)
-        if not ret:
-            print("Bad ret code ", ret)
-        cv2.imwrite(filename + "." + format, buffer)
+        cv2.imwrite(filename + "." + format, self.frame)
+        return 0


     def dump(self):
-        print (self.frame)
+        #print (self.frame)
         return self.frame


@@ -98,5 +94,6 @@ class Camera(object):

 if __name__ == '__main__':
     m = Camera()
-    m.acquire(0,"/tmp/newpic.jpg",True)
+    print(m.acquire("0"))
+    m.save("/tmp/test","png")


@@ -42,7 +42,9 @@ import urllib.request
 from faceclass import FaceClass

 import time
-
+#
+# This, together with its superclass, is the reference implementation - CTO
+#

 class FaceRecognition(FaceClass):

@@ -53,32 +55,30 @@ class FaceRecognition(FaceClass):
     face_match_json = None
     conf = []

+    # @doc find all the faces in the named image
     def get_faces(self, name):
         print("** get_faces ... %s" % name)
         try:
             # Get all faces from images with qualities, landmarks, and embeddings
-            boxes = face_recognition.face_locations(self.imgs[name], number_of_times_to_upsample=2, model='hog')
-            print("found %d boxes for %s" % (len(boxes), name) )
+            self.boxes = face_recognition.face_locations(self.imgs[name], number_of_times_to_upsample=2, model='hog')
+            print("found %d boxes for %s" % (len(self.boxes), name) )
             # Get numerical representation of faces (required for face match)
             self.encs[name] = face_recognition.face_encodings(self.imgs[name])[0]
             #print("encoding for %s : " % name, self.encs[name])
         except Exception as ex:
             self.errstr = "image processing exception at get_faces: "+str(ex)
-            return -1
-        return len(boxes)
+            return '{ "status":222310, "remark":"image processing exception", "guilty_param":"error", "guilty_value":"%s" }' % str(ex)
+        return '{ "status":0, "remark":"OK", "faces":%d, "boxes":%s }' % (len(self.boxes), json.dumps(self.boxes))


-    # return " id=%d photo=%d result=%d " % (self.id_face, self.photo_face, len(self.image_inference_result))


+    # @doc find the landmarks of the given face
     def get_landmarks(self, name):
-        self.landmarks = face_recognition.face_landmarks(self.imgs[name])
-        print(self.landmarks)
-        return self.landmarks
+        landmarks = face_recognition.face_landmarks(self.imgs[name])
+        return '{ "status":0, "remark":"OK", "landmarks":%s }' % json.dumps(landmarks)


-    def compute_scores(self, name1, name2):
+    # @doc compare two named images, previously loaded
+    def compare(self, name1, name2):
         print("** computing ... %s vs %s" % (name1,name2))
         try:
             res = face_recognition.compare_faces([self.encs[name1]], self.encs[name2])
@@ -90,8 +90,9 @@ class FaceRecognition(FaceClass):
             self.tree["score"] = self.match_score[0]
         except Exception as ex:
             self.errstr = "image comparison exception at compute_scores: "+str(ex)
-            return -1
-        return self.match_score
+            return '{ "status":332410, "remark":"%s" }' % self.errstr
+        return '{ "status":0, "remark":"OK", "score":%d }' % self.match_score[0]


     def get_scores(self):
         return json.dumps(self.tree)
@@ -99,7 +100,7 @@ class FaceRecognition(FaceClass):

     def detect_all(self):
         n=0
-        self.load("localcam","regula", "/tmp/localcam.png", "/tmp/regula/Portrait_0.jpg")
+        self.load("localcam", "/tmp/localcam.png", "regula", "/tmp/regula/Portrait_0.jpg")
         self.get_faces("localcam")
         self.get_faces("regula")
         print("computed ... ", d.compute_scores("regula","localcam"))
@@ -115,7 +116,7 @@ if __name__ == '__main__':
     if sys.argv[1]=="kiosk":
         # lfw
         n=0
-        d.load("localcam","regula", "/tmp/localcam.png", "/tmp/regula/Portrait_0.jpg")
+        d.load("localcam", "/tmp/localcam.png", "regula", "/tmp/regula/Portrait_0.jpg")
         d.get_faces("localcam")
         d.get_faces("regula")
         print("computed ... ", d.compute_scores("regula","localcam"))
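For reference, a minimal sketch of the face_recognition library calls that get_faces() and compare() wrap. The paths reuse the ones from the diff, face_distance is shown only for completeness, and note that the library returns boxes as (top, right, bottom, left) tuples:

import face_recognition

img1 = face_recognition.load_image_file("/tmp/localcam.png")
img2 = face_recognition.load_image_file("/tmp/regula/Portrait_0.jpg")

# locate faces, then build the 128-d encodings used for matching
boxes = face_recognition.face_locations(img1, number_of_times_to_upsample=2, model='hog')
enc1 = face_recognition.face_encodings(img1)[0]   # raises IndexError if no face was found
enc2 = face_recognition.face_encodings(img2)[0]

same = face_recognition.compare_faces([enc1], enc2)   # list of booleans
dist = face_recognition.face_distance([enc1], enc2)   # lower means more similar
print(boxes, same, dist)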

@@ -2,6 +2,8 @@ import sys
 import os
 import cv2
 import json
+import base64
+
 from matplotlib import pyplot as plt

 class FaceClass(object):
@@ -24,20 +26,20 @@ class FaceClass(object):
     # Load a pics using the device label
     def load1(self, name,fname):
         if not os.path.isfile(fname):
-            return False
+            return '{ "status":442565, "remark":"file name not found", "guilty_param":"fname", "guilty_value":"%s" }' % (fname)
         self.imgs[name] = cv2.imread(fname)
-        #print(" Loaded %s from file %s" % (name, fname))
-        return True
+        print(" Loaded %s from file %s" % (name, fname))
+        return '{ "status":0, "remark":"OK", "name":"%s", "fname":"%s" }' % (name,fname)


-    def load(self, name1,name2,fname1,fname2):
+    def load2(self, name1,fname1,name2,fname2):
         print("FaceClass loading files ....................... ")
         if not os.path.isfile(fname1):
             print("Cant access file ",fname1)
-            return -1
+            return '{ "status":442566, "remark":"file not found", "guilty_param":"fname", "guilty_value":"%s" }' % (fname1)
         if not os.path.isfile(fname2):
             print("Cant access file ",fname2)
-            return -1
+            return '{ "status":442567, "remark":"file not found", "guilty_param":"fname", "guilty_value":"%s" }' % (fname2)
         self.imfiles.append(fname1)
         self.imfiles.append(fname2)
         self.imnames[name1] = fname1
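The loaders now report through one JSON envelope (status 0 for success, a non-zero code plus a remark otherwise), so every caller can branch the same way. A small sketch of how a caller might consume these strings; fc stands in for any FaceClass subclass instance and the path is a placeholder:

import json

reply = fc.load1("localcam", "/tmp/localcam.png")
obj = json.loads(reply)
if obj["status"] != 0:
    print("load failed:", obj["remark"])
else:
    print("loaded", obj["name"], "from", obj["fname"])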
@@ -49,38 +51,79 @@ class FaceClass(object):
         p2.imshow(name2, self.imgs[name2])
         p1.show()
         print("FaceClass: Loaded %s from file %s" % (name1, fname1))
-        return 1
+        print("FaceClass: Loaded %s from file %s" % (name2, fname2))
+        return '{ "status":0, "remark":"OK", "name1":"%s", "fname1":"%s", "name2":"%s", "fname2":"%s" }' % (name1,fname1,name2,fname2)


-    def box(self, name, x, y, w, h):
-        cv2.rectangle(self.imgs[name],(x,y),(x+w,y+h), (0,255,0), 4)
+    # @doc draw a box and save with a new name
+    def rect(self, name, x1, y1, x2, y2, newname):
+        self.imgs[newname] = cv2.rectangle(self.imgs[name],(x1,y1),(x2,y2), (0,255,0), 4)

+    # @doc crop an image and save with a new name
+    def crop(self, name, x1, y1, x2, y2, newname):
+        print(x1,y1,x2,y2)
+        self.imgs[newname] = self.imgs[name][x1:x2, y1:y2]

     # Load the config
     def init(self):
         with open("/etc/ukdi.json","r") as f:
             self.conf = json.loads(f.read())


     # Find all faces
     def get_faces(self,name):
-        return -1
+        return '{ "status":88241, "remark":"override this!" }'

     # Find best face
     def get_best_face(self):
-        return None
+        return '{ "status":88242, "remark":"override this!" }'


+    # @doc find the biggest face in the named image
+    def get_ideal(self, name):
+        self.boxes = []
+        self.get_faces(name)
+        found = -1
+        ix = 0
+        biggest = -1
+        for b in self.boxes:
+            print(json.dumps(b))
+            area = b[3] * b[2]
+            print(b,area)
+            if area > biggest:
+                found = ix
+                biggest = area
+                # box returns (left,top,right,bottom) - ffs
+                #self.crop(name,b[1],b[0],b[3],b[2],"_crop")
+                #self.rect(name,b[0],b[1],b[2],b[3],"_rect")
+                # rect expects x1,y1,x2,y2
+                self.rect(name,b[0],b[1],b[1]+b[3],b[0]+b[2],"_rect")
+            ix+=1
+        if found < 0:
+            return '{ "status":8572421, "remark":"no ideal face", "guilty_param":"name", "guilty_value":"%s" }' % name
+        return '{ "status":0, "remark":"OK", "faces":%d, "ideal_ix":%s, "ideal_area":%d, "boxes":%s }' % (len(self.boxes), found, biggest, json.dumps(self.boxes))


+    # return a base64 version of the pic in memory
+    def dump64(self,which,format="png"):
+        if format=="jpg":
+            _ , im_arr = cv2.imencode('.jpg', self.imgs[which])
+            img_as_txt = base64.b64encode(im_arr)
+            return b'data:image/jpeg;base64,'+img_as_txt
+        _ , im_arr = cv2.imencode('.png', self.imgs[which])
+        img_as_txt = base64.b64encode(im_arr)
+        return b'data:image/png;base64, '+img_as_txt

     # Find landmarks
     def get_landmarks(self):
-        return '{ "status": 9999 }'
+        return '{ "status":88243, "remark":"override this!" }'

     # Find metadata (age etc)
     def get_metadata(self,name):
-        return None
+        return '{ "status":88244, "remark":"override this!" }'

     # Match two faces
     def process(self,name1,name2):
-        return None
+        return '{ "status":88245, "remark":"override this!" }'


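dump64() takes the usual OpenCV-to-data-URI route: imencode the array into an in-memory PNG/JPEG buffer, then base64-encode it. A self-contained sketch of the same technique, assuming a BGR numpy image as produced by cv2.imread (the function name is illustrative):

import base64
import cv2

def to_data_uri(img, format="png"):
    # encode the in-memory image, then wrap the base64 text in a data: URI
    ext = ".jpg" if format == "jpg" else ".png"
    mime = "image/jpeg" if format == "jpg" else "image/png"
    ok, buf = cv2.imencode(ext, img)
    if not ok:
        return None
    return "data:%s;base64,%s" % (mime, base64.b64encode(buf).decode())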

@@ -40,9 +40,8 @@ import urllib.request

 import time

-from ukdi import UKDI

-class Facematch(UKDI):
+class Facematch(object):

     name = "face_recognition_face_match"
     IN_ENGLISH = "A class for checking the similarity between two faces."

105  src/yoloserv.py
@@ -23,7 +23,7 @@ class yoloserv(object):

     yolo = None
     device = None
-    imgdir = None
+    indir = None
     outdir = None
     facedetector = None
     lifedetector = None
@@ -50,7 +50,7 @@ class yoloserv(object):

         print("Init yoloserv: %s @ %s %s " % (self.conf["yolo_devices"], self.conf["yolo_indir"], self.conf["yolo_outdir"]) )
         self.devices = self.conf["yolo_devices"].split(",")
-        self.imgdir = self.conf["yolo_indir"]
+        self.indir = self.conf["yolo_indir"]
         self.outdir = self.conf["yolo_outdir"]

     # Object (face) matching
@@ -125,55 +125,79 @@ class yoloserv(object):
         self.intox_detector.init()



     # Acquire image - the device used depends on the device list and what actual file was loaded as self.camera
+    # @doc acquires an image from the camera (test OK CG 2024-0724)
     @cherrypy.expose
     def svc_acquire(self,camidx=0):
         self.camera.acquire(camidx)
-        return "0 OK"
+        return '{ "status":0, "remark":"OK" }'

-    # Test
+    # Test- (opencv)
     @cherrypy.expose
     def svc_show(self):
         self.camera.show()
-        return "0 OK"
+        return '{ "status":0, "remark":"OK" }'

+    # @doc saves the camera image to a file (test OK CG 2024-0724)
     @cherrypy.expose
     def svc_save(self,filename,extn="png"):
-        self.camera.save(filename,extn)
-        return "0 OK"
+        self.camera.save(self.outdir + filename,extn)
+        return '{ "status":0, "remark":"OK", "outfile": "%s/%s.%s" }' % (self.outdir,filename,extn)

+    # @doc dumps the camera image as an array (test OK CG 2024-0724)
     @cherrypy.expose
     def svc_dump(self):
         buf = self.camera.dump()
         return buf

+    # @doc dumps the camera image as a base64 encoded array (test OK CG 2024-0724)
     @cherrypy.expose
     def svc_dump64(self):
-        buf = self.camera.dump64()
+        buf = self.camera.dump64().decode()
         return buf

+    # @doc dumps the camera image as an <img> tag for straight to HTML output (test OK CG 2024-0724)
+    @cherrypy.expose
+    def svc_dumphtml(self):
+        buf = self.camera.dump64().decode()
+        return "<html><img src='%s'/></html>" % buf



     # Find faces - the algorithm used depends on the device list and what actual file was loaded as self.camera

-    # Simply load an image
+    # @doc load an image from a file using the specified yoloserv plugin (test OK CG 2024-0724)
     @cherrypy.expose
-    def svc_load_face(self,infile):
-        self.facematcher.load(infile)
+    def svc_load_face(self,name,infile):
+        return self.facematcher.load1(name, self.indir + infile)

+    # @doc load images from two files using the specified yoloserv plugin (test OK CG 2024-0724)
     @cherrypy.expose
-    def svc_detect_faces(self,infile):
-        nfaces = self.facematcher.detect_all(self.imgdir + infile)
-        return '{ "status":0, "remark":"found faces", "count":%d }' % (nfaces)
+    def svc_load_faces(self,name1,infile1,name2,infile2):
+        return self.facematcher.load2(name1, self.indir + infile1, name2, self.indir + infile2)

-    # Find the most prominent object
+    # @doc find all the faces in the named image that was loaded using the above calls (test OK CG 2024-0724)
     @cherrypy.expose
-    def svc_ideal_face(self,infile):
-        self.facematcher.detect_one(infile)
+    def svc_get_faces(self,which):
+        return self.facematcher.get_faces(which)

+    # @doc find the most prominent face in the named image that was loaded using the above calls (test OK CG 2024-0724)
+    # you can access the new ideal face (if present) with the image name "_ideal"
+    @cherrypy.expose
+    def svc_get_ideal(self,which):
+        return self.facematcher.get_ideal(which)

+    # @doc dumps the named image as an <img> tag for straight to HTML output (test OK CG 2024-0724)
+    @cherrypy.expose
+    def svc_imgtag(self,which):
+        buf = self.facematcher.dump64(which).decode()
+        return "<html><img src='%s'/></html>" % buf

     # Match faces together
     @cherrypy.expose
-    def svc_match_faces(self,infile1,infile2):
-        return self.facematcher.detect(infile1,infile2)
+    def svc_compare(self,name1,name2):
+        return self.facematcher.compare(name1,name2)


     @cherrypy.expose
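The new endpoints can be exercised the same way f_test does it in ctl.sh. A hedged example using urllib.request; the port, image names and file names are placeholders, and the files must already sit in yolo_indir:

import urllib.request

BASE = "http://localhost:8080"   # placeholder port, normally $YOLOPORT

def call(path):
    # CherryPy maps the extra path segments onto the method arguments
    with urllib.request.urlopen(BASE + path) as r:
        return r.read().decode()

print(call("/svc_load_face/messi1/messi1.jpg"))   # load one image under the name "messi1"
print(call("/svc_get_faces/messi1"))              # boxes for that image
print(call("/svc_load_faces/a/a.jpg/b/b.jpg"))    # load two images at once
print(call("/svc_compare/a/b"))                   # match score between them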
@@ -182,8 +206,24 @@ class yoloserv(object):
         os._exit(0)



+    # @doc find the landmarks in the named image (test OK CG 2024-0724)
     @cherrypy.expose
-    def facematch(self,dev1,dev2):
+    def svc_get_landmarks(self,which):
+        return self.facematcher.get_landmarks(which)
+
+    # Match faces together
+    @cherrypy.expose
+    def svc_match_faces(self,infile1,infile2):
+        return self.facematcher.detect(infile1,infile2)
+
+    def json2obj(self,jsonx):
+        return json.laods(jsonx)
+
+    # @doc put all the steps for a retail facematch into one convenient functions
+    @cherrypy.expose
+    def retail_facematch(self,dev1,dev2):
         if self.facematcher is None:
             return '{ "status":777244, "remark":"suitable yolo_device" }'

@@ -201,15 +241,18 @@ class yoloserv(object):
         if self.conf["emulate_facematch"]:
             return '{ "status":0, "remark":"OK", "data":{} }'

-        status = self.facematcher.load(dev1, dev2, img1, img2)
-        if not status:
-            return '{ "status":777242, "remark":"face loading failed", "guilty_param":"facematch", "guilty_value":"%s" }' % (status)
-        if self.facematcher.get_faces(dev1) < 1:
-            return '{ "status":777243, "remark":"face loading failed", "guilty_param":"image", "guilty_value":"%s" }' % (dev1)
-        if self.facematcher.get_faces(dev2) < 1:
-            return '{ "status":777244, "remark":"face loading failed", "guilty_param":"image", "guilty_value":"%s" }' % (dev2)
-        if self.facematcher.compute_scores(dev1,dev2) < 1:
-            return '{ "status":777245, "remark":"face matching failed", "guilty_param":"image", "guilty_value":"%s" }' % (self.facematcher.errstr)
+        obj = self.json2obj(self.facematcher.load_faces(dev1, img1, dev2, img2))
+        if obj["status"] > 0:
+            return jsonx
+        obj = self.json2obj(self.facematcher.get_faces(dev1))
+        if obj["status"] > 0:
+            return jsonx
+        obj = self.json2obj(self.facematcher.get_faces(dev2))
+        if obj["status"] > 0:
+            return jsonx
+        obj = self.json2obj(self.facematcher.compute_scores(dev1,dev2))
+        if obj["status"] > 0:
+            return jsonx
         jsonstr = self.facematcher.get_scores()
         return '{ "status":0, "remark":"OK", "data": %s }' % (jsonstr)

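retail_facematch now treats each step as JSON-in/JSON-out and stops at the first non-zero status. The short-circuit pattern, sketched in isolation (the step callables are stand-ins for the facematcher calls):

import json

def run_steps(steps):
    # each step returns a JSON string with a "status" field; stop on the first failure
    for step in steps:
        reply = step()
        if json.loads(reply)["status"] > 0:
            return reply
    return '{ "status":0, "remark":"OK" }'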
@@ -362,13 +405,13 @@ class yoloserv(object):
 if __name__ == '__main__':
     # Deal with the incoming call parameters
     servport = int(sys.argv[1])
-    imgdir = sys.argv[2]
+    indir = sys.argv[2]
     outdir = sys.argv[3]

     # Initialise the webserver
     s = yoloserv()
     s.initialise()
-    #s.initialise(imgdir,outdir,weightsfile)
+    #s.initialise(indir,outdir,weightsfile)
     cherrypy.config.update({'server.socket_host': '0.0.0.0',
                             'server.socket_port': servport})
     cherrypy.quickstart(s, '/')