yoloserv/modules/paravision/liveness/session.py

import pyrealsense2 as rs
import cv2
import numpy as np
from .tensorrt.engine import Engine
from .utils import (
estimate_depth_bounding_box,
expand_bbox_to_edge_and_crop,
model_location,
)
from .exceptions import InvalidWindowSizeException

# validity constants
WINDOW_SIZE = 5


class CameraParams(object):
    """Camera intrinsics/extrinsics needed to project color-space bounding boxes
    into the depth frame."""

    def __init__(self, depth_intr, color_intr, color_to_depth_extr):
        self.depth_intr = depth_intr
        self.color_intr = color_intr
        self.color_to_depth_extr = color_to_depth_extr
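

def camera_params_from_profile(profile):
    # Sketch of a helper (not part of the original module): one way CameraParams
    # could be populated from a started pyrealsense2 pipeline profile, e.g.
    # `profile = rs.pipeline().start()`. The calls below follow the public
    # pyrealsense2 API; treat this as an illustration, not the project's own wiring.
    depth_profile = profile.get_stream(rs.stream.depth).as_video_stream_profile()
    color_profile = profile.get_stream(rs.stream.color).as_video_stream_profile()
    return CameraParams(
        depth_intr=depth_profile.get_intrinsics(),
        color_intr=color_profile.get_intrinsics(),
        color_to_depth_extr=color_profile.get_extrinsics_to(depth_profile),
    )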


class Liveness(object):
    """Runs the TensorRT liveness engine over a window of cropped depth frames."""

    def __init__(self, model_path=None, settings=None):
        if model_path is None:
            model_path = model_location()
        if settings is None:
            settings = {}
        self.predictor = Engine(model_path, settings)

    def load_depth_data_from_file(self, file_path):
        """Load a depth frame previously saved with write_depth_data_to_file."""
        return np.loadtxt(file_path, dtype=np.int16, delimiter=",")

    def write_depth_data_to_file(self, file_path, depth_data):
        """Persist a depth frame as comma-separated integers."""
        np.savetxt(file_path, depth_data, fmt="%d", delimiter=",")

    def crop_depth_frame(self, camera_params, depth_frame, bounding_box):
        """Project a color-space face bounding box into the depth frame and crop it."""
        if camera_params is None or depth_frame is None or bounding_box is None:
            raise ValueError("Invalid input arguments")
        proj_depth_bb = estimate_depth_bounding_box(bounding_box, camera_params)
        cropped_depth_frame = expand_bbox_to_edge_and_crop(depth_frame, proj_depth_bb)
        return cropped_depth_frame
    def compute_liveness_probability(self, depth_imgs):
        """Score a window of WINDOW_SIZE cropped depth frames and return the mean
        per-frame liveness probability."""
        if len(depth_imgs) != WINDOW_SIZE:
            raise InvalidWindowSizeException(
                "Window size must equal %d" % WINDOW_SIZE
            )
        # cv2.resize expects (width, height); the predictor reports its input
        # shape as (height, width).
        resized = [
            cv2.resize(
                depth_img.astype(np.float32),
                (self.predictor.input_shape[1], self.predictor.input_shape[0]),
            )
            for depth_img in depth_imgs
        ]
        frame_probs = self.predictor.predict(resized)
        return np.mean(frame_probs)
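

# Usage sketch (illustrative, not part of the original module): score a window of
# five previously saved depth crops. The CSV file names are placeholders.
if __name__ == "__main__":
    liveness = Liveness()
    depth_imgs = [
        liveness.load_depth_data_from_file("depth_crop_%d.csv" % i)
        for i in range(WINDOW_SIZE)
    ]
    print("liveness probability:", liveness.compute_liveness_probability(depth_imgs))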