from functools import lru_cache
from typing import List, Sequence, Tuple

import cv2
import numpy

from facefusion import inference_manager, state_manager
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_helper import create_rotation_matrix_and_size, create_static_anchors, distance_to_bounding_box, distance_to_face_landmark_5, normalize_bounding_box, transform_bounding_box, transform_points
from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import thread_semaphore
from facefusion.types import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame
from facefusion.vision import restrict_frame, unpack_resolution

@lru_cache()
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	return\
	{
		'retinaface':
		{
			'hashes':
			{
				'retinaface':
				{
					'url': resolve_download_url('models-3.0.0', 'retinaface_10g.hash'),
					'path': resolve_relative_path('../.assets/models/retinaface_10g.hash')
				}
			},
			'sources':
			{
				'retinaface':
				{
					'url': resolve_download_url('models-3.0.0', 'retinaface_10g.onnx'),
					'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
				}
			}
		},
		'scrfd':
		{
			'hashes':
			{
				'scrfd':
				{
					'url': resolve_download_url('models-3.0.0', 'scrfd_2.5g.hash'),
					'path': resolve_relative_path('../.assets/models/scrfd_2.5g.hash')
				}
			},
			'sources':
			{
				'scrfd':
				{
					'url': resolve_download_url('models-3.0.0', 'scrfd_2.5g.onnx'),
					'path': resolve_relative_path('../.assets/models/scrfd_2.5g.onnx')
				}
			}
		},
		'yolo_face':
		{
			'hashes':
			{
				'yolo_face':
				{
					'url': resolve_download_url('models-3.0.0', 'yoloface_8n.hash'),
					'path': resolve_relative_path('../.assets/models/yoloface_8n.hash')
				}
			},
			'sources':
			{
				'yolo_face':
				{
					'url': resolve_download_url('models-3.0.0', 'yoloface_8n.onnx'),
					'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx')
				}
			}
		},
		'yunet':
		{
			'hashes':
			{
				'yunet':
				{
					'url': resolve_download_url('models-3.4.0', 'yunet_2023_mar.hash'),
					'path': resolve_relative_path('../.assets/models/yunet_2023_mar.hash')
				}
			},
			'sources':
			{
				'yunet':
				{
					'url': resolve_download_url('models-3.4.0', 'yunet_2023_mar.onnx'),
					'path': resolve_relative_path('../.assets/models/yunet_2023_mar.onnx')
				}
			}
		}
	}

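# The inference pool below is keyed on this module name plus the detector
# selected in the application state. A minimal sketch of how the model set
# above is consumed (hedged, mirroring collect_model_downloads()):
#
#   model_set = create_static_model_set('full')
#   entry = model_set.get('retinaface').get('sources').get('retinaface')
#   entry.get('url')   # remote location of retinaface_10g.onnx
#   entry.get('path')  # local path under ../.assets/models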
def get_inference_pool() -> InferencePool:
	model_names = [ state_manager.get_item('face_detector_model') ]
	_, model_source_set = collect_model_downloads()

	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)

def clear_inference_pool() -> None:
	model_names = [ state_manager.get_item('face_detector_model') ]
	inference_manager.clear_inference_pool(__name__, model_names)

def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
	model_set = create_static_model_set('full')
	model_hash_set = {}
	model_source_set = {}

	for face_detector_model in [ 'retinaface', 'scrfd', 'yolo_face', 'yunet' ]:
		if state_manager.get_item('face_detector_model') in [ 'many', face_detector_model ]:
			model_hash_set[face_detector_model] = model_set.get(face_detector_model).get('hashes').get(face_detector_model)
			model_source_set[face_detector_model] = model_set.get(face_detector_model).get('sources').get(face_detector_model)

	return model_hash_set, model_source_set

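# pre_check() below downloads whatever collect_model_downloads() returned:
# a single entry for the selected detector, or all four detectors when
# 'many' is active. Illustration (hedged, assumes the state item is set
# elsewhere via state_manager):
#
#   state_manager.set_item('face_detector_model', 'yunet')
#   model_hash_set, model_source_set = collect_model_downloads()
#   list(model_source_set.keys())  # [ 'yunet' ]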
def pre_check() -> bool:
	model_hash_set, model_source_set = collect_model_downloads()

	return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)

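# detect_faces() fans out to every detector matching the selected model
# ('many' runs retinaface, scrfd and yolo_face together, while yunet only
# runs when selected explicitly) and returns results in the coordinate
# space of the original frame. Minimal usage sketch (hedged, assumes
# pre_check() has fetched the selected model):
#
#   vision_frame = cv2.imread('target.jpg')
#   bounding_boxes, face_scores, face_landmarks_5 = detect_faces(vision_frame)
#   # each bounding box is ordered [ x1, y1, x2, y2 ]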
def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	all_bounding_boxes : List[BoundingBox] = []
	all_face_scores : List[Score] = []
	all_face_landmarks_5 : List[FaceLandmark5] = []

	if state_manager.get_item('face_detector_model') in [ 'many', 'retinaface' ]:
		bounding_boxes, face_scores, face_landmarks_5 = detect_with_retinaface(vision_frame, state_manager.get_item('face_detector_size'))
		all_bounding_boxes.extend(bounding_boxes)
		all_face_scores.extend(face_scores)
		all_face_landmarks_5.extend(face_landmarks_5)

	if state_manager.get_item('face_detector_model') in [ 'many', 'scrfd' ]:
		bounding_boxes, face_scores, face_landmarks_5 = detect_with_scrfd(vision_frame, state_manager.get_item('face_detector_size'))
		all_bounding_boxes.extend(bounding_boxes)
		all_face_scores.extend(face_scores)
		all_face_landmarks_5.extend(face_landmarks_5)

	if state_manager.get_item('face_detector_model') in [ 'many', 'yolo_face' ]:
		bounding_boxes, face_scores, face_landmarks_5 = detect_with_yolo_face(vision_frame, state_manager.get_item('face_detector_size'))
		all_bounding_boxes.extend(bounding_boxes)
		all_face_scores.extend(face_scores)
		all_face_landmarks_5.extend(face_landmarks_5)

	if state_manager.get_item('face_detector_model') == 'yunet':
		bounding_boxes, face_scores, face_landmarks_5 = detect_with_yunet(vision_frame, state_manager.get_item('face_detector_size'))
		all_bounding_boxes.extend(bounding_boxes)
		all_face_scores.extend(face_scores)
		all_face_landmarks_5.extend(face_landmarks_5)

	all_bounding_boxes = [ normalize_bounding_box(all_bounding_box) for all_bounding_box in all_bounding_boxes ]
	return all_bounding_boxes, all_face_scores, all_face_landmarks_5

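# detect_faces_by_angle() rotates the frame, runs the regular detection and
# maps boxes and landmarks back through the inverse affine transform, so
# callers always receive coordinates in the unrotated frame.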
def detect_faces_by_angle(vision_frame : VisionFrame, face_angle : Angle) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	rotation_matrix, rotation_size = create_rotation_matrix_and_size(face_angle, vision_frame.shape[:2][::-1])
	rotation_vision_frame = cv2.warpAffine(vision_frame, rotation_matrix, rotation_size)
	rotation_inverse_matrix = cv2.invertAffineTransform(rotation_matrix)
	bounding_boxes, face_scores, face_landmarks_5 = detect_faces(rotation_vision_frame)
	bounding_boxes = [ transform_bounding_box(bounding_box, rotation_inverse_matrix) for bounding_box in bounding_boxes ]
	face_landmarks_5 = [ transform_points(face_landmark_5, rotation_inverse_matrix) for face_landmark_5 in face_landmarks_5 ]
	return bounding_boxes, face_scores, face_landmarks_5

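# RetinaFace emits nine output tensors: three score maps, then three
# box-distance maps, then three landmark-distance maps, one per feature
# stride (8, 16, 32). That layout explains the index + feature_map_channel
# offsets below. Distances are anchor-relative in stride units, so they are
# multiplied by feature_stride before distance_to_bounding_box() and
# distance_to_face_landmark_5() resolve them against the static anchors.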
def detect_with_retinaface(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	feature_strides = [ 8, 16, 32 ]
	feature_map_channel = 3
	anchor_total = 2
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ -1, 1 ])
	detection = forward_with_retinaface(detect_vision_frame)

	for index, feature_stride in enumerate(feature_strides):
		face_scores_raw = detection[index]
		keep_indices = numpy.where(face_scores_raw >= face_detector_score)[0]

		if numpy.any(keep_indices):
			stride_height = face_detector_height // feature_stride
			stride_width = face_detector_width // feature_stride
			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
			bounding_boxes_raw = detection[index + feature_map_channel] * feature_stride
			face_landmarks_5_raw = detection[index + feature_map_channel * 2] * feature_stride

			for bounding_box_raw in distance_to_bounding_box(anchors, bounding_boxes_raw)[keep_indices]:
				bounding_boxes.append(numpy.array(
				[
					bounding_box_raw[0] * ratio_width,
					bounding_box_raw[1] * ratio_height,
					bounding_box_raw[2] * ratio_width,
					bounding_box_raw[3] * ratio_height
				]))

			for face_score_raw in face_scores_raw[keep_indices]:
				face_scores.append(face_score_raw[0])

			for face_landmark_raw_5 in distance_to_face_landmark_5(anchors, face_landmarks_5_raw)[keep_indices]:
				face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])

	return bounding_boxes, face_scores, face_landmarks_5

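# detect_with_scrfd() shares the exact decode path above and differs only
# in the weights it runs (scrfd_2.5g instead of retinaface_10g).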
def detect_with_scrfd(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	feature_strides = [ 8, 16, 32 ]
	feature_map_channel = 3
	anchor_total = 2
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ -1, 1 ])
	detection = forward_with_scrfd(detect_vision_frame)

	for index, feature_stride in enumerate(feature_strides):
		face_scores_raw = detection[index]
		keep_indices = numpy.where(face_scores_raw >= face_detector_score)[0]

		if numpy.any(keep_indices):
			stride_height = face_detector_height // feature_stride
			stride_width = face_detector_width // feature_stride
			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
			bounding_boxes_raw = detection[index + feature_map_channel] * feature_stride
			face_landmarks_5_raw = detection[index + feature_map_channel * 2] * feature_stride

			for bounding_box_raw in distance_to_bounding_box(anchors, bounding_boxes_raw)[keep_indices]:
				bounding_boxes.append(numpy.array(
				[
					bounding_box_raw[0] * ratio_width,
					bounding_box_raw[1] * ratio_height,
					bounding_box_raw[2] * ratio_width,
					bounding_box_raw[3] * ratio_height
				]))

			for face_score_raw in face_scores_raw[keep_indices]:
				face_scores.append(face_score_raw[0])

			for face_landmark_raw_5 in distance_to_face_landmark_5(anchors, face_landmarks_5_raw)[keep_indices]:
				face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])

	return bounding_boxes, face_scores, face_landmarks_5

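# YOLO-face decodes differently: a single output tensor is transposed and
# split into box, score and landmark columns. Boxes arrive as
# (center x, center y, width, height), so the corners are recovered with
# center +/- size / 2. Landmarks arrive as flat (x, y, score) triples,
# which is why x values sit at columns 0::3, y values at 1::3 and
# reshape(-1, 3)[:, :2] drops the per-point score.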
def detect_with_yolo_face(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ 0, 1 ])
	detection = forward_with_yolo_face(detect_vision_frame)
	detection = numpy.squeeze(detection).T
	bounding_boxes_raw, face_scores_raw, face_landmarks_5_raw = numpy.split(detection, [ 4, 5 ], axis = 1)
	keep_indices = numpy.where(face_scores_raw > face_detector_score)[0]

	if numpy.any(keep_indices):
		bounding_boxes_raw, face_scores_raw, face_landmarks_5_raw = bounding_boxes_raw[keep_indices], face_scores_raw[keep_indices], face_landmarks_5_raw[keep_indices]

		for bounding_box_raw in bounding_boxes_raw:
			bounding_boxes.append(numpy.array(
			[
				(bounding_box_raw[0] - bounding_box_raw[2] / 2) * ratio_width,
				(bounding_box_raw[1] - bounding_box_raw[3] / 2) * ratio_height,
				(bounding_box_raw[0] + bounding_box_raw[2] / 2) * ratio_width,
				(bounding_box_raw[1] + bounding_box_raw[3] / 2) * ratio_height
			]))

		face_scores = face_scores_raw.ravel().tolist()
		face_landmarks_5_raw[:, 0::3] = (face_landmarks_5_raw[:, 0::3]) * ratio_width
		face_landmarks_5_raw[:, 1::3] = (face_landmarks_5_raw[:, 1::3]) * ratio_height

		for face_landmark_raw_5 in face_landmarks_5_raw:
			face_landmarks_5.append(numpy.array(face_landmark_raw_5.reshape(-1, 3)[:, :2]))

	return bounding_boxes, face_scores, face_landmarks_5

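# YuNet decode in brief: the face score is the product of the class and
# objectness maps, box centers are anchor-relative offsets in stride units
# and box sizes are predicted in log space (hence numpy.exp). The ten
# landmark columns form five (x, y) pairs that receive the same
# anchor + stride treatment before the reshape to (-1, 5, 2).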
def detect_with_yunet(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	feature_strides = [ 8, 16, 32 ]
	feature_map_channel = 3
	anchor_total = 1
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ 0, 255 ])
	detection = forward_with_yunet(detect_vision_frame)

	for index, feature_stride in enumerate(feature_strides):
		face_scores_raw = (detection[index] * detection[index + feature_map_channel]).reshape(-1)
		keep_indices = numpy.where(face_scores_raw >= face_detector_score)[0]

		if numpy.any(keep_indices):
			stride_height = face_detector_height // feature_stride
			stride_width = face_detector_width // feature_stride
			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
			bounding_boxes_center = detection[index + feature_map_channel * 2].squeeze(0)[:, :2] * feature_stride + anchors
			bounding_boxes_size = numpy.exp(detection[index + feature_map_channel * 2].squeeze(0)[:, 2:4]) * feature_stride
			face_landmarks_5_raw = detection[index + feature_map_channel * 3].squeeze(0)

			bounding_boxes_raw = numpy.stack(
			[
				bounding_boxes_center[:, 0] - bounding_boxes_size[:, 0] / 2,
				bounding_boxes_center[:, 1] - bounding_boxes_size[:, 1] / 2,
				bounding_boxes_center[:, 0] + bounding_boxes_size[:, 0] / 2,
				bounding_boxes_center[:, 1] + bounding_boxes_size[:, 1] / 2
			], axis = -1)

			for bounding_box_raw in bounding_boxes_raw[keep_indices]:
				bounding_boxes.append(numpy.array(
				[
					bounding_box_raw[0] * ratio_width,
					bounding_box_raw[1] * ratio_height,
					bounding_box_raw[2] * ratio_width,
					bounding_box_raw[3] * ratio_height
				]))

			face_scores.extend(face_scores_raw[keep_indices])
			face_landmarks_5_raw = numpy.concatenate(
			[
				face_landmarks_5_raw[:, [0, 1]] * feature_stride + anchors,
				face_landmarks_5_raw[:, [2, 3]] * feature_stride + anchors,
				face_landmarks_5_raw[:, [4, 5]] * feature_stride + anchors,
				face_landmarks_5_raw[:, [6, 7]] * feature_stride + anchors,
				face_landmarks_5_raw[:, [8, 9]] * feature_stride + anchors
			], axis = -1).reshape(-1, 5, 2)

			for face_landmark_raw_5 in face_landmarks_5_raw[keep_indices]:
				face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])

	return bounding_boxes, face_scores, face_landmarks_5

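# The four forward_with_xxx() helpers below follow one pattern: fetch the
# ONNX Runtime session from the inference pool and serialize run() calls
# behind thread_semaphore(), presumably because a single session may be
# shared across worker threads.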
def forward_with_retinaface(detect_vision_frame : VisionFrame) -> Detection:
	face_detector = get_inference_pool().get('retinaface')

	with thread_semaphore():
		detection = face_detector.run(None,
		{
			'input': detect_vision_frame
		})

	return detection

def forward_with_scrfd(detect_vision_frame : VisionFrame) -> Detection:
	face_detector = get_inference_pool().get('scrfd')

	with thread_semaphore():
		detection = face_detector.run(None,
		{
			'input': detect_vision_frame
		})

	return detection

def forward_with_yolo_face(detect_vision_frame : VisionFrame) -> Detection:
	face_detector = get_inference_pool().get('yolo_face')

	with thread_semaphore():
		detection = face_detector.run(None,
		{
			'input': detect_vision_frame
		})

	return detection

def forward_with_yunet(detect_vision_frame : VisionFrame) -> Detection:
	face_detector = get_inference_pool().get('yunet')

	with thread_semaphore():
		detection = face_detector.run(None,
		{
			'input': detect_vision_frame
		})

	return detection

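# prepare_detect_frame() pads the size-restricted frame into the top-left
# corner of a zero canvas and converts HWC to NCHW float32. For example
# (assuming restrict_frame() keeps the frame within the detector size), a
# 640x360 frame at face_detector_size '640x640' becomes a (1, 3, 640, 640)
# tensor whose bottom 280 rows stay zero.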
def prepare_detect_frame(temp_vision_frame : VisionFrame, face_detector_size : str) -> VisionFrame:
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	detect_vision_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
	detect_vision_frame[:temp_vision_frame.shape[0], :temp_vision_frame.shape[1], :] = temp_vision_frame
	detect_vision_frame = numpy.expand_dims(detect_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
	return detect_vision_frame

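# Normalization ranges as wired into the detectors above: retinaface and
# scrfd use [ -1, 1 ] (pixel 255 maps to (255 - 127.5) / 128 ~ 0.996),
# yolo_face uses [ 0, 1 ] (255 / 255.0 = 1.0) and yunet feeds raw
# [ 0, 255 ] pixels through unchanged.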
def normalize_detect_frame(detect_vision_frame : VisionFrame, normalize_range : Sequence[int]) -> VisionFrame:
	if normalize_range == [ -1, 1 ]:
		return (detect_vision_frame - 127.5) / 128.0
	if normalize_range == [ 0, 1 ]:
		return detect_vision_frame / 255.0
	return detect_vision_frame