diff --git a/facefusion/camera_manager.py b/facefusion/camera_manager.py index aa3b351..a27e5fc 100644 --- a/facefusion/camera_manager.py +++ b/facefusion/camera_manager.py @@ -2,7 +2,6 @@ from typing import List import cv2 -from facefusion.common_helper import is_windows from facefusion.types import CameraPoolSet CAMERA_POOL_SET : CameraPoolSet =\ @@ -15,10 +14,7 @@ def get_local_camera_capture(camera_id : int) -> cv2.VideoCapture: camera_key = str(camera_id) if camera_key not in CAMERA_POOL_SET.get('capture'): - if is_windows(): - camera_capture = cv2.VideoCapture(camera_id, cv2.CAP_DSHOW) - else: - camera_capture = cv2.VideoCapture(camera_id) + camera_capture = cv2.VideoCapture(camera_id) if camera_capture.isOpened(): CAMERA_POOL_SET['capture'][camera_key] = camera_capture diff --git a/facefusion/face_analyser.py b/facefusion/face_analyser.py index b1dad44..855ebd7 100644 --- a/facefusion/face_analyser.py +++ b/facefusion/face_analyser.py @@ -122,3 +122,28 @@ def get_many_faces(vision_frames : List[VisionFrame]) -> List[Face]: many_faces.extend(faces) set_static_faces(vision_frame, faces) return many_faces + + +def scale_face(target_face : Face, target_vision_frame : VisionFrame, temp_vision_frame : VisionFrame) -> Face: + scale_x = temp_vision_frame.shape[1] / target_vision_frame.shape[1] + scale_y = temp_vision_frame.shape[0] / target_vision_frame.shape[0] + + bounding_box =\
 [
 target_face.bounding_box[0] * scale_x,
 target_face.bounding_box[1] * scale_y,
 target_face.bounding_box[2] * scale_x,
 target_face.bounding_box[3] * scale_y
 ] + landmark_set =\
 {
 '5': target_face.landmark_set.get('5') * numpy.array([ scale_x, scale_y ]),
 '5/68': target_face.landmark_set.get('5/68') * numpy.array([ scale_x, scale_y ]),
 '68': target_face.landmark_set.get('68') * numpy.array([ scale_x, scale_y ]),
 '68/5': target_face.landmark_set.get('68/5') * numpy.array([ scale_x, scale_y ])
 } + + return target_face._replace(
 bounding_box = bounding_box,
 landmark_set = 
landmark_set + ) diff --git a/facefusion/installer.py b/facefusion/installer.py index a363b06..b8b3ee0 100644 --- a/facefusion/installer.py +++ b/facefusion/installer.py @@ -12,7 +12,7 @@ from facefusion.common_helper import is_linux, is_windows ONNXRUNTIME_SET =\ { - 'default': ('onnxruntime', '1.22.0') + 'default': ('onnxruntime', '1.22.1') } if is_windows() or is_linux(): ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.22.0') diff --git a/facefusion/metadata.py b/facefusion/metadata.py index a058cf6..625746a 100644 --- a/facefusion/metadata.py +++ b/facefusion/metadata.py @@ -4,7 +4,7 @@ METADATA =\ { 'name': 'FaceFusion', 'description': 'Industry leading face manipulation platform', - 'version': '3.4.0', + 'version': '3.4.1', 'license': 'OpenRAIL-AS', 'author': 'Henry Ruhs', 'url': 'https://facefusion.io' diff --git a/facefusion/processors/modules/age_modifier.py b/facefusion/processors/modules/age_modifier.py index e6233f0..0719456 100755 --- a/facefusion/processors/modules/age_modifier.py +++ b/facefusion/processors/modules/age_modifier.py @@ -11,6 +11,7 @@ from facefusion import config, content_analyser, face_classifier, face_detector, from facefusion.common_helper import create_int_metavar, is_macos from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.execution import has_execution_provider +from facefusion.face_analyser import scale_face from facefusion.face_helper import merge_matrix, paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5 from facefusion.face_masker import create_box_mask, create_occlusion_mask from facefusion.face_selector import select_faces @@ -204,6 +205,7 @@ def process_frame(inputs : AgeModifierInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = modify_age(target_face, temp_vision_frame) return temp_vision_frame diff --git 
a/facefusion/processors/modules/deep_swapper.py b/facefusion/processors/modules/deep_swapper.py index 42619f8..b9ac6d0 100755 --- a/facefusion/processors/modules/deep_swapper.py +++ b/facefusion/processors/modules/deep_swapper.py @@ -11,6 +11,7 @@ import facefusion.jobs.job_store from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording from facefusion.common_helper import create_int_metavar from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url_by_provider +from facefusion.face_analyser import scale_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask from facefusion.face_selector import select_faces @@ -415,6 +416,7 @@ def process_frame(inputs : DeepSwapperInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = swap_face(target_face, temp_vision_frame) return temp_vision_frame diff --git a/facefusion/processors/modules/expression_restorer.py b/facefusion/processors/modules/expression_restorer.py index 83efcf6..5e7c53d 100755 --- a/facefusion/processors/modules/expression_restorer.py +++ b/facefusion/processors/modules/expression_restorer.py @@ -10,6 +10,7 @@ import facefusion.jobs.job_store from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording from facefusion.common_helper import create_int_metavar from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url +from facefusion.face_analyser import scale_face from facefusion.face_helper 
import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_box_mask, create_occlusion_mask from facefusion.face_selector import select_faces @@ -255,6 +256,7 @@ def process_frame(inputs : ExpressionRestorerInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = restore_expression(target_face, target_vision_frame, temp_vision_frame) return temp_vision_frame diff --git a/facefusion/processors/modules/face_debugger.py b/facefusion/processors/modules/face_debugger.py index 633674a..3f6b363 100755 --- a/facefusion/processors/modules/face_debugger.py +++ b/facefusion/processors/modules/face_debugger.py @@ -6,6 +6,7 @@ import numpy import facefusion.jobs.job_manager import facefusion.jobs.job_store from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, state_manager, video_manager, wording +from facefusion.face_analyser import scale_face from facefusion.face_helper import warp_face_by_face_landmark_5 from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask from facefusion.face_selector import select_faces @@ -218,6 +219,7 @@ def process_frame(inputs : FaceDebuggerInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = debug_face(target_face, temp_vision_frame) return temp_vision_frame diff --git a/facefusion/processors/modules/face_editor.py b/facefusion/processors/modules/face_editor.py index fd42f24..7448c2a 100755 --- a/facefusion/processors/modules/face_editor.py +++ b/facefusion/processors/modules/face_editor.py @@ -10,6 +10,7 @@ import facefusion.jobs.job_store from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, 
face_recognizer, inference_manager, logger, state_manager, video_manager, wording from facefusion.common_helper import create_float_metavar from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url +from facefusion.face_analyser import scale_face from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5 from facefusion.face_masker import create_box_mask from facefusion.face_selector import select_faces @@ -484,6 +485,7 @@ def process_frame(inputs : FaceEditorInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = edit_face(target_face, temp_vision_frame) return temp_vision_frame diff --git a/facefusion/processors/modules/face_enhancer.py b/facefusion/processors/modules/face_enhancer.py index 7cc8b4b..fe710a4 100755 --- a/facefusion/processors/modules/face_enhancer.py +++ b/facefusion/processors/modules/face_enhancer.py @@ -8,6 +8,7 @@ import facefusion.jobs.job_store from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording from facefusion.common_helper import create_float_metavar, create_int_metavar from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url +from facefusion.face_analyser import scale_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_box_mask, create_occlusion_mask from facefusion.face_selector import select_faces @@ -363,6 +364,7 @@ def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = enhance_face(target_face, 
temp_vision_frame) return temp_vision_frame diff --git a/facefusion/processors/modules/face_swapper.py b/facefusion/processors/modules/face_swapper.py index cfa9dd9..394e9ed 100755 --- a/facefusion/processors/modules/face_swapper.py +++ b/facefusion/processors/modules/face_swapper.py @@ -12,7 +12,7 @@ from facefusion import config, content_analyser, face_classifier, face_detector, from facefusion.common_helper import get_first, is_macos from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.execution import has_execution_provider -from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face +from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face, scale_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask from facefusion.face_selector import select_faces, sort_faces_by_order @@ -688,6 +688,7 @@ def process_frame(inputs : FaceSwapperInputs) -> VisionFrame: if source_face and target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = swap_face(source_face, target_face, temp_vision_frame) return temp_vision_frame diff --git a/facefusion/processors/modules/lip_syncer.py b/facefusion/processors/modules/lip_syncer.py index 4683eee..c015be2 100755 --- a/facefusion/processors/modules/lip_syncer.py +++ b/facefusion/processors/modules/lip_syncer.py @@ -10,6 +10,7 @@ from facefusion import config, content_analyser, face_classifier, face_detector, from facefusion.audio import read_static_voice from facefusion.common_helper import create_float_metavar from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url +from facefusion.face_analyser import scale_face from 
facefusion.face_helper import create_bounding_box, paste_back, warp_face_by_bounding_box, warp_face_by_face_landmark_5 from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask from facefusion.face_selector import select_faces @@ -269,6 +270,7 @@ def process_frame(inputs : LipSyncerInputs) -> VisionFrame: if target_faces: for target_face in target_faces: + target_face = scale_face(target_face, target_vision_frame, temp_vision_frame) temp_vision_frame = sync_lip(target_face, source_voice_frame, temp_vision_frame) return temp_vision_frame diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py index f7d032d..0f216f4 100755 --- a/facefusion/uis/components/preview.py +++ b/facefusion/uis/components/preview.py @@ -241,6 +241,8 @@ def process_preview_frame(reference_vision_frame : VisionFrame, source_vision_fr }) logger.enable() + temp_vision_frame = cv2.resize(temp_vision_frame, target_vision_frame.shape[1::-1]) + if preview_mode == 'frame-by-frame': return numpy.hstack((target_vision_frame, temp_vision_frame)) diff --git a/requirements.txt b/requirements.txt index fd98ac2..1b80cca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,8 @@ gradio-rangeslider==0.0.8 gradio==5.42.0 numpy==2.3.2 -onnx==1.18.0 -onnxruntime==1.22.0 +onnx==1.19.0 +onnxruntime==1.22.1 opencv-python==4.12.0.88 psutil==7.0.0 tqdm==4.67.1