* Fix preview when using frame enhancer

* Fix version conflict numpy vs. cv2

* Use latest numpy

* Introduce scale_face() to match the size of temp frames and target frames

* Remove hardcoded backend for camera under Windows

* Upgrade and downgrade some dependencies

* Upgrade and downgrade some dependencies

* Upgrade and downgrade some dependencies
This commit is contained in:
Henry Ruhs
2025-09-11 16:58:39 +02:00
committed by GitHub
parent 16e84b43ce
commit f3be23d19b
14 changed files with 48 additions and 10 deletions

View File

@@ -11,6 +11,7 @@ from facefusion import config, content_analyser, face_classifier, face_detector,
from facefusion.common_helper import create_int_metavar, is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.face_analyser import scale_face
from facefusion.face_helper import merge_matrix, paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
@@ -204,6 +205,7 @@ def process_frame(inputs : AgeModifierInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = modify_age(target_face, temp_vision_frame)
return temp_vision_frame

View File

@@ -11,6 +11,7 @@ import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url_by_provider
from facefusion.face_analyser import scale_face
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import select_faces
@@ -415,6 +416,7 @@ def process_frame(inputs : DeepSwapperInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = swap_face(target_face, temp_vision_frame)
return temp_vision_frame

View File

@@ -10,6 +10,7 @@ import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
@@ -255,6 +256,7 @@ def process_frame(inputs : ExpressionRestorerInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = restore_expression(target_face, target_vision_frame, temp_vision_frame)
return temp_vision_frame

View File

@@ -6,6 +6,7 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, state_manager, video_manager, wording
from facefusion.face_analyser import scale_face
from facefusion.face_helper import warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import select_faces
@@ -218,6 +219,7 @@ def process_frame(inputs : FaceDebuggerInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = debug_face(target_face, temp_vision_frame)
return temp_vision_frame

View File

@@ -10,6 +10,7 @@ import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask
from facefusion.face_selector import select_faces
@@ -484,6 +485,7 @@ def process_frame(inputs : FaceEditorInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = edit_face(target_face, temp_vision_frame)
return temp_vision_frame

View File

@@ -8,6 +8,7 @@ import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion.common_helper import create_float_metavar, create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
@@ -363,6 +364,7 @@ def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = enhance_face(target_face, temp_vision_frame)
return temp_vision_frame

View File

@@ -12,7 +12,7 @@ from facefusion import config, content_analyser, face_classifier, face_detector,
from facefusion.common_helper import get_first, is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face
from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face, scale_face
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import select_faces, sort_faces_by_order
@@ -688,6 +688,7 @@ def process_frame(inputs : FaceSwapperInputs) -> VisionFrame:
if source_face and target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = swap_face(source_face, target_face, temp_vision_frame)
return temp_vision_frame

View File

@@ -10,6 +10,7 @@ from facefusion import config, content_analyser, face_classifier, face_detector,
from facefusion.audio import read_static_voice
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
from facefusion.face_helper import create_bounding_box, paste_back, warp_face_by_bounding_box, warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
@@ -269,6 +270,7 @@ def process_frame(inputs : LipSyncerInputs) -> VisionFrame:
if target_faces:
for target_face in target_faces:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = sync_lip(target_face, source_voice_frame, temp_vision_frame)
return temp_vision_frame