3.2.0
This commit is contained in:
Henry Ruhs
2025-04-29 11:33:12 +02:00
committed by GitHub
124 changed files with 2325 additions and 1499 deletions

View File

@@ -1,5 +1,5 @@
[flake8] [flake8]
select = E3, E4, F, I1, I2 select = E22, E23, E24, E27, E3, E4, E7, F, I1, I2
per-file-ignores = facefusion.py:E402, install.py:E402 per-file-ignores = facefusion.py:E402, install.py:E402
plugins = flake8-import-order plugins = flake8-import-order
application_import_names = facefusion application_import_names = facefusion

BIN
.github/preview.png vendored

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

After

Width:  |  Height:  |  Size: 1.3 MiB

View File

@@ -1,3 +1,3 @@
MIT license OpenRAIL-AS license
Copyright (c) 2024 Henry Ruhs Copyright (c) 2025 Henry Ruhs

View File

@@ -5,7 +5,7 @@ FaceFusion
[![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci) [![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci)
[![Coverage Status](https://img.shields.io/coveralls/facefusion/facefusion.svg)](https://coveralls.io/r/facefusion/facefusion) [![Coverage Status](https://img.shields.io/coveralls/facefusion/facefusion.svg)](https://coveralls.io/r/facefusion/facefusion)
![License](https://img.shields.io/badge/license-MIT-green) ![License](https://img.shields.io/badge/license-OpenRAIL--AS-green)
Preview Preview

Binary file not shown.

Before

Width:  |  Height:  |  Size: 94 KiB

After

Width:  |  Height:  |  Size: 20 KiB

View File

@@ -49,12 +49,13 @@ keep_temp =
output_image_quality = output_image_quality =
output_image_resolution = output_image_resolution =
output_audio_encoder = output_audio_encoder =
output_audio_quality =
output_audio_volume =
output_video_encoder = output_video_encoder =
output_video_preset = output_video_preset =
output_video_quality = output_video_quality =
output_video_resolution = output_video_resolution =
output_video_fps = output_video_fps =
skip_audio =
[processors] [processors]
processors = processors =
@@ -113,3 +114,4 @@ system_memory_limit =
[misc] [misc]
log_level = log_level =
halt_on_error =

View File

@@ -1,7 +1,7 @@
import os import os
import sys import sys
from facefusion.typing import AppContext from facefusion.types import AppContext
def detect_app_context() -> AppContext: def detect_app_context() -> AppContext:

View File

@@ -1,9 +1,9 @@
from facefusion import state_manager from facefusion import state_manager
from facefusion.filesystem import is_image, is_video, list_directory from facefusion.filesystem import get_file_name, is_image, is_video, resolve_file_paths
from facefusion.jobs import job_store from facefusion.jobs import job_store
from facefusion.normalizer import normalize_fps, normalize_padding from facefusion.normalizer import normalize_fps, normalize_padding
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
from facefusion.typing import ApplyStateItem, Args from facefusion.types import ApplyStateItem, Args
from facefusion.vision import create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_fps, detect_video_resolution, pack_resolution from facefusion.vision import create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_fps, detect_video_resolution, pack_resolution
@@ -92,6 +92,8 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
else: else:
apply_state_item('output_image_resolution', pack_resolution(output_image_resolution)) apply_state_item('output_image_resolution', pack_resolution(output_image_resolution))
apply_state_item('output_audio_encoder', args.get('output_audio_encoder')) apply_state_item('output_audio_encoder', args.get('output_audio_encoder'))
apply_state_item('output_audio_quality', args.get('output_audio_quality'))
apply_state_item('output_audio_volume', args.get('output_audio_volume'))
apply_state_item('output_video_encoder', args.get('output_video_encoder')) apply_state_item('output_video_encoder', args.get('output_video_encoder'))
apply_state_item('output_video_preset', args.get('output_video_preset')) apply_state_item('output_video_preset', args.get('output_video_preset'))
apply_state_item('output_video_quality', args.get('output_video_quality')) apply_state_item('output_video_quality', args.get('output_video_quality'))
@@ -105,9 +107,8 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
if args.get('output_video_fps') or is_video(args.get('target_path')): if args.get('output_video_fps') or is_video(args.get('target_path')):
output_video_fps = normalize_fps(args.get('output_video_fps')) or detect_video_fps(args.get('target_path')) output_video_fps = normalize_fps(args.get('output_video_fps')) or detect_video_fps(args.get('target_path'))
apply_state_item('output_video_fps', output_video_fps) apply_state_item('output_video_fps', output_video_fps)
apply_state_item('skip_audio', args.get('skip_audio'))
# processors # processors
available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
apply_state_item('processors', args.get('processors')) apply_state_item('processors', args.get('processors'))
for processor_module in get_processors_modules(available_processors): for processor_module in get_processors_modules(available_processors):
processor_module.apply_args(args, apply_state_item) processor_module.apply_args(args, apply_state_item)
@@ -128,6 +129,7 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
apply_state_item('system_memory_limit', args.get('system_memory_limit')) apply_state_item('system_memory_limit', args.get('system_memory_limit'))
# misc # misc
apply_state_item('log_level', args.get('log_level')) apply_state_item('log_level', args.get('log_level'))
apply_state_item('halt_on_error', args.get('halt_on_error'))
# jobs # jobs
apply_state_item('job_id', args.get('job_id')) apply_state_item('job_id', args.get('job_id'))
apply_state_item('job_status', args.get('job_status')) apply_state_item('job_status', args.get('job_status'))

View File

@@ -3,25 +3,26 @@ from typing import Any, List, Optional
import numpy import numpy
import scipy import scipy
from numpy._typing import NDArray from numpy.typing import NDArray
from facefusion.ffmpeg import read_audio_buffer from facefusion.ffmpeg import read_audio_buffer
from facefusion.filesystem import is_audio from facefusion.filesystem import is_audio
from facefusion.typing import Audio, AudioFrame, Fps, Mel, MelFilterBank, Spectrogram from facefusion.types import Audio, AudioFrame, Fps, Mel, MelFilterBank, Spectrogram
from facefusion.voice_extractor import batch_extract_voice from facefusion.voice_extractor import batch_extract_voice
@lru_cache(maxsize = 128) @lru_cache()
def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
return read_audio(audio_path, fps) return read_audio(audio_path, fps)
def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
sample_rate = 48000 audio_sample_rate = 48000
channel_total = 2 audio_sample_size = 16
audio_channel_total = 2
if is_audio(audio_path): if is_audio(audio_path):
audio_buffer = read_audio_buffer(audio_path, sample_rate, channel_total) audio_buffer = read_audio_buffer(audio_path, audio_sample_rate, audio_sample_size, audio_channel_total)
audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2) audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2)
audio = prepare_audio(audio) audio = prepare_audio(audio)
spectrogram = create_spectrogram(audio) spectrogram = create_spectrogram(audio)
@@ -30,21 +31,22 @@ def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
return None return None
@lru_cache(maxsize = 128) @lru_cache()
def read_static_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: def read_static_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
return read_voice(audio_path, fps) return read_voice(audio_path, fps)
def read_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: def read_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
sample_rate = 48000 voice_sample_rate = 48000
channel_total = 2 voice_sample_size = 16
chunk_size = 240 * 1024 voice_channel_total = 2
step_size = 180 * 1024 voice_chunk_size = 240 * 1024
voice_step_size = 180 * 1024
if is_audio(audio_path): if is_audio(audio_path):
audio_buffer = read_audio_buffer(audio_path, sample_rate, channel_total) audio_buffer = read_audio_buffer(audio_path, voice_sample_rate, voice_sample_size, voice_channel_total)
audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2) audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2)
audio = batch_extract_voice(audio, chunk_size, step_size) audio = batch_extract_voice(audio, voice_chunk_size, voice_step_size)
audio = prepare_voice(audio) audio = prepare_voice(audio)
spectrogram = create_spectrogram(audio) spectrogram = create_spectrogram(audio)
audio_frames = extract_audio_frames(spectrogram, fps) audio_frames = extract_audio_frames(spectrogram, fps)
@@ -60,6 +62,20 @@ def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Opti
return None return None
def extract_audio_frames(spectrogram: Spectrogram, fps: Fps) -> List[AudioFrame]:
audio_frames = []
mel_filter_total = 80
audio_step_size = 16
indices = numpy.arange(0, spectrogram.shape[1], mel_filter_total / fps).astype(numpy.int16)
indices = indices[indices >= audio_step_size]
for index in indices:
start = max(0, index - audio_step_size)
audio_frames.append(spectrogram[:, start:index])
return audio_frames
def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]:
if is_audio(audio_path): if is_audio(audio_path):
voice_frames = read_static_voice(audio_path, fps) voice_frames = read_static_voice(audio_path, fps)
@@ -70,8 +86,8 @@ def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Opti
def create_empty_audio_frame() -> AudioFrame: def create_empty_audio_frame() -> AudioFrame:
mel_filter_total = 80 mel_filter_total = 80
step_size = 16 audio_step_size = 16
audio_frame = numpy.zeros((mel_filter_total, step_size)).astype(numpy.int16) audio_frame = numpy.zeros((mel_filter_total, audio_step_size)).astype(numpy.int16)
return audio_frame return audio_frame
@@ -84,10 +100,10 @@ def prepare_audio(audio : Audio) -> Audio:
def prepare_voice(audio : Audio) -> Audio: def prepare_voice(audio : Audio) -> Audio:
sample_rate = 48000 audio_sample_rate = 48000
resample_rate = 16000 audio_resample_rate = 16000
audio_resample_factor = round(len(audio) * audio_resample_rate / audio_sample_rate)
audio = scipy.signal.resample(audio, int(len(audio) * resample_rate / sample_rate)) audio = scipy.signal.resample(audio, audio_resample_factor)
audio = prepare_audio(audio) audio = prepare_audio(audio)
return audio return audio
@@ -101,19 +117,20 @@ def convert_mel_to_hertz(mel : Mel) -> NDArray[Any]:
def create_mel_filter_bank() -> MelFilterBank: def create_mel_filter_bank() -> MelFilterBank:
audio_sample_rate = 16000
audio_min_frequency = 55.0
audio_max_frequency = 7600.0
mel_filter_total = 80 mel_filter_total = 80
mel_bin_total = 800 mel_bin_total = 800
sample_rate = 16000
min_frequency = 55.0
max_frequency = 7600.0
mel_filter_bank = numpy.zeros((mel_filter_total, mel_bin_total // 2 + 1)) mel_filter_bank = numpy.zeros((mel_filter_total, mel_bin_total // 2 + 1))
mel_frequency_range = numpy.linspace(convert_hertz_to_mel(min_frequency), convert_hertz_to_mel(max_frequency), mel_filter_total + 2) mel_frequency_range = numpy.linspace(convert_hertz_to_mel(audio_min_frequency), convert_hertz_to_mel(audio_max_frequency), mel_filter_total + 2)
indices = numpy.floor((mel_bin_total + 1) * convert_mel_to_hertz(mel_frequency_range) / sample_rate).astype(numpy.int16) indices = numpy.floor((mel_bin_total + 1) * convert_mel_to_hertz(mel_frequency_range) / audio_sample_rate).astype(numpy.int16)
for index in range(mel_filter_total): for index in range(mel_filter_total):
start = indices[index] start = indices[index]
end = indices[index + 1] end = indices[index + 1]
mel_filter_bank[index, start:end] = scipy.signal.windows.triang(end - start) mel_filter_bank[index, start:end] = scipy.signal.windows.triang(end - start)
return mel_filter_bank return mel_filter_bank
@@ -124,16 +141,3 @@ def create_spectrogram(audio : Audio) -> Spectrogram:
spectrogram = scipy.signal.stft(audio, nperseg = mel_bin_total, nfft = mel_bin_total, noverlap = mel_bin_overlap)[2] spectrogram = scipy.signal.stft(audio, nperseg = mel_bin_total, nfft = mel_bin_total, noverlap = mel_bin_overlap)[2]
spectrogram = numpy.dot(mel_filter_bank, numpy.abs(spectrogram)) spectrogram = numpy.dot(mel_filter_bank, numpy.abs(spectrogram))
return spectrogram return spectrogram
def extract_audio_frames(spectrogram : Spectrogram, fps : Fps) -> List[AudioFrame]:
mel_filter_total = 80
step_size = 16
audio_frames = []
indices = numpy.arange(0, spectrogram.shape[1], mel_filter_total / fps).astype(numpy.int16)
indices = indices[indices >= step_size]
for index in indices:
start = max(0, index - step_size)
audio_frames.append(spectrogram[:, start:index])
return audio_frames

View File

@@ -2,14 +2,14 @@ import logging
from typing import List, Sequence from typing import List, Sequence
from facefusion.common_helper import create_float_range, create_int_range from facefusion.common_helper import create_float_range, create_int_range
from facefusion.typing import Angle, DownloadProvider, DownloadProviderSet, DownloadScope, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, JobStatus, LogLevel, LogLevelSet, OutputAudioEncoder, OutputVideoEncoder, OutputVideoPreset, Race, Score, TempFrameFormat, UiWorkflow, VideoMemoryStrategy from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, LogLevel, LogLevelSet, Race, Score, TempFrameFormat, UiWorkflow, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, WebcamMode
face_detector_set : FaceDetectorSet =\ face_detector_set : FaceDetectorSet =\
{ {
'many': [ '640x640' ], 'many': [ '640x640' ],
'retinaface': [ '160x160', '320x320', '480x480', '512x512', '640x640' ], 'retinaface': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
'scrfd': [ '160x160', '320x320', '480x480', '512x512', '640x640' ], 'scrfd': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
'yoloface': [ '640x640' ] 'yolo_face': [ '640x640' ]
} }
face_detector_models : List[FaceDetectorModel] = list(face_detector_set.keys()) face_detector_models : List[FaceDetectorModel] = list(face_detector_set.keys())
face_landmarker_models : List[FaceLandmarkerModel] = [ 'many', '2dfan4', 'peppa_wutz' ] face_landmarker_models : List[FaceLandmarkerModel] = [ 'many', '2dfan4', 'peppa_wutz' ]
@@ -17,7 +17,7 @@ face_selector_modes : List[FaceSelectorMode] = [ 'many', 'one', 'reference' ]
face_selector_orders : List[FaceSelectorOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ] face_selector_orders : List[FaceSelectorOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
face_selector_genders : List[Gender] = [ 'female', 'male' ] face_selector_genders : List[Gender] = [ 'female', 'male' ]
face_selector_races : List[Race] = [ 'white', 'black', 'latino', 'asian', 'indian', 'arabic' ] face_selector_races : List[Race] = [ 'white', 'black', 'latino', 'asian', 'indian', 'arabic' ]
face_occluder_models : List[FaceOccluderModel] = [ 'xseg_1', 'xseg_2' ] face_occluder_models : List[FaceOccluderModel] = [ 'xseg_1', 'xseg_2', 'xseg_3' ]
face_parser_models : List[FaceParserModel] = [ 'bisenet_resnet_18', 'bisenet_resnet_34' ] face_parser_models : List[FaceParserModel] = [ 'bisenet_resnet_18', 'bisenet_resnet_34' ]
face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ] face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ]
face_mask_region_set : FaceMaskRegionSet =\ face_mask_region_set : FaceMaskRegionSet =\
@@ -34,35 +34,81 @@ face_mask_region_set : FaceMaskRegionSet =\
'lower-lip': 13 'lower-lip': 13
} }
face_mask_regions : List[FaceMaskRegion] = list(face_mask_region_set.keys()) face_mask_regions : List[FaceMaskRegion] = list(face_mask_region_set.keys())
temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpg', 'png' ]
output_audio_encoders : List[OutputAudioEncoder] = [ 'aac', 'libmp3lame', 'libopus', 'libvorbis' ] audio_type_set : AudioTypeSet =\
output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox' ] {
output_video_presets : List[OutputVideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ] 'flac': 'audio/flac',
'm4a': 'audio/mp4',
'mp3': 'audio/mpeg',
'ogg': 'audio/ogg',
'opus': 'audio/opus',
'wav': 'audio/x-wav'
}
image_type_set : ImageTypeSet =\
{
'bmp': 'image/bmp',
'jpeg': 'image/jpeg',
'png': 'image/png',
'tiff': 'image/tiff',
'webp': 'image/webp'
}
video_type_set : VideoTypeSet =\
{
'avi': 'video/x-msvideo',
'm4v': 'video/mp4',
'mkv': 'video/x-matroska',
'mp4': 'video/mp4',
'mov': 'video/quicktime',
'webm': 'video/webm'
}
audio_formats : List[AudioFormat] = list(audio_type_set.keys())
image_formats : List[ImageFormat] = list(image_type_set.keys())
video_formats : List[VideoFormat] = list(video_type_set.keys())
temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpeg', 'png', 'tiff' ]
output_encoder_set : EncoderSet =\
{
'audio': [ 'flac', 'aac', 'libmp3lame', 'libopus', 'libvorbis', 'pcm_s16le', 'pcm_s32le' ],
'video': [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox', 'rawvideo' ]
}
output_audio_encoders : List[AudioEncoder] = output_encoder_set.get('audio')
output_video_encoders : List[VideoEncoder] = output_encoder_set.get('video')
output_video_presets : List[VideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ]
image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ] image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ]
video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ] video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ]
webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]
execution_provider_set : ExecutionProviderSet =\ execution_provider_set : ExecutionProviderSet =\
{ {
'cpu': 'CPUExecutionProvider',
'coreml': 'CoreMLExecutionProvider',
'cuda': 'CUDAExecutionProvider', 'cuda': 'CUDAExecutionProvider',
'tensorrt': 'TensorrtExecutionProvider',
'directml': 'DmlExecutionProvider', 'directml': 'DmlExecutionProvider',
'openvino': 'OpenVINOExecutionProvider',
'rocm': 'ROCMExecutionProvider', 'rocm': 'ROCMExecutionProvider',
'tensorrt': 'TensorrtExecutionProvider' 'openvino': 'OpenVINOExecutionProvider',
'coreml': 'CoreMLExecutionProvider',
'cpu': 'CPUExecutionProvider'
} }
execution_providers : List[ExecutionProvider] = list(execution_provider_set.keys()) execution_providers : List[ExecutionProvider] = list(execution_provider_set.keys())
download_provider_set : DownloadProviderSet =\ download_provider_set : DownloadProviderSet =\
{ {
'github': 'github':
{ {
'url': 'https://github.com', 'urls':
[
'https://github.com'
],
'path': '/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}' 'path': '/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}'
}, },
'huggingface': 'huggingface':
{ {
'url': 'https://huggingface.co', 'urls':
[
'https://huggingface.co',
'https://hf-mirror.com'
],
'path': '/facefusion/{base_name}/resolve/main/{file_name}' 'path': '/facefusion/{base_name}/resolve/main/{file_name}'
} }
} }
@@ -92,6 +138,8 @@ face_landmarker_score_range : Sequence[Score] = create_float_range(0.0, 1.0, 0.0
face_mask_blur_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05) face_mask_blur_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
face_mask_padding_range : Sequence[int] = create_int_range(0, 100, 1) face_mask_padding_range : Sequence[int] = create_int_range(0, 100, 1)
face_selector_age_range : Sequence[int] = create_int_range(0, 100, 1) face_selector_age_range : Sequence[int] = create_int_range(0, 100, 1)
reference_face_distance_range : Sequence[float] = create_float_range(0.0, 1.5, 0.05) reference_face_distance_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
output_image_quality_range : Sequence[int] = create_int_range(0, 100, 1) output_image_quality_range : Sequence[int] = create_int_range(0, 100, 1)
output_audio_quality_range : Sequence[int] = create_int_range(0, 100, 1)
output_audio_volume_range : Sequence[int] = create_int_range(0, 100, 1)
output_video_quality_range : Sequence[int] = create_int_range(0, 100, 1) output_video_quality_range : Sequence[int] = create_int_range(0, 100, 1)

35
facefusion/cli_helper.py Normal file
View File

@@ -0,0 +1,35 @@
from typing import Tuple
from facefusion.logger import get_package_logger
from facefusion.types import TableContents, TableHeaders
def render_table(headers : TableHeaders, contents : TableContents) -> None:
package_logger = get_package_logger()
table_column, table_separator = create_table_parts(headers, contents)
package_logger.info(table_separator)
package_logger.info(table_column.format(*headers))
package_logger.info(table_separator)
for content in contents:
content = [ value if value else '' for value in content ]
package_logger.info(table_column.format(*content))
package_logger.info(table_separator)
def create_table_parts(headers : TableHeaders, contents : TableContents) -> Tuple[str, str]:
column_parts = []
separator_parts = []
widths = [ len(header) for header in headers ]
for content in contents:
for index, value in enumerate(content):
widths[index] = max(widths[index], len(str(value)))
for width in widths:
column_parts.append('{:<' + str(width) + '}')
separator_parts.append('-' * width)
return '| ' + ' | '.join(column_parts) + ' |', '+-' + '-+-'.join(separator_parts) + '-+'

View File

@@ -1,5 +1,5 @@
import platform import platform
from typing import Any, Optional, Sequence from typing import Any, Iterable, Optional, Reversible, Sequence
def is_linux() -> bool: def is_linux() -> bool:
@@ -50,23 +50,35 @@ def calc_float_step(float_range : Sequence[float]) -> float:
return round(float_range[1] - float_range[0], 2) return round(float_range[1] - float_range[0], 2)
def cast_int(value : Any) -> Optional[Any]: def cast_int(value : Any) -> Optional[int]:
try: try:
return int(value) return int(value)
except (ValueError, TypeError): except (ValueError, TypeError):
return None return None
def cast_float(value : Any) -> Optional[Any]: def cast_float(value : Any) -> Optional[float]:
try: try:
return float(value) return float(value)
except (ValueError, TypeError): except (ValueError, TypeError):
return None return None
def cast_bool(value : Any) -> Optional[bool]:
if value == 'True':
return True
if value == 'False':
return False
return None
def get_first(__list__ : Any) -> Any: def get_first(__list__ : Any) -> Any:
return next(iter(__list__), None) if isinstance(__list__, Iterable):
return next(iter(__list__), None)
return None
def get_last(__list__ : Any) -> Any: def get_last(__list__ : Any) -> Any:
return next(reversed(__list__), None) if isinstance(__list__, Reversible):
return next(reversed(__list__), None)
return None

View File

@@ -1,92 +1,74 @@
from configparser import ConfigParser from configparser import ConfigParser
from typing import Any, List, Optional from typing import List, Optional
from facefusion import state_manager from facefusion import state_manager
from facefusion.common_helper import cast_float, cast_int from facefusion.common_helper import cast_bool, cast_float, cast_int
CONFIG = None CONFIG_PARSER = None
def get_config() -> ConfigParser: def get_config_parser() -> ConfigParser:
global CONFIG global CONFIG_PARSER
if CONFIG is None: if CONFIG_PARSER is None:
CONFIG = ConfigParser() CONFIG_PARSER = ConfigParser()
CONFIG.read(state_manager.get_item('config_path'), encoding = 'utf-8') CONFIG_PARSER.read(state_manager.get_item('config_path'), encoding = 'utf-8')
return CONFIG return CONFIG_PARSER
def clear_config() -> None: def clear_config_parser() -> None:
global CONFIG global CONFIG_PARSER
CONFIG = None CONFIG_PARSER = None
def get_str_value(key : str, fallback : Optional[str] = None) -> Optional[str]: def get_str_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[str]:
value = get_value_by_notation(key) config_parser = get_config_parser()
if value or fallback: if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
return str(value or fallback) return config_parser.get(section, option)
return fallback
def get_int_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[int]:
config_parser = get_config_parser()
if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
return config_parser.getint(section, option)
return cast_int(fallback)
def get_float_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[float]:
config_parser = get_config_parser()
if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
return config_parser.getfloat(section, option)
return cast_float(fallback)
def get_bool_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[bool]:
config_parser = get_config_parser()
if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
return config_parser.getboolean(section, option)
return cast_bool(fallback)
def get_str_list(section : str, option : str, fallback : Optional[str] = None) -> Optional[List[str]]:
config_parser = get_config_parser()
if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
return config_parser.get(section, option).split()
if fallback:
return fallback.split()
return None return None
def get_int_value(key : str, fallback : Optional[str] = None) -> Optional[int]: def get_int_list(section : str, option : str, fallback : Optional[str] = None) -> Optional[List[int]]:
value = get_value_by_notation(key) config_parser = get_config_parser()
if value or fallback: if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
return cast_int(value or fallback) return list(map(int, config_parser.get(section, option).split()))
return None if fallback:
return list(map(int, fallback.split()))
def get_float_value(key : str, fallback : Optional[str] = None) -> Optional[float]:
value = get_value_by_notation(key)
if value or fallback:
return cast_float(value or fallback)
return None
def get_bool_value(key : str, fallback : Optional[str] = None) -> Optional[bool]:
value = get_value_by_notation(key)
if value == 'True' or fallback == 'True':
return True
if value == 'False' or fallback == 'False':
return False
return None
def get_str_list(key : str, fallback : Optional[str] = None) -> Optional[List[str]]:
value = get_value_by_notation(key)
if value or fallback:
return [ str(value) for value in (value or fallback).split(' ') ]
return None
def get_int_list(key : str, fallback : Optional[str] = None) -> Optional[List[int]]:
value = get_value_by_notation(key)
if value or fallback:
return [ cast_int(value) for value in (value or fallback).split(' ') ]
return None
def get_float_list(key : str, fallback : Optional[str] = None) -> Optional[List[float]]:
value = get_value_by_notation(key)
if value or fallback:
return [ cast_float(value) for value in (value or fallback).split(' ') ]
return None
def get_value_by_notation(key : str) -> Optional[Any]:
config = get_config()
if '.' in key:
section, name = key.split('.')
if section in config and name in config[section]:
return config[section][name]
if key in config:
return config[key]
return None return None

View File

@@ -1,6 +1,6 @@
from functools import lru_cache from functools import lru_cache
from typing import List
import cv2
import numpy import numpy
from tqdm import tqdm from tqdm import tqdm
@@ -8,11 +8,9 @@ from facefusion import inference_manager, state_manager, wording
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.filesystem import resolve_relative_path from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import DownloadScope, Fps, InferencePool, ModelOptions, ModelSet, VisionFrame from facefusion.types import Detection, DownloadScope, Fps, InferencePool, ModelOptions, ModelSet, Score, VisionFrame
from facefusion.vision import detect_video_fps, get_video_frame, read_image from facefusion.vision import detect_video_fps, fit_frame, read_image, read_video_frame
PROBABILITY_LIMIT = 0.80
RATE_LIMIT = 10
STREAM_COUNTER = 0 STREAM_COUNTER = 0
@@ -20,48 +18,50 @@ STREAM_COUNTER = 0
def create_static_model_set(download_scope : DownloadScope) -> ModelSet: def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
return\ return\
{ {
'open_nsfw': 'yolo_nsfw':
{ {
'hashes': 'hashes':
{ {
'content_analyser': 'content_analyser':
{ {
'url': resolve_download_url('models-3.0.0', 'open_nsfw.hash'), 'url': resolve_download_url('models-3.2.0', 'yolo_11m_nsfw.hash'),
'path': resolve_relative_path('../.assets/models/open_nsfw.hash') 'path': resolve_relative_path('../.assets/models/yolo_11m_nsfw.hash')
} }
}, },
'sources': 'sources':
{ {
'content_analyser': 'content_analyser':
{ {
'url': resolve_download_url('models-3.0.0', 'open_nsfw.onnx'), 'url': resolve_download_url('models-3.2.0', 'yolo_11m_nsfw.onnx'),
'path': resolve_relative_path('../.assets/models/open_nsfw.onnx') 'path': resolve_relative_path('../.assets/models/yolo_11m_nsfw.onnx')
} }
}, },
'size': (224, 224), 'size': (640, 640)
'mean': [ 104, 117, 123 ]
} }
} }
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ 'yolo_nsfw' ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ 'yolo_nsfw' ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
return create_static_model_set('full').get('open_nsfw') return create_static_model_set('full').get('yolo_nsfw')
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool: def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool:
@@ -74,31 +74,9 @@ def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool:
def analyse_frame(vision_frame : VisionFrame) -> bool: def analyse_frame(vision_frame : VisionFrame) -> bool:
vision_frame = prepare_frame(vision_frame) nsfw_scores = detect_nsfw(vision_frame)
probability = forward(vision_frame)
return probability > PROBABILITY_LIMIT return len(nsfw_scores) > 0
def forward(vision_frame : VisionFrame) -> float:
	"""
	Run the Open-NSFW classifier on a prepared frame and return the NSFW probability.

	:param vision_frame: preprocessed frame batch — assumes the layout produced by prepare_frame (1, H, W, C float32); TODO confirm against model input spec
	:return: probability score for the NSFW class
	"""
	content_analyser = get_inference_pool().get('content_analyser')
	# serialize access to the shared ONNX session across threads
	with conditional_thread_semaphore():
		# run() returns [ batch ][ sample ][ class ]; index 1 selects the NSFW class score
		probability = content_analyser.run(None,
		{
			'input': vision_frame
		})[0][0][1]
	return probability
def prepare_frame(vision_frame : VisionFrame) -> VisionFrame:
	"""
	Preprocess a frame for the Open-NSFW classifier: resize, mean-subtract and batch.

	:param vision_frame: input frame in HWC channel layout
	:return: float32 frame batch of shape (1, size, size, channels)
	"""
	# model input size and per-channel mean come from the static model options
	model_size = get_model_options().get('size')
	model_mean = get_model_options().get('mean')
	vision_frame = cv2.resize(vision_frame, model_size).astype(numpy.float32)
	# subtract the per-channel mean expected by the model
	vision_frame -= numpy.array(model_mean).astype(numpy.float32)
	# add a leading batch dimension
	vision_frame = numpy.expand_dims(vision_frame, axis = 0)
	return vision_frame
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -112,15 +90,55 @@ def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int
video_fps = detect_video_fps(video_path) video_fps = detect_video_fps(video_path)
frame_range = range(trim_frame_start, trim_frame_end) frame_range = range(trim_frame_start, trim_frame_end)
rate = 0.0 rate = 0.0
total = 0
counter = 0 counter = 0
with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
for frame_number in frame_range: for frame_number in frame_range:
if frame_number % int(video_fps) == 0: if frame_number % int(video_fps) == 0:
vision_frame = get_video_frame(video_path, frame_number) vision_frame = read_video_frame(video_path, frame_number)
total += 1
if analyse_frame(vision_frame): if analyse_frame(vision_frame):
counter += 1 counter += 1
rate = counter * int(video_fps) / len(frame_range) * 100 if counter > 0 and total > 0:
progress.update() rate = counter / total * 100
progress.set_postfix(rate = rate) progress.set_postfix(rate = rate)
return rate > RATE_LIMIT progress.update()
return rate > 10.0
def detect_nsfw(vision_frame : VisionFrame) -> List[Score]:
	"""
	Detect NSFW content in a frame using the YOLO NSFW model.

	:param vision_frame: input frame in HWC channel layout
	:return: list of NSFW confidence scores above the detection threshold (empty when clean)
	"""
	nsfw_scores = []
	model_size = get_model_options().get('size')
	# letterbox the frame to the model input resolution
	temp_vision_frame = fit_frame(vision_frame, model_size)
	detect_vision_frame = prepare_detect_frame(temp_vision_frame)
	detection = forward(detect_vision_frame)
	# squeeze the batch dimension and transpose to (anchors, 4 + classes)
	detection = numpy.squeeze(detection).T
	# best class confidence per anchor; columns 0-3 are the box coordinates
	nsfw_scores_raw = numpy.amax(detection[:, 4:], axis = 1)
	keep_indices = numpy.where(nsfw_scores_raw > 0.2)[0]
	# bug fix: numpy.any(keep_indices) is False when the only kept index is 0,
	# which silently discards a valid detection — test emptiness via .size instead
	if keep_indices.size > 0:
		nsfw_scores_raw = nsfw_scores_raw[keep_indices]
		nsfw_scores = nsfw_scores_raw.ravel().tolist()
	return nsfw_scores
def forward(vision_frame : VisionFrame) -> Detection:
	"""
	Run the content analyser model on a prepared detect frame.

	:param vision_frame: preprocessed NCHW float32 frame batch
	:return: raw detection output of the model
	"""
	inference_session = get_inference_pool().get('content_analyser')
	# guard the shared ONNX session against concurrent access
	with conditional_thread_semaphore():
		detection = inference_session.run(None,
		{
			'input': vision_frame
		})
	return detection
def prepare_detect_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
	"""
	Normalize a fitted frame and convert it to the model input layout.

	:param temp_vision_frame: frame already fitted to the model size, HWC layout
	:return: float32 batch of shape (1, C, H, W) with values scaled to [0, 1]
	"""
	normalized_frame = temp_vision_frame / 255.0
	# HWC -> CHW, then prepend the batch dimension
	channel_first_frame = normalized_frame.transpose(2, 0, 1)
	detect_vision_frame = numpy.expand_dims(channel_first_frame, axis = 0).astype(numpy.float32)
	return detect_vision_frame

View File

@@ -6,17 +6,17 @@ from time import time
import numpy import numpy
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, process_manager, state_manager, voice_extractor, wording from facefusion import cli_helper, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, process_manager, state_manager, voice_extractor, wording
from facefusion.args import apply_args, collect_job_args, reduce_job_args, reduce_step_args from facefusion.args import apply_args, collect_job_args, reduce_job_args, reduce_step_args
from facefusion.common_helper import get_first from facefusion.common_helper import get_first
from facefusion.content_analyser import analyse_image, analyse_video from facefusion.content_analyser import analyse_image, analyse_video
from facefusion.download import conditional_download_hashes, conditional_download_sources from facefusion.download import conditional_download_hashes, conditional_download_sources
from facefusion.exit_helper import conditional_exit, graceful_exit, hard_exit from facefusion.exit_helper import graceful_exit, hard_exit
from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face
from facefusion.face_selector import sort_and_filter_faces from facefusion.face_selector import sort_and_filter_faces
from facefusion.face_store import append_reference_face, clear_reference_faces, get_reference_faces from facefusion.face_store import append_reference_face, clear_reference_faces, get_reference_faces
from facefusion.ffmpeg import copy_image, extract_frames, finalize_image, merge_video, replace_audio, restore_audio from facefusion.ffmpeg import copy_image, extract_frames, finalize_image, merge_video, replace_audio, restore_audio
from facefusion.filesystem import filter_audio_paths, is_image, is_video, list_directory, resolve_file_pattern from facefusion.filesystem import filter_audio_paths, get_file_name, is_image, is_video, resolve_file_paths, resolve_file_pattern
from facefusion.jobs import job_helper, job_manager, job_runner from facefusion.jobs import job_helper, job_manager, job_runner
from facefusion.jobs.job_list import compose_job_list from facefusion.jobs.job_list import compose_job_list
from facefusion.memory import limit_system_memory from facefusion.memory import limit_system_memory
@@ -24,62 +24,70 @@ from facefusion.processors.core import get_processors_modules
from facefusion.program import create_program from facefusion.program import create_program
from facefusion.program_helper import validate_args from facefusion.program_helper import validate_args
from facefusion.statistics import conditional_log_statistics from facefusion.statistics import conditional_log_statistics
from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, get_temp_frame_paths, move_temp_file from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, move_temp_file, resolve_temp_frame_paths
from facefusion.typing import Args, ErrorCode from facefusion.types import Args, ErrorCode
from facefusion.vision import get_video_frame, pack_resolution, read_image, read_static_images, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution from facefusion.vision import pack_resolution, read_image, read_static_images, read_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution
def cli() -> None: def cli() -> None:
signal.signal(signal.SIGINT, lambda signal_number, frame: graceful_exit(0)) if pre_check():
program = create_program() signal.signal(signal.SIGINT, lambda signal_number, frame: graceful_exit(0))
program = create_program()
if validate_args(program): if validate_args(program):
args = vars(program.parse_args()) args = vars(program.parse_args())
apply_args(args, state_manager.init_item) apply_args(args, state_manager.init_item)
if state_manager.get_item('command'): if state_manager.get_item('command'):
logger.init(state_manager.get_item('log_level')) logger.init(state_manager.get_item('log_level'))
route(args) route(args)
else:
program.print_help()
else: else:
program.print_help() hard_exit(2)
else: else:
hard_exit(2) hard_exit(2)
def route(args : Args) -> None: def route(args : Args) -> None:
system_memory_limit = state_manager.get_item('system_memory_limit') system_memory_limit = state_manager.get_item('system_memory_limit')
if system_memory_limit and system_memory_limit > 0: if system_memory_limit and system_memory_limit > 0:
limit_system_memory(system_memory_limit) limit_system_memory(system_memory_limit)
if state_manager.get_item('command') == 'force-download': if state_manager.get_item('command') == 'force-download':
error_code = force_download() error_code = force_download()
return conditional_exit(error_code) return hard_exit(error_code)
if state_manager.get_item('command') in [ 'job-list', 'job-create', 'job-submit', 'job-submit-all', 'job-delete', 'job-delete-all', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]: if state_manager.get_item('command') in [ 'job-list', 'job-create', 'job-submit', 'job-submit-all', 'job-delete', 'job-delete-all', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]:
if not job_manager.init_jobs(state_manager.get_item('jobs_path')): if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1) hard_exit(1)
error_code = route_job_manager(args) error_code = route_job_manager(args)
hard_exit(error_code) hard_exit(error_code)
if not pre_check():
return conditional_exit(2)
if state_manager.get_item('command') == 'run': if state_manager.get_item('command') == 'run':
import facefusion.uis.core as ui import facefusion.uis.core as ui
if not common_pre_check() or not processors_pre_check(): if not common_pre_check() or not processors_pre_check():
return conditional_exit(2) return hard_exit(2)
for ui_layout in ui.get_ui_layouts_modules(state_manager.get_item('ui_layouts')): for ui_layout in ui.get_ui_layouts_modules(state_manager.get_item('ui_layouts')):
if not ui_layout.pre_check(): if not ui_layout.pre_check():
return conditional_exit(2) return hard_exit(2)
ui.init() ui.init()
ui.launch() ui.launch()
if state_manager.get_item('command') == 'headless-run': if state_manager.get_item('command') == 'headless-run':
if not job_manager.init_jobs(state_manager.get_item('jobs_path')): if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1) hard_exit(1)
error_core = process_headless(args) error_core = process_headless(args)
hard_exit(error_core) hard_exit(error_core)
if state_manager.get_item('command') == 'batch-run': if state_manager.get_item('command') == 'batch-run':
if not job_manager.init_jobs(state_manager.get_item('jobs_path')): if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1) hard_exit(1)
error_core = process_batch(args) error_core = process_batch(args)
hard_exit(error_core) hard_exit(error_core)
if state_manager.get_item('command') in [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]: if state_manager.get_item('command') in [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]:
if not job_manager.init_jobs(state_manager.get_item('jobs_path')): if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1) hard_exit(1)
@@ -91,9 +99,11 @@ def pre_check() -> bool:
if sys.version_info < (3, 10): if sys.version_info < (3, 10):
logger.error(wording.get('python_not_supported').format(version = '3.10'), __name__) logger.error(wording.get('python_not_supported').format(version = '3.10'), __name__)
return False return False
if not shutil.which('curl'): if not shutil.which('curl'):
logger.error(wording.get('curl_not_installed'), __name__) logger.error(wording.get('curl_not_installed'), __name__)
return False return False
if not shutil.which('ffmpeg'): if not shutil.which('ffmpeg'):
logger.error(wording.get('ffmpeg_not_installed'), __name__) logger.error(wording.get('ffmpeg_not_installed'), __name__)
return False return False
@@ -133,17 +143,17 @@ def force_download() -> ErrorCode:
face_recognizer, face_recognizer,
voice_extractor voice_extractor
] ]
available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
processor_modules = get_processors_modules(available_processors) processor_modules = get_processors_modules(available_processors)
for module in common_modules + processor_modules: for module in common_modules + processor_modules:
if hasattr(module, 'create_static_model_set'): if hasattr(module, 'create_static_model_set'):
for model in module.create_static_model_set(state_manager.get_item('download_scope')).values(): for model in module.create_static_model_set(state_manager.get_item('download_scope')).values():
model_hashes = model.get('hashes') model_hash_set = model.get('hashes')
model_sources = model.get('sources') model_source_set = model.get('sources')
if model_hashes and model_sources: if model_hash_set and model_source_set:
if not conditional_download_hashes(model_hashes) or not conditional_download_sources(model_sources): if not conditional_download_hashes(model_hash_set) or not conditional_download_sources(model_source_set):
return 1 return 1
return 0 return 0
@@ -154,39 +164,45 @@ def route_job_manager(args : Args) -> ErrorCode:
job_headers, job_contents = compose_job_list(state_manager.get_item('job_status')) job_headers, job_contents = compose_job_list(state_manager.get_item('job_status'))
if job_contents: if job_contents:
logger.table(job_headers, job_contents) cli_helper.render_table(job_headers, job_contents)
return 0 return 0
return 1 return 1
if state_manager.get_item('command') == 'job-create': if state_manager.get_item('command') == 'job-create':
if job_manager.create_job(state_manager.get_item('job_id')): if job_manager.create_job(state_manager.get_item('job_id')):
logger.info(wording.get('job_created').format(job_id = state_manager.get_item('job_id')), __name__) logger.info(wording.get('job_created').format(job_id = state_manager.get_item('job_id')), __name__)
return 0 return 0
logger.error(wording.get('job_not_created').format(job_id = state_manager.get_item('job_id')), __name__) logger.error(wording.get('job_not_created').format(job_id = state_manager.get_item('job_id')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-submit': if state_manager.get_item('command') == 'job-submit':
if job_manager.submit_job(state_manager.get_item('job_id')): if job_manager.submit_job(state_manager.get_item('job_id')):
logger.info(wording.get('job_submitted').format(job_id = state_manager.get_item('job_id')), __name__) logger.info(wording.get('job_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
return 0 return 0
logger.error(wording.get('job_not_submitted').format(job_id = state_manager.get_item('job_id')), __name__) logger.error(wording.get('job_not_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-submit-all': if state_manager.get_item('command') == 'job-submit-all':
if job_manager.submit_jobs(): if job_manager.submit_jobs(state_manager.get_item('halt_on_error')):
logger.info(wording.get('job_all_submitted'), __name__) logger.info(wording.get('job_all_submitted'), __name__)
return 0 return 0
logger.error(wording.get('job_all_not_submitted'), __name__) logger.error(wording.get('job_all_not_submitted'), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-delete': if state_manager.get_item('command') == 'job-delete':
if job_manager.delete_job(state_manager.get_item('job_id')): if job_manager.delete_job(state_manager.get_item('job_id')):
logger.info(wording.get('job_deleted').format(job_id = state_manager.get_item('job_id')), __name__) logger.info(wording.get('job_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
return 0 return 0
logger.error(wording.get('job_not_deleted').format(job_id = state_manager.get_item('job_id')), __name__) logger.error(wording.get('job_not_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-delete-all': if state_manager.get_item('command') == 'job-delete-all':
if job_manager.delete_jobs(): if job_manager.delete_jobs(state_manager.get_item('halt_on_error')):
logger.info(wording.get('job_all_deleted'), __name__) logger.info(wording.get('job_all_deleted'), __name__)
return 0 return 0
logger.error(wording.get('job_all_not_deleted'), __name__) logger.error(wording.get('job_all_not_deleted'), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-add-step': if state_manager.get_item('command') == 'job-add-step':
step_args = reduce_step_args(args) step_args = reduce_step_args(args)
@@ -195,6 +211,7 @@ def route_job_manager(args : Args) -> ErrorCode:
return 0 return 0
logger.error(wording.get('job_step_not_added').format(job_id = state_manager.get_item('job_id')), __name__) logger.error(wording.get('job_step_not_added').format(job_id = state_manager.get_item('job_id')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-remix-step': if state_manager.get_item('command') == 'job-remix-step':
step_args = reduce_step_args(args) step_args = reduce_step_args(args)
@@ -203,6 +220,7 @@ def route_job_manager(args : Args) -> ErrorCode:
return 0 return 0
logger.error(wording.get('job_remix_step_not_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__) logger.error(wording.get('job_remix_step_not_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-insert-step': if state_manager.get_item('command') == 'job-insert-step':
step_args = reduce_step_args(args) step_args = reduce_step_args(args)
@@ -211,6 +229,7 @@ def route_job_manager(args : Args) -> ErrorCode:
return 0 return 0
logger.error(wording.get('job_step_not_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__) logger.error(wording.get('job_step_not_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-remove-step': if state_manager.get_item('command') == 'job-remove-step':
if job_manager.remove_step(state_manager.get_item('job_id'), state_manager.get_item('step_index')): if job_manager.remove_step(state_manager.get_item('job_id'), state_manager.get_item('step_index')):
logger.info(wording.get('job_step_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__) logger.info(wording.get('job_step_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
@@ -228,13 +247,15 @@ def route_job_runner() -> ErrorCode:
return 0 return 0
logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__) logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-run-all': if state_manager.get_item('command') == 'job-run-all':
logger.info(wording.get('running_jobs'), __name__) logger.info(wording.get('running_jobs'), __name__)
if job_runner.run_jobs(process_step): if job_runner.run_jobs(process_step, state_manager.get_item('halt_on_error')):
logger.info(wording.get('processing_jobs_succeed'), __name__) logger.info(wording.get('processing_jobs_succeed'), __name__)
return 0 return 0
logger.info(wording.get('processing_jobs_failed'), __name__) logger.info(wording.get('processing_jobs_failed'), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-retry': if state_manager.get_item('command') == 'job-retry':
logger.info(wording.get('retrying_job').format(job_id = state_manager.get_item('job_id')), __name__) logger.info(wording.get('retrying_job').format(job_id = state_manager.get_item('job_id')), __name__)
if job_runner.retry_job(state_manager.get_item('job_id'), process_step): if job_runner.retry_job(state_manager.get_item('job_id'), process_step):
@@ -242,9 +263,10 @@ def route_job_runner() -> ErrorCode:
return 0 return 0
logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__) logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
return 1 return 1
if state_manager.get_item('command') == 'job-retry-all': if state_manager.get_item('command') == 'job-retry-all':
logger.info(wording.get('retrying_jobs'), __name__) logger.info(wording.get('retrying_jobs'), __name__)
if job_runner.retry_jobs(process_step): if job_runner.retry_jobs(process_step, state_manager.get_item('halt_on_error')):
logger.info(wording.get('processing_jobs_succeed'), __name__) logger.info(wording.get('processing_jobs_succeed'), __name__)
return 0 return 0
logger.info(wording.get('processing_jobs_failed'), __name__) logger.info(wording.get('processing_jobs_failed'), __name__)
@@ -305,14 +327,18 @@ def process_step(job_id : str, step_index : int, step_args : Args) -> bool:
def conditional_process() -> ErrorCode: def conditional_process() -> ErrorCode:
start_time = time() start_time = time()
for processor_module in get_processors_modules(state_manager.get_item('processors')): for processor_module in get_processors_modules(state_manager.get_item('processors')):
if not processor_module.pre_process('output'): if not processor_module.pre_process('output'):
return 2 return 2
conditional_append_reference_faces() conditional_append_reference_faces()
if is_image(state_manager.get_item('target_path')): if is_image(state_manager.get_item('target_path')):
return process_image(start_time) return process_image(start_time)
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
return process_video(start_time) return process_video(start_time)
return 0 return 0
@@ -322,7 +348,7 @@ def conditional_append_reference_faces() -> None:
source_faces = get_many_faces(source_frames) source_faces = get_many_faces(source_frames)
source_face = get_average_face(source_faces) source_face = get_average_face(source_faces)
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
else: else:
reference_frame = read_image(state_manager.get_item('target_path')) reference_frame = read_image(state_manager.get_item('target_path'))
reference_faces = sort_and_filter_faces(get_many_faces([ reference_frame ])) reference_faces = sort_and_filter_faces(get_many_faces([ reference_frame ]))
@@ -341,13 +367,12 @@ def conditional_append_reference_faces() -> None:
def process_image(start_time : float) -> ErrorCode: def process_image(start_time : float) -> ErrorCode:
if analyse_image(state_manager.get_item('target_path')): if analyse_image(state_manager.get_item('target_path')):
return 3 return 3
# clear temp
logger.debug(wording.get('clearing_temp'), __name__) logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path')) clear_temp_directory(state_manager.get_item('target_path'))
# create temp
logger.debug(wording.get('creating_temp'), __name__) logger.debug(wording.get('creating_temp'), __name__)
create_temp_directory(state_manager.get_item('target_path')) create_temp_directory(state_manager.get_item('target_path'))
# copy image
process_manager.start() process_manager.start()
temp_image_resolution = pack_resolution(restrict_image_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_image_resolution')))) temp_image_resolution = pack_resolution(restrict_image_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_image_resolution'))))
logger.info(wording.get('copying_image').format(resolution = temp_image_resolution), __name__) logger.info(wording.get('copying_image').format(resolution = temp_image_resolution), __name__)
@@ -357,7 +382,7 @@ def process_image(start_time : float) -> ErrorCode:
logger.error(wording.get('copying_image_failed'), __name__) logger.error(wording.get('copying_image_failed'), __name__)
process_manager.end() process_manager.end()
return 1 return 1
# process image
temp_file_path = get_temp_file_path(state_manager.get_item('target_path')) temp_file_path = get_temp_file_path(state_manager.get_item('target_path'))
for processor_module in get_processors_modules(state_manager.get_item('processors')): for processor_module in get_processors_modules(state_manager.get_item('processors')):
logger.info(wording.get('processing'), processor_module.__name__) logger.info(wording.get('processing'), processor_module.__name__)
@@ -366,16 +391,16 @@ def process_image(start_time : float) -> ErrorCode:
if is_process_stopping(): if is_process_stopping():
process_manager.end() process_manager.end()
return 4 return 4
# finalize image
logger.info(wording.get('finalizing_image').format(resolution = state_manager.get_item('output_image_resolution')), __name__) logger.info(wording.get('finalizing_image').format(resolution = state_manager.get_item('output_image_resolution')), __name__)
if finalize_image(state_manager.get_item('target_path'), state_manager.get_item('output_path'), state_manager.get_item('output_image_resolution')): if finalize_image(state_manager.get_item('target_path'), state_manager.get_item('output_path'), state_manager.get_item('output_image_resolution')):
logger.debug(wording.get('finalizing_image_succeed'), __name__) logger.debug(wording.get('finalizing_image_succeed'), __name__)
else: else:
logger.warn(wording.get('finalizing_image_skipped'), __name__) logger.warn(wording.get('finalizing_image_skipped'), __name__)
# clear temp
logger.debug(wording.get('clearing_temp'), __name__) logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path')) clear_temp_directory(state_manager.get_item('target_path'))
# validate image
if is_image(state_manager.get_item('output_path')): if is_image(state_manager.get_item('output_path')):
seconds = '{:.2f}'.format((time() - start_time) % 60) seconds = '{:.2f}'.format((time() - start_time) % 60)
logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__) logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__)
@@ -392,13 +417,12 @@ def process_video(start_time : float) -> ErrorCode:
trim_frame_start, trim_frame_end = restrict_trim_frame(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end')) trim_frame_start, trim_frame_end = restrict_trim_frame(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end'))
if analyse_video(state_manager.get_item('target_path'), trim_frame_start, trim_frame_end): if analyse_video(state_manager.get_item('target_path'), trim_frame_start, trim_frame_end):
return 3 return 3
# clear temp
logger.debug(wording.get('clearing_temp'), __name__) logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path')) clear_temp_directory(state_manager.get_item('target_path'))
# create temp
logger.debug(wording.get('creating_temp'), __name__) logger.debug(wording.get('creating_temp'), __name__)
create_temp_directory(state_manager.get_item('target_path')) create_temp_directory(state_manager.get_item('target_path'))
# extract frames
process_manager.start() process_manager.start()
temp_video_resolution = pack_resolution(restrict_video_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_video_resolution')))) temp_video_resolution = pack_resolution(restrict_video_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_video_resolution'))))
temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps')) temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps'))
@@ -412,8 +436,8 @@ def process_video(start_time : float) -> ErrorCode:
logger.error(wording.get('extracting_frames_failed'), __name__) logger.error(wording.get('extracting_frames_failed'), __name__)
process_manager.end() process_manager.end()
return 1 return 1
# process frames
temp_frame_paths = get_temp_frame_paths(state_manager.get_item('target_path')) temp_frame_paths = resolve_temp_frame_paths(state_manager.get_item('target_path'))
if temp_frame_paths: if temp_frame_paths:
for processor_module in get_processors_modules(state_manager.get_item('processors')): for processor_module in get_processors_modules(state_manager.get_item('processors')):
logger.info(wording.get('processing'), processor_module.__name__) logger.info(wording.get('processing'), processor_module.__name__)
@@ -425,9 +449,9 @@ def process_video(start_time : float) -> ErrorCode:
logger.error(wording.get('temp_frames_not_found'), __name__) logger.error(wording.get('temp_frames_not_found'), __name__)
process_manager.end() process_manager.end()
return 1 return 1
# merge video
logger.info(wording.get('merging_video').format(resolution = state_manager.get_item('output_video_resolution'), fps = state_manager.get_item('output_video_fps')), __name__) logger.info(wording.get('merging_video').format(resolution = state_manager.get_item('output_video_resolution'), fps = state_manager.get_item('output_video_fps')), __name__)
if merge_video(state_manager.get_item('target_path'), state_manager.get_item('output_video_resolution'), state_manager.get_item('output_video_fps')): if merge_video(state_manager.get_item('target_path'), temp_video_fps, state_manager.get_item('output_video_resolution'), state_manager.get_item('output_video_fps'), trim_frame_start, trim_frame_end):
logger.debug(wording.get('merging_video_succeed'), __name__) logger.debug(wording.get('merging_video_succeed'), __name__)
else: else:
if is_process_stopping(): if is_process_stopping():
@@ -436,8 +460,8 @@ def process_video(start_time : float) -> ErrorCode:
logger.error(wording.get('merging_video_failed'), __name__) logger.error(wording.get('merging_video_failed'), __name__)
process_manager.end() process_manager.end()
return 1 return 1
# handle audio
if state_manager.get_item('skip_audio'): if state_manager.get_item('output_audio_volume') == 0:
logger.info(wording.get('skipping_audio'), __name__) logger.info(wording.get('skipping_audio'), __name__)
move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
else: else:
@@ -452,7 +476,7 @@ def process_video(start_time : float) -> ErrorCode:
logger.warn(wording.get('replacing_audio_skipped'), __name__) logger.warn(wording.get('replacing_audio_skipped'), __name__)
move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
else: else:
if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), state_manager.get_item('output_video_fps'), trim_frame_start, trim_frame_end): if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), trim_frame_start, trim_frame_end):
logger.debug(wording.get('restoring_audio_succeed'), __name__) logger.debug(wording.get('restoring_audio_succeed'), __name__)
else: else:
if is_process_stopping(): if is_process_stopping():
@@ -460,10 +484,10 @@ def process_video(start_time : float) -> ErrorCode:
return 4 return 4
logger.warn(wording.get('restoring_audio_skipped'), __name__) logger.warn(wording.get('restoring_audio_skipped'), __name__)
move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
# clear temp
logger.debug(wording.get('clearing_temp'), __name__) logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path')) clear_temp_directory(state_manager.get_item('target_path'))
# validate video
if is_video(state_manager.get_item('output_path')): if is_video(state_manager.get_item('output_path')):
seconds = '{:.2f}'.format((time() - start_time)) seconds = '{:.2f}'.format((time() - start_time))
logger.info(wording.get('processing_video_succeed').format(seconds = seconds), __name__) logger.info(wording.get('processing_video_succeed').format(seconds = seconds), __name__)

View File

@@ -0,0 +1,27 @@
import itertools
import shutil
from facefusion import metadata
from facefusion.types import Commands
def run(commands : Commands) -> Commands:
user_agent = metadata.get('name') + '/' + metadata.get('version')
return [ shutil.which('curl'), '--user-agent', user_agent, '--insecure', '--location', '--silent' ] + commands
def chain(*commands : Commands) -> Commands:
return list(itertools.chain(*commands))
def head(url : str) -> Commands:
return [ '-I', url ]
def download(url : str, download_file_path : str) -> Commands:
return [ '--create-dirs', '--continue-at', '-', '--output', download_file_path, url ]
def set_timeout(timeout : int) -> Commands:
return [ '--connect-timeout', str(timeout) ]

View File

@@ -1,5 +1,4 @@
import os import os
import shutil
import subprocess import subprocess
from functools import lru_cache from functools import lru_cache
from typing import List, Optional, Tuple from typing import List, Optional, Tuple
@@ -8,15 +7,14 @@ from urllib.parse import urlparse
from tqdm import tqdm from tqdm import tqdm
import facefusion.choices import facefusion.choices
from facefusion import logger, process_manager, state_manager, wording from facefusion import curl_builder, logger, process_manager, state_manager, wording
from facefusion.filesystem import get_file_size, is_file, remove_file from facefusion.filesystem import get_file_name, get_file_size, is_file, remove_file
from facefusion.hash_helper import validate_hash from facefusion.hash_helper import validate_hash
from facefusion.typing import DownloadProvider, DownloadSet from facefusion.types import Commands, DownloadProvider, DownloadSet
def open_curl(args : List[str]) -> subprocess.Popen[bytes]: def open_curl(commands : Commands) -> subprocess.Popen[bytes]:
commands = [ shutil.which('curl'), '--silent', '--insecure', '--location' ] commands = curl_builder.run(commands)
commands.extend(args)
return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE) return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
@@ -29,7 +27,10 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> Non
if initial_size < download_size: if initial_size < download_size:
with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
commands = [ '--create-dirs', '--continue-at', '-', '--output', download_file_path, url ] commands = curl_builder.chain(
curl_builder.download(url, download_file_path),
curl_builder.set_timeout(10)
)
open_curl(commands) open_curl(commands)
current_size = initial_size current_size = initial_size
progress.set_postfix(download_providers = state_manager.get_item('download_providers'), file_name = download_file_name) progress.set_postfix(download_providers = state_manager.get_item('download_providers'), file_name = download_file_name)
@@ -42,7 +43,10 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> Non
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
def get_static_download_size(url : str) -> int: def get_static_download_size(url : str) -> int:
commands = [ '-I', url ] commands = curl_builder.chain(
curl_builder.head(url),
curl_builder.set_timeout(5)
)
process = open_curl(commands) process = open_curl(commands)
lines = reversed(process.stdout.readlines()) lines = reversed(process.stdout.readlines())
@@ -57,32 +61,35 @@ def get_static_download_size(url : str) -> int:
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
def ping_static_url(url : str) -> bool: def ping_static_url(url : str) -> bool:
commands = [ '-I', url ] commands = curl_builder.chain(
curl_builder.head(url),
curl_builder.set_timeout(5)
)
process = open_curl(commands) process = open_curl(commands)
process.communicate() process.communicate()
return process.returncode == 0 return process.returncode == 0
def conditional_download_hashes(hashes : DownloadSet) -> bool: def conditional_download_hashes(hash_set : DownloadSet) -> bool:
hash_paths = [ hashes.get(hash_key).get('path') for hash_key in hashes.keys() ] hash_paths = [ hash_set.get(hash_key).get('path') for hash_key in hash_set.keys() ]
process_manager.check() process_manager.check()
_, invalid_hash_paths = validate_hash_paths(hash_paths) _, invalid_hash_paths = validate_hash_paths(hash_paths)
if invalid_hash_paths: if invalid_hash_paths:
for index in hashes: for index in hash_set:
if hashes.get(index).get('path') in invalid_hash_paths: if hash_set.get(index).get('path') in invalid_hash_paths:
invalid_hash_url = hashes.get(index).get('url') invalid_hash_url = hash_set.get(index).get('url')
if invalid_hash_url: if invalid_hash_url:
download_directory_path = os.path.dirname(hashes.get(index).get('path')) download_directory_path = os.path.dirname(hash_set.get(index).get('path'))
conditional_download(download_directory_path, [ invalid_hash_url ]) conditional_download(download_directory_path, [ invalid_hash_url ])
valid_hash_paths, invalid_hash_paths = validate_hash_paths(hash_paths) valid_hash_paths, invalid_hash_paths = validate_hash_paths(hash_paths)
for valid_hash_path in valid_hash_paths: for valid_hash_path in valid_hash_paths:
valid_hash_file_name, _ = os.path.splitext(os.path.basename(valid_hash_path)) valid_hash_file_name = get_file_name(valid_hash_path)
logger.debug(wording.get('validating_hash_succeed').format(hash_file_name = valid_hash_file_name), __name__) logger.debug(wording.get('validating_hash_succeed').format(hash_file_name = valid_hash_file_name), __name__)
for invalid_hash_path in invalid_hash_paths: for invalid_hash_path in invalid_hash_paths:
invalid_hash_file_name, _ = os.path.splitext(os.path.basename(invalid_hash_path)) invalid_hash_file_name = get_file_name(invalid_hash_path)
logger.error(wording.get('validating_hash_failed').format(hash_file_name = invalid_hash_file_name), __name__) logger.error(wording.get('validating_hash_failed').format(hash_file_name = invalid_hash_file_name), __name__)
if not invalid_hash_paths: if not invalid_hash_paths:
@@ -90,26 +97,26 @@ def conditional_download_hashes(hashes : DownloadSet) -> bool:
return not invalid_hash_paths return not invalid_hash_paths
def conditional_download_sources(sources : DownloadSet) -> bool: def conditional_download_sources(source_set : DownloadSet) -> bool:
source_paths = [ sources.get(source_key).get('path') for source_key in sources.keys() ] source_paths = [ source_set.get(source_key).get('path') for source_key in source_set.keys() ]
process_manager.check() process_manager.check()
_, invalid_source_paths = validate_source_paths(source_paths) _, invalid_source_paths = validate_source_paths(source_paths)
if invalid_source_paths: if invalid_source_paths:
for index in sources: for index in source_set:
if sources.get(index).get('path') in invalid_source_paths: if source_set.get(index).get('path') in invalid_source_paths:
invalid_source_url = sources.get(index).get('url') invalid_source_url = source_set.get(index).get('url')
if invalid_source_url: if invalid_source_url:
download_directory_path = os.path.dirname(sources.get(index).get('path')) download_directory_path = os.path.dirname(source_set.get(index).get('path'))
conditional_download(download_directory_path, [ invalid_source_url ]) conditional_download(download_directory_path, [ invalid_source_url ])
valid_source_paths, invalid_source_paths = validate_source_paths(source_paths) valid_source_paths, invalid_source_paths = validate_source_paths(source_paths)
for valid_source_path in valid_source_paths: for valid_source_path in valid_source_paths:
valid_source_file_name, _ = os.path.splitext(os.path.basename(valid_source_path)) valid_source_file_name = get_file_name(valid_source_path)
logger.debug(wording.get('validating_source_succeed').format(source_file_name = valid_source_file_name), __name__) logger.debug(wording.get('validating_source_succeed').format(source_file_name = valid_source_file_name), __name__)
for invalid_source_path in invalid_source_paths: for invalid_source_path in invalid_source_paths:
invalid_source_file_name, _ = os.path.splitext(os.path.basename(invalid_source_path)) invalid_source_file_name = get_file_name(invalid_source_path)
logger.error(wording.get('validating_source_failed').format(source_file_name = invalid_source_file_name), __name__) logger.error(wording.get('validating_source_failed').format(source_file_name = invalid_source_file_name), __name__)
if remove_file(invalid_source_path): if remove_file(invalid_source_path):
@@ -129,6 +136,7 @@ def validate_hash_paths(hash_paths : List[str]) -> Tuple[List[str], List[str]]:
valid_hash_paths.append(hash_path) valid_hash_paths.append(hash_path)
else: else:
invalid_hash_paths.append(hash_path) invalid_hash_paths.append(hash_path)
return valid_hash_paths, invalid_hash_paths return valid_hash_paths, invalid_hash_paths
@@ -141,6 +149,7 @@ def validate_source_paths(source_paths : List[str]) -> Tuple[List[str], List[str
valid_source_paths.append(source_path) valid_source_paths.append(source_path)
else: else:
invalid_source_paths.append(source_path) invalid_source_paths.append(source_path)
return valid_source_paths, invalid_source_paths return valid_source_paths, invalid_source_paths
@@ -148,16 +157,18 @@ def resolve_download_url(base_name : str, file_name : str) -> Optional[str]:
download_providers = state_manager.get_item('download_providers') download_providers = state_manager.get_item('download_providers')
for download_provider in download_providers: for download_provider in download_providers:
if ping_download_provider(download_provider): download_url = resolve_download_url_by_provider(download_provider, base_name, file_name)
return resolve_download_url_by_provider(download_provider, base_name, file_name) if download_url:
return download_url
return None return None
def ping_download_provider(download_provider : DownloadProvider) -> bool:
download_provider_value = facefusion.choices.download_provider_set.get(download_provider)
return ping_static_url(download_provider_value.get('url'))
def resolve_download_url_by_provider(download_provider : DownloadProvider, base_name : str, file_name : str) -> Optional[str]: def resolve_download_url_by_provider(download_provider : DownloadProvider, base_name : str, file_name : str) -> Optional[str]:
download_provider_value = facefusion.choices.download_provider_set.get(download_provider) download_provider_value = facefusion.choices.download_provider_set.get(download_provider)
return download_provider_value.get('url') + download_provider_value.get('path').format(base_name = base_name, file_name = file_name)
for download_provider_url in download_provider_value.get('urls'):
if ping_static_url(download_provider_url):
return download_provider_url + download_provider_value.get('path').format(base_name = base_name, file_name = file_name)
return None

View File

@@ -2,12 +2,12 @@ import shutil
import subprocess import subprocess
import xml.etree.ElementTree as ElementTree import xml.etree.ElementTree as ElementTree
from functools import lru_cache from functools import lru_cache
from typing import Any, List, Optional from typing import List, Optional
from onnxruntime import get_available_providers, set_default_logger_severity from onnxruntime import get_available_providers, set_default_logger_severity
import facefusion.choices import facefusion.choices
from facefusion.typing import ExecutionDevice, ExecutionProvider, ValueAndUnit from facefusion.types import ExecutionDevice, ExecutionProvider, InferenceSessionProvider, ValueAndUnit
set_default_logger_severity(3) set_default_logger_severity(3)
@@ -17,28 +17,29 @@ def has_execution_provider(execution_provider : ExecutionProvider) -> bool:
def get_available_execution_providers() -> List[ExecutionProvider]: def get_available_execution_providers() -> List[ExecutionProvider]:
inference_execution_providers = get_available_providers() inference_session_providers = get_available_providers()
available_execution_providers = [] available_execution_providers : List[ExecutionProvider] = []
for execution_provider, execution_provider_value in facefusion.choices.execution_provider_set.items(): for execution_provider, execution_provider_value in facefusion.choices.execution_provider_set.items():
if execution_provider_value in inference_execution_providers: if execution_provider_value in inference_session_providers:
available_execution_providers.append(execution_provider) index = facefusion.choices.execution_providers.index(execution_provider)
available_execution_providers.insert(index, execution_provider)
return available_execution_providers return available_execution_providers
def create_inference_execution_providers(execution_device_id : str, execution_providers : List[ExecutionProvider]) -> List[Any]: def create_inference_session_providers(execution_device_id : str, execution_providers : List[ExecutionProvider]) -> List[InferenceSessionProvider]:
inference_execution_providers : List[Any] = [] inference_session_providers : List[InferenceSessionProvider] = []
for execution_provider in execution_providers: for execution_provider in execution_providers:
if execution_provider == 'cuda': if execution_provider == 'cuda':
inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{ {
'device_id': execution_device_id, 'device_id': execution_device_id,
'cudnn_conv_algo_search': 'DEFAULT' if is_geforce_16_series() else 'EXHAUSTIVE' 'cudnn_conv_algo_search': resolve_cudnn_conv_algo_search()
})) }))
if execution_provider == 'tensorrt': if execution_provider == 'tensorrt':
inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{ {
'device_id': execution_device_id, 'device_id': execution_device_id,
'trt_engine_cache_enable': True, 'trt_engine_cache_enable': True,
@@ -47,31 +48,47 @@ def create_inference_execution_providers(execution_device_id : str, execution_pr
'trt_timing_cache_path': '.caches', 'trt_timing_cache_path': '.caches',
'trt_builder_optimization_level': 5 'trt_builder_optimization_level': 5
})) }))
if execution_provider == 'openvino':
inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'device_type': 'GPU' if execution_device_id == '0' else 'GPU.' + execution_device_id,
'precision': 'FP32'
}))
if execution_provider in [ 'directml', 'rocm' ]: if execution_provider in [ 'directml', 'rocm' ]:
inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{ {
'device_id': execution_device_id 'device_id': execution_device_id
})) }))
if execution_provider == 'openvino':
inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'device_type': resolve_openvino_device_type(execution_device_id),
'precision': 'FP32'
}))
if execution_provider == 'coreml': if execution_provider == 'coreml':
inference_execution_providers.append(facefusion.choices.execution_provider_set.get(execution_provider)) inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
{
'SpecializationStrategy': 'FastPrediction',
'ModelCacheDirectory': '.caches'
}))
if 'cpu' in execution_providers: if 'cpu' in execution_providers:
inference_execution_providers.append(facefusion.choices.execution_provider_set.get('cpu')) inference_session_providers.append(facefusion.choices.execution_provider_set.get('cpu'))
return inference_execution_providers return inference_session_providers
def is_geforce_16_series() -> bool: def resolve_cudnn_conv_algo_search() -> str:
execution_devices = detect_static_execution_devices() execution_devices = detect_static_execution_devices()
product_names = ('GeForce GTX 1630', 'GeForce GTX 1650', 'GeForce GTX 1660') product_names = ('GeForce GTX 1630', 'GeForce GTX 1650', 'GeForce GTX 1660')
return any(execution_device.get('product').get('name').startswith(product_names) for execution_device in execution_devices) for execution_device in execution_devices:
if execution_device.get('product').get('name').startswith(product_names):
return 'DEFAULT'
return 'EXHAUSTIVE'
def resolve_openvino_device_type(execution_device_id : str) -> str:
if execution_device_id == '0':
return 'GPU'
if execution_device_id == '':
return 'MULTI:GPU'
return 'GPU.' + execution_device_id
def run_nvidia_smi() -> subprocess.Popen[bytes]: def run_nvidia_smi() -> subprocess.Popen[bytes]:
@@ -129,7 +146,7 @@ def detect_execution_devices() -> List[ExecutionDevice]:
def create_value_and_unit(text : str) -> Optional[ValueAndUnit]: def create_value_and_unit(text : str) -> Optional[ValueAndUnit]:
if ' ' in text: if ' ' in text:
value, unit = text.split(' ') value, unit = text.split()
return\ return\
{ {

View File

@@ -4,7 +4,7 @@ from time import sleep
from facefusion import process_manager, state_manager from facefusion import process_manager, state_manager
from facefusion.temp_helper import clear_temp_directory from facefusion.temp_helper import clear_temp_directory
from facefusion.typing import ErrorCode from facefusion.types import ErrorCode
def hard_exit(error_code : ErrorCode) -> None: def hard_exit(error_code : ErrorCode) -> None:
@@ -12,11 +12,6 @@ def hard_exit(error_code : ErrorCode) -> None:
sys.exit(error_code) sys.exit(error_code)
def conditional_exit(error_code : ErrorCode) -> None:
if state_manager.get_item('command') == 'headless-run':
hard_exit(error_code)
def graceful_exit(error_code : ErrorCode) -> None: def graceful_exit(error_code : ErrorCode) -> None:
process_manager.stop() process_manager.stop()
while process_manager.is_processing(): while process_manager.is_processing():

View File

@@ -7,10 +7,10 @@ from facefusion.common_helper import get_first
from facefusion.face_classifier import classify_face from facefusion.face_classifier import classify_face
from facefusion.face_detector import detect_faces, detect_rotated_faces from facefusion.face_detector import detect_faces, detect_rotated_faces
from facefusion.face_helper import apply_nms, convert_to_face_landmark_5, estimate_face_angle, get_nms_threshold from facefusion.face_helper import apply_nms, convert_to_face_landmark_5, estimate_face_angle, get_nms_threshold
from facefusion.face_landmarker import detect_face_landmarks, estimate_face_landmark_68_5 from facefusion.face_landmarker import detect_face_landmark, estimate_face_landmark_68_5
from facefusion.face_recognizer import calc_embedding from facefusion.face_recognizer import calc_embedding
from facefusion.face_store import get_static_faces, set_static_faces from facefusion.face_store import get_static_faces, set_static_faces
from facefusion.typing import BoundingBox, Face, FaceLandmark5, FaceLandmarkSet, FaceScoreSet, Score, VisionFrame from facefusion.types import BoundingBox, Face, FaceLandmark5, FaceLandmarkSet, FaceScoreSet, Score, VisionFrame
def create_faces(vision_frame : VisionFrame, bounding_boxes : List[BoundingBox], face_scores : List[Score], face_landmarks_5 : List[FaceLandmark5]) -> List[Face]: def create_faces(vision_frame : VisionFrame, bounding_boxes : List[BoundingBox], face_scores : List[Score], face_landmarks_5 : List[FaceLandmark5]) -> List[Face]:
@@ -29,7 +29,7 @@ def create_faces(vision_frame : VisionFrame, bounding_boxes : List[BoundingBox],
face_angle = estimate_face_angle(face_landmark_68_5) face_angle = estimate_face_angle(face_landmark_68_5)
if state_manager.get_item('face_landmarker_score') > 0: if state_manager.get_item('face_landmarker_score') > 0:
face_landmark_68, face_landmark_score_68 = detect_face_landmarks(vision_frame, bounding_box, face_angle) face_landmark_68, face_landmark_score_68 = detect_face_landmark(vision_frame, bounding_box, face_angle)
if face_landmark_score_68 > state_manager.get_item('face_landmarker_score'): if face_landmark_score_68 > state_manager.get_item('face_landmarker_score'):
face_landmark_5_68 = convert_to_face_landmark_5(face_landmark_68) face_landmark_5_68 = convert_to_face_landmark_5(face_landmark_68)

View File

@@ -8,7 +8,7 @@ from facefusion.download import conditional_download_hashes, conditional_downloa
from facefusion.face_helper import warp_face_by_face_landmark_5 from facefusion.face_helper import warp_face_by_face_landmark_5
from facefusion.filesystem import resolve_relative_path from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import Age, DownloadScope, FaceLandmark5, Gender, InferencePool, ModelOptions, ModelSet, Race, VisionFrame from facefusion.types import Age, DownloadScope, FaceLandmark5, Gender, InferencePool, ModelOptions, ModelSet, Race, VisionFrame
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -42,12 +42,15 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ 'fairface' ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ 'fairface' ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
@@ -55,10 +58,10 @@ def get_model_options() -> ModelOptions:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def classify_face(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Gender, Age, Race]: def classify_face(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Gender, Age, Race]:
@@ -67,7 +70,7 @@ def classify_face(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmar
model_mean = get_model_options().get('mean') model_mean = get_model_options().get('mean')
model_standard_deviation = get_model_options().get('standard_deviation') model_standard_deviation = get_model_options().get('standard_deviation')
crop_vision_frame, _ = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size) crop_vision_frame, _ = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size)
crop_vision_frame = crop_vision_frame.astype(numpy.float32)[:, :, ::-1] / 255 crop_vision_frame = crop_vision_frame.astype(numpy.float32)[:, :, ::-1] / 255.0
crop_vision_frame -= model_mean crop_vision_frame -= model_mean
crop_vision_frame /= model_standard_deviation crop_vision_frame /= model_standard_deviation
crop_vision_frame = crop_vision_frame.transpose(2, 0, 1) crop_vision_frame = crop_vision_frame.transpose(2, 0, 1)

View File

@@ -1,16 +1,16 @@
from typing import List, Tuple from functools import lru_cache
from typing import List, Sequence, Tuple
import cv2 import cv2
import numpy import numpy
from charset_normalizer.md import lru_cache
from facefusion import inference_manager, state_manager from facefusion import inference_manager, state_manager
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_helper import create_rotated_matrix_and_size, create_static_anchors, distance_to_bounding_box, distance_to_face_landmark_5, normalize_bounding_box, transform_bounding_box, transform_points from facefusion.face_helper import create_rotated_matrix_and_size, create_static_anchors, distance_to_bounding_box, distance_to_face_landmark_5, normalize_bounding_box, transform_bounding_box, transform_points
from facefusion.filesystem import resolve_relative_path from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import thread_semaphore from facefusion.thread_helper import thread_semaphore
from facefusion.typing import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame from facefusion.types import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame
from facefusion.vision import resize_frame_resolution, unpack_resolution from facefusion.vision import restrict_frame, unpack_resolution
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -55,11 +55,11 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
} }
} }
}, },
'yoloface': 'yolo_face':
{ {
'hashes': 'hashes':
{ {
'yoloface': 'yolo_face':
{ {
'url': resolve_download_url('models-3.0.0', 'yoloface_8n.hash'), 'url': resolve_download_url('models-3.0.0', 'yoloface_8n.hash'),
'path': resolve_relative_path('../.assets/models/yoloface_8n.hash') 'path': resolve_relative_path('../.assets/models/yoloface_8n.hash')
@@ -67,7 +67,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
}, },
'sources': 'sources':
{ {
'yoloface': 'yolo_face':
{ {
'url': resolve_download_url('models-3.0.0', 'yoloface_8n.onnx'), 'url': resolve_download_url('models-3.0.0', 'yoloface_8n.onnx'),
'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx') 'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx')
@@ -78,38 +78,34 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
_, model_sources = collect_model_downloads() model_names = [ state_manager.get_item('face_detector_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) _, model_source_set = collect_model_downloads()
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('face_detector_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]: def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
model_hashes = {}
model_sources = {}
model_set = create_static_model_set('full') model_set = create_static_model_set('full')
model_hash_set = {}
model_source_set = {}
if state_manager.get_item('face_detector_model') in [ 'many', 'retinaface' ]: for face_detector_model in [ 'retinaface', 'scrfd', 'yolo_face' ]:
model_hashes['retinaface'] = model_set.get('retinaface').get('hashes').get('retinaface') if state_manager.get_item('face_detector_model') in [ 'many', face_detector_model ]:
model_sources['retinaface'] = model_set.get('retinaface').get('sources').get('retinaface') model_hash_set[face_detector_model] = model_set.get(face_detector_model).get('hashes').get(face_detector_model)
model_source_set[face_detector_model] = model_set.get(face_detector_model).get('sources').get(face_detector_model)
if state_manager.get_item('face_detector_model') in [ 'many', 'scrfd' ]: return model_hash_set, model_source_set
model_hashes['scrfd'] = model_set.get('scrfd').get('hashes').get('scrfd')
model_sources['scrfd'] = model_set.get('scrfd').get('sources').get('scrfd')
if state_manager.get_item('face_detector_model') in [ 'many', 'yoloface' ]:
model_hashes['yoloface'] = model_set.get('yoloface').get('hashes').get('yoloface')
model_sources['yoloface'] = model_set.get('yoloface').get('sources').get('yoloface')
return model_hashes, model_sources
def pre_check() -> bool: def pre_check() -> bool:
model_hashes, model_sources = collect_model_downloads() model_hash_set, model_source_set = collect_model_downloads()
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]: def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
@@ -129,8 +125,8 @@ def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Sc
all_face_scores.extend(face_scores) all_face_scores.extend(face_scores)
all_face_landmarks_5.extend(face_landmarks_5) all_face_landmarks_5.extend(face_landmarks_5)
if state_manager.get_item('face_detector_model') in [ 'many', 'yoloface' ]: if state_manager.get_item('face_detector_model') in [ 'many', 'yolo_face' ]:
bounding_boxes, face_scores, face_landmarks_5 = detect_with_yoloface(vision_frame, state_manager.get_item('face_detector_size')) bounding_boxes, face_scores, face_landmarks_5 = detect_with_yolo_face(vision_frame, state_manager.get_item('face_detector_size'))
all_bounding_boxes.extend(bounding_boxes) all_bounding_boxes.extend(bounding_boxes)
all_face_scores.extend(face_scores) all_face_scores.extend(face_scores)
all_face_landmarks_5.extend(face_landmarks_5) all_face_landmarks_5.extend(face_landmarks_5)
@@ -156,37 +152,39 @@ def detect_with_retinaface(vision_frame : VisionFrame, face_detector_size : str)
feature_strides = [ 8, 16, 32 ] feature_strides = [ 8, 16, 32 ]
feature_map_channel = 3 feature_map_channel = 3
anchor_total = 2 anchor_total = 2
face_detector_score = state_manager.get_item('face_detector_score')
face_detector_width, face_detector_height = unpack_resolution(face_detector_size) face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size) detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ -1, 1 ])
detection = forward_with_retinaface(detect_vision_frame) detection = forward_with_retinaface(detect_vision_frame)
for index, feature_stride in enumerate(feature_strides): for index, feature_stride in enumerate(feature_strides):
keep_indices = numpy.where(detection[index] >= state_manager.get_item('face_detector_score'))[0] keep_indices = numpy.where(detection[index] >= face_detector_score)[0]
if numpy.any(keep_indices): if numpy.any(keep_indices):
stride_height = face_detector_height // feature_stride stride_height = face_detector_height // feature_stride
stride_width = face_detector_width // feature_stride stride_width = face_detector_width // feature_stride
anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width) anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
bounding_box_raw = detection[index + feature_map_channel] * feature_stride bounding_boxes_raw = detection[index + feature_map_channel] * feature_stride
face_landmark_5_raw = detection[index + feature_map_channel * 2] * feature_stride face_landmarks_5_raw = detection[index + feature_map_channel * 2] * feature_stride
for bounding_box in distance_to_bounding_box(anchors, bounding_box_raw)[keep_indices]: for bounding_box_raw in distance_to_bounding_box(anchors, bounding_boxes_raw)[keep_indices]:
bounding_boxes.append(numpy.array( bounding_boxes.append(numpy.array(
[ [
bounding_box[0] * ratio_width, bounding_box_raw[0] * ratio_width,
bounding_box[1] * ratio_height, bounding_box_raw[1] * ratio_height,
bounding_box[2] * ratio_width, bounding_box_raw[2] * ratio_width,
bounding_box[3] * ratio_height, bounding_box_raw[3] * ratio_height
])) ]))
for score in detection[index][keep_indices]: for face_score_raw in detection[index][keep_indices]:
face_scores.append(score[0]) face_scores.append(face_score_raw[0])
for face_landmark_5 in distance_to_face_landmark_5(anchors, face_landmark_5_raw)[keep_indices]: for face_landmark_raw_5 in distance_to_face_landmark_5(anchors, face_landmarks_5_raw)[keep_indices]:
face_landmarks_5.append(face_landmark_5 * [ ratio_width, ratio_height ]) face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])
return bounding_boxes, face_scores, face_landmarks_5 return bounding_boxes, face_scores, face_landmarks_5
@@ -198,73 +196,77 @@ def detect_with_scrfd(vision_frame : VisionFrame, face_detector_size : str) -> T
feature_strides = [ 8, 16, 32 ] feature_strides = [ 8, 16, 32 ]
feature_map_channel = 3 feature_map_channel = 3
anchor_total = 2 anchor_total = 2
face_detector_score = state_manager.get_item('face_detector_score')
face_detector_width, face_detector_height = unpack_resolution(face_detector_size) face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size) detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ -1, 1 ])
detection = forward_with_scrfd(detect_vision_frame) detection = forward_with_scrfd(detect_vision_frame)
for index, feature_stride in enumerate(feature_strides): for index, feature_stride in enumerate(feature_strides):
keep_indices = numpy.where(detection[index] >= state_manager.get_item('face_detector_score'))[0] keep_indices = numpy.where(detection[index] >= face_detector_score)[0]
if numpy.any(keep_indices): if numpy.any(keep_indices):
stride_height = face_detector_height // feature_stride stride_height = face_detector_height // feature_stride
stride_width = face_detector_width // feature_stride stride_width = face_detector_width // feature_stride
anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width) anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
bounding_box_raw = detection[index + feature_map_channel] * feature_stride bounding_boxes_raw = detection[index + feature_map_channel] * feature_stride
face_landmark_5_raw = detection[index + feature_map_channel * 2] * feature_stride face_landmarks_5_raw = detection[index + feature_map_channel * 2] * feature_stride
for bounding_box in distance_to_bounding_box(anchors, bounding_box_raw)[keep_indices]: for bounding_box_raw in distance_to_bounding_box(anchors, bounding_boxes_raw)[keep_indices]:
bounding_boxes.append(numpy.array( bounding_boxes.append(numpy.array(
[ [
bounding_box[0] * ratio_width, bounding_box_raw[0] * ratio_width,
bounding_box[1] * ratio_height, bounding_box_raw[1] * ratio_height,
bounding_box[2] * ratio_width, bounding_box_raw[2] * ratio_width,
bounding_box[3] * ratio_height, bounding_box_raw[3] * ratio_height
])) ]))
for score in detection[index][keep_indices]: for face_score_raw in detection[index][keep_indices]:
face_scores.append(score[0]) face_scores.append(face_score_raw[0])
for face_landmark_5 in distance_to_face_landmark_5(anchors, face_landmark_5_raw)[keep_indices]: for face_landmark_raw_5 in distance_to_face_landmark_5(anchors, face_landmarks_5_raw)[keep_indices]:
face_landmarks_5.append(face_landmark_5 * [ ratio_width, ratio_height ]) face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])
return bounding_boxes, face_scores, face_landmarks_5 return bounding_boxes, face_scores, face_landmarks_5
def detect_with_yoloface(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]: def detect_with_yolo_face(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
bounding_boxes = [] bounding_boxes = []
face_scores = [] face_scores = []
face_landmarks_5 = [] face_landmarks_5 = []
face_detector_score = state_manager.get_item('face_detector_score')
face_detector_width, face_detector_height = unpack_resolution(face_detector_size) face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size) detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
detection = forward_with_yoloface(detect_vision_frame) detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ 0, 1 ])
detection = forward_with_yolo_face(detect_vision_frame)
detection = numpy.squeeze(detection).T detection = numpy.squeeze(detection).T
bounding_box_raw, score_raw, face_landmark_5_raw = numpy.split(detection, [ 4, 5 ], axis = 1) bounding_boxes_raw, face_scores_raw, face_landmarks_5_raw = numpy.split(detection, [ 4, 5 ], axis = 1)
keep_indices = numpy.where(score_raw > state_manager.get_item('face_detector_score'))[0] keep_indices = numpy.where(face_scores_raw > face_detector_score)[0]
if numpy.any(keep_indices): if numpy.any(keep_indices):
bounding_box_raw, face_landmark_5_raw, score_raw = bounding_box_raw[keep_indices], face_landmark_5_raw[keep_indices], score_raw[keep_indices] bounding_boxes_raw, face_scores_raw, face_landmarks_5_raw = bounding_boxes_raw[keep_indices], face_scores_raw[keep_indices], face_landmarks_5_raw[keep_indices]
for bounding_box in bounding_box_raw: for bounding_box_raw in bounding_boxes_raw:
bounding_boxes.append(numpy.array( bounding_boxes.append(numpy.array(
[ [
(bounding_box[0] - bounding_box[2] / 2) * ratio_width, (bounding_box_raw[0] - bounding_box_raw[2] / 2) * ratio_width,
(bounding_box[1] - bounding_box[3] / 2) * ratio_height, (bounding_box_raw[1] - bounding_box_raw[3] / 2) * ratio_height,
(bounding_box[0] + bounding_box[2] / 2) * ratio_width, (bounding_box_raw[0] + bounding_box_raw[2] / 2) * ratio_width,
(bounding_box[1] + bounding_box[3] / 2) * ratio_height, (bounding_box_raw[1] + bounding_box_raw[3] / 2) * ratio_height
])) ]))
face_scores = score_raw.ravel().tolist() face_scores = face_scores_raw.ravel().tolist()
face_landmark_5_raw[:, 0::3] = (face_landmark_5_raw[:, 0::3]) * ratio_width face_landmarks_5_raw[:, 0::3] = (face_landmarks_5_raw[:, 0::3]) * ratio_width
face_landmark_5_raw[:, 1::3] = (face_landmark_5_raw[:, 1::3]) * ratio_height face_landmarks_5_raw[:, 1::3] = (face_landmarks_5_raw[:, 1::3]) * ratio_height
for face_landmark_5 in face_landmark_5_raw: for face_landmark_raw_5 in face_landmarks_5_raw:
face_landmarks_5.append(numpy.array(face_landmark_5.reshape(-1, 3)[:, :2])) face_landmarks_5.append(numpy.array(face_landmark_raw_5.reshape(-1, 3)[:, :2]))
return bounding_boxes, face_scores, face_landmarks_5 return bounding_boxes, face_scores, face_landmarks_5
@@ -293,8 +295,8 @@ def forward_with_scrfd(detect_vision_frame : VisionFrame) -> Detection:
return detection return detection
def forward_with_yoloface(detect_vision_frame : VisionFrame) -> Detection: def forward_with_yolo_face(detect_vision_frame : VisionFrame) -> Detection:
face_detector = get_inference_pool().get('yoloface') face_detector = get_inference_pool().get('yolo_face')
with thread_semaphore(): with thread_semaphore():
detection = face_detector.run(None, detection = face_detector.run(None,
@@ -309,6 +311,13 @@ def prepare_detect_frame(temp_vision_frame : VisionFrame, face_detector_size : s
face_detector_width, face_detector_height = unpack_resolution(face_detector_size) face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
detect_vision_frame = numpy.zeros((face_detector_height, face_detector_width, 3)) detect_vision_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
detect_vision_frame[:temp_vision_frame.shape[0], :temp_vision_frame.shape[1], :] = temp_vision_frame detect_vision_frame[:temp_vision_frame.shape[0], :temp_vision_frame.shape[1], :] = temp_vision_frame
detect_vision_frame = (detect_vision_frame - 127.5) / 128.0
detect_vision_frame = numpy.expand_dims(detect_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32) detect_vision_frame = numpy.expand_dims(detect_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
return detect_vision_frame return detect_vision_frame
def normalize_detect_frame(detect_vision_frame : VisionFrame, normalize_range : Sequence[int]) -> VisionFrame:
if normalize_range == [ -1, 1 ]:
return (detect_vision_frame - 127.5) / 128.0
if normalize_range == [ 0, 1 ]:
return detect_vision_frame / 255.0
return detect_vision_frame

View File

@@ -5,9 +5,9 @@ import cv2
import numpy import numpy
from cv2.typing import Size from cv2.typing import Size
from facefusion.typing import Anchors, Angle, BoundingBox, Distance, FaceDetectorModel, FaceLandmark5, FaceLandmark68, Mask, Matrix, Points, Scale, Score, Translation, VisionFrame, WarpTemplate, WarpTemplateSet from facefusion.types import Anchors, Angle, BoundingBox, Distance, FaceDetectorModel, FaceLandmark5, FaceLandmark68, Mask, Matrix, Points, Scale, Score, Translation, VisionFrame, WarpTemplate, WarpTemplateSet
WARP_TEMPLATES : WarpTemplateSet =\ WARP_TEMPLATE_SET : WarpTemplateSet =\
{ {
'arcface_112_v1': numpy.array( 'arcface_112_v1': numpy.array(
[ [
@@ -25,7 +25,7 @@ WARP_TEMPLATES : WarpTemplateSet =\
[ 0.37097589, 0.82469196 ], [ 0.37097589, 0.82469196 ],
[ 0.63151696, 0.82325089 ] [ 0.63151696, 0.82325089 ]
]), ]),
'arcface_128_v2': numpy.array( 'arcface_128': numpy.array(
[ [
[ 0.36167656, 0.40387734 ], [ 0.36167656, 0.40387734 ],
[ 0.63696719, 0.40235469 ], [ 0.63696719, 0.40235469 ],
@@ -69,7 +69,7 @@ WARP_TEMPLATES : WarpTemplateSet =\
def estimate_matrix_by_face_landmark_5(face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Matrix: def estimate_matrix_by_face_landmark_5(face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Matrix:
normed_warp_template = WARP_TEMPLATES.get(warp_template) * crop_size normed_warp_template = WARP_TEMPLATE_SET.get(warp_template) * crop_size
affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_warp_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0] affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_warp_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
return affine_matrix return affine_matrix
@@ -208,9 +208,9 @@ def estimate_face_angle(face_landmark_68 : FaceLandmark68) -> Angle:
return face_angle return face_angle
def apply_nms(bounding_boxes : List[BoundingBox], face_scores : List[Score], score_threshold : float, nms_threshold : float) -> Sequence[int]: def apply_nms(bounding_boxes : List[BoundingBox], scores : List[Score], score_threshold : float, nms_threshold : float) -> Sequence[int]:
normed_bounding_boxes = [ (x1, y1, x2 - x1, y2 - y1) for (x1, y1, x2, y2) in bounding_boxes ] normed_bounding_boxes = [ (x1, y1, x2 - x1, y2 - y1) for (x1, y1, x2, y2) in bounding_boxes ]
keep_indices = cv2.dnn.NMSBoxes(normed_bounding_boxes, face_scores, score_threshold = score_threshold, nms_threshold = nms_threshold) keep_indices = cv2.dnn.NMSBoxes(normed_bounding_boxes, scores, score_threshold = score_threshold, nms_threshold = nms_threshold)
return keep_indices return keep_indices

View File

@@ -9,7 +9,7 @@ from facefusion.download import conditional_download_hashes, conditional_downloa
from facefusion.face_helper import create_rotated_matrix_and_size, estimate_matrix_by_face_landmark_5, transform_points, warp_face_by_translation from facefusion.face_helper import create_rotated_matrix_and_size, estimate_matrix_by_face_landmark_5, transform_points, warp_face_by_translation
from facefusion.filesystem import resolve_relative_path from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import Angle, BoundingBox, DownloadScope, DownloadSet, FaceLandmark5, FaceLandmark68, InferencePool, ModelSet, Prediction, Score, VisionFrame from facefusion.types import Angle, BoundingBox, DownloadScope, DownloadSet, FaceLandmark5, FaceLandmark68, InferencePool, ModelSet, Prediction, Score, VisionFrame
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -79,43 +79,43 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
_, model_sources = collect_model_downloads() model_names = [ state_manager.get_item('face_landmarker_model'), 'fan_68_5' ]
return inference_manager.get_inference_pool(__name__, model_sources) _, model_source_set = collect_model_downloads()
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('face_landmarker_model'), 'fan_68_5' ]
inference_manager.clear_inference_pool(__name__, model_names)
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]: def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
model_set = create_static_model_set('full') model_set = create_static_model_set('full')
model_hashes =\ model_hash_set =\
{ {
'fan_68_5': model_set.get('fan_68_5').get('hashes').get('fan_68_5') 'fan_68_5': model_set.get('fan_68_5').get('hashes').get('fan_68_5')
} }
model_sources =\ model_source_set =\
{ {
'fan_68_5': model_set.get('fan_68_5').get('sources').get('fan_68_5') 'fan_68_5': model_set.get('fan_68_5').get('sources').get('fan_68_5')
} }
if state_manager.get_item('face_landmarker_model') in [ 'many', '2dfan4' ]: for face_landmarker_model in [ '2dfan4', 'peppa_wutz' ]:
model_hashes['2dfan4'] = model_set.get('2dfan4').get('hashes').get('2dfan4') if state_manager.get_item('face_landmarker_model') in [ 'many', face_landmarker_model ]:
model_sources['2dfan4'] = model_set.get('2dfan4').get('sources').get('2dfan4') model_hash_set[face_landmarker_model] = model_set.get(face_landmarker_model).get('hashes').get(face_landmarker_model)
model_source_set[face_landmarker_model] = model_set.get(face_landmarker_model).get('sources').get(face_landmarker_model)
if state_manager.get_item('face_landmarker_model') in [ 'many', 'peppa_wutz' ]: return model_hash_set, model_source_set
model_hashes['peppa_wutz'] = model_set.get('peppa_wutz').get('hashes').get('peppa_wutz')
model_sources['peppa_wutz'] = model_set.get('peppa_wutz').get('sources').get('peppa_wutz')
return model_hashes, model_sources
def pre_check() -> bool: def pre_check() -> bool:
model_hashes, model_sources = collect_model_downloads() model_hash_set, model_source_set = collect_model_downloads()
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def detect_face_landmarks(vision_frame : VisionFrame, bounding_box : BoundingBox, face_angle : Angle) -> Tuple[FaceLandmark68, Score]: def detect_face_landmark(vision_frame : VisionFrame, bounding_box : BoundingBox, face_angle : Angle) -> Tuple[FaceLandmark68, Score]:
face_landmark_2dfan4 = None face_landmark_2dfan4 = None
face_landmark_peppa_wutz = None face_landmark_peppa_wutz = None
face_landmark_score_2dfan4 = 0.0 face_landmark_score_2dfan4 = 0.0

View File

@@ -10,7 +10,7 @@ from facefusion import inference_manager, state_manager
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.filesystem import resolve_relative_path from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import DownloadScope, DownloadSet, FaceLandmark68, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame from facefusion.types import DownloadScope, DownloadSet, FaceLandmark68, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -57,6 +57,26 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
}, },
'size': (256, 256) 'size': (256, 256)
}, },
'xseg_3':
{
'hashes':
{
'face_occluder':
{
'url': resolve_download_url('models-3.2.0', 'xseg_3.hash'),
'path': resolve_relative_path('../.assets/models/xseg_3.hash')
}
},
'sources':
{
'face_occluder':
{
'url': resolve_download_url('models-3.2.0', 'xseg_3.onnx'),
'path': resolve_relative_path('../.assets/models/xseg_3.onnx')
}
},
'size': (256, 256)
},
'bisenet_resnet_18': 'bisenet_resnet_18':
{ {
'hashes': 'hashes':
@@ -101,42 +121,39 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
_, model_sources = collect_model_downloads() model_names = [state_manager.get_item('face_occluder_model'), state_manager.get_item('face_parser_model')]
return inference_manager.get_inference_pool(__name__, model_sources) _, model_source_set = collect_model_downloads()
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('face_occluder_model'), state_manager.get_item('face_parser_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]: def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
model_hashes = {}
model_sources = {}
model_set = create_static_model_set('full') model_set = create_static_model_set('full')
model_hash_set = {}
model_source_set = {}
if state_manager.get_item('face_occluder_model') == 'xseg_1': for face_occluder_model in [ 'xseg_1', 'xseg_2', 'xseg_3' ]:
model_hashes['xseg_1'] = model_set.get('xseg_1').get('hashes').get('face_occluder') if state_manager.get_item('face_occluder_model') == face_occluder_model:
model_sources['xseg_1'] = model_set.get('xseg_1').get('sources').get('face_occluder') model_hash_set[face_occluder_model] = model_set.get(face_occluder_model).get('hashes').get('face_occluder')
model_source_set[face_occluder_model] = model_set.get(face_occluder_model).get('sources').get('face_occluder')
if state_manager.get_item('face_occluder_model') == 'xseg_2': for face_parser_model in [ 'bisenet_resnet_18', 'bisenet_resnet_34' ]:
model_hashes['xseg_2'] = model_set.get('xseg_2').get('hashes').get('face_occluder') if state_manager.get_item('face_parser_model') == face_parser_model:
model_sources['xseg_2'] = model_set.get('xseg_2').get('sources').get('face_occluder') model_hash_set[face_parser_model] = model_set.get(face_parser_model).get('hashes').get('face_parser')
model_source_set[face_parser_model] = model_set.get(face_parser_model).get('sources').get('face_parser')
if state_manager.get_item('face_parser_model') == 'bisenet_resnet_18': return model_hash_set, model_source_set
model_hashes['bisenet_resnet_18'] = model_set.get('bisenet_resnet_18').get('hashes').get('face_parser')
model_sources['bisenet_resnet_18'] = model_set.get('bisenet_resnet_18').get('sources').get('face_parser')
if state_manager.get_item('face_parser_model') == 'bisenet_resnet_34':
model_hashes['bisenet_resnet_34'] = model_set.get('bisenet_resnet_34').get('hashes').get('face_parser')
model_sources['bisenet_resnet_34'] = model_set.get('bisenet_resnet_34').get('sources').get('face_parser')
return model_hashes, model_sources
def pre_check() -> bool: def pre_check() -> bool:
model_hashes, model_sources = collect_model_downloads() model_hash_set, model_source_set = collect_model_downloads()
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -154,10 +171,10 @@ def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_p
def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask: def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask:
face_occluder_model = state_manager.get_item('face_occluder_model') model_name = state_manager.get_item('face_occluder_model')
model_size = create_static_model_set('full').get(face_occluder_model).get('size') model_size = create_static_model_set('full').get(model_name).get('size')
prepare_vision_frame = cv2.resize(crop_vision_frame, model_size) prepare_vision_frame = cv2.resize(crop_vision_frame, model_size)
prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32) / 255 prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32) / 255.0
prepare_vision_frame = prepare_vision_frame.transpose(0, 1, 2, 3) prepare_vision_frame = prepare_vision_frame.transpose(0, 1, 2, 3)
occlusion_mask = forward_occlude_face(prepare_vision_frame) occlusion_mask = forward_occlude_face(prepare_vision_frame)
occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32) occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32)
@@ -167,10 +184,10 @@ def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask:
def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List[FaceMaskRegion]) -> Mask: def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
face_parser_model = state_manager.get_item('face_parser_model') model_name = state_manager.get_item('face_parser_model')
model_size = create_static_model_set('full').get(face_parser_model).get('size') model_size = create_static_model_set('full').get(model_name).get('size')
prepare_vision_frame = cv2.resize(crop_vision_frame, model_size) prepare_vision_frame = cv2.resize(crop_vision_frame, model_size)
prepare_vision_frame = prepare_vision_frame[:, :, ::-1].astype(numpy.float32) / 255 prepare_vision_frame = prepare_vision_frame[:, :, ::-1].astype(numpy.float32) / 255.0
prepare_vision_frame = numpy.subtract(prepare_vision_frame, numpy.array([ 0.485, 0.456, 0.406 ]).astype(numpy.float32)) prepare_vision_frame = numpy.subtract(prepare_vision_frame, numpy.array([ 0.485, 0.456, 0.406 ]).astype(numpy.float32))
prepare_vision_frame = numpy.divide(prepare_vision_frame, numpy.array([ 0.229, 0.224, 0.225 ]).astype(numpy.float32)) prepare_vision_frame = numpy.divide(prepare_vision_frame, numpy.array([ 0.229, 0.224, 0.225 ]).astype(numpy.float32))
prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0) prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0)
@@ -192,8 +209,8 @@ def create_mouth_mask(face_landmark_68 : FaceLandmark68) -> Mask:
def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask: def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask:
face_occluder_model = state_manager.get_item('face_occluder_model') model_name = state_manager.get_item('face_occluder_model')
face_occluder = get_inference_pool().get(face_occluder_model) face_occluder = get_inference_pool().get(model_name)
with conditional_thread_semaphore(): with conditional_thread_semaphore():
occlusion_mask : Mask = face_occluder.run(None, occlusion_mask : Mask = face_occluder.run(None,
@@ -205,8 +222,8 @@ def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask:
def forward_parse_face(prepare_vision_frame : VisionFrame) -> Mask: def forward_parse_face(prepare_vision_frame : VisionFrame) -> Mask:
face_parser_model = state_manager.get_item('face_parser_model') model_name = state_manager.get_item('face_parser_model')
face_parser = get_inference_pool().get(face_parser_model) face_parser = get_inference_pool().get(model_name)
with conditional_thread_semaphore(): with conditional_thread_semaphore():
region_mask : Mask = face_parser.run(None, region_mask : Mask = face_parser.run(None,

View File

@@ -8,7 +8,7 @@ from facefusion.download import conditional_download_hashes, conditional_downloa
from facefusion.face_helper import warp_face_by_face_landmark_5 from facefusion.face_helper import warp_face_by_face_landmark_5
from facefusion.filesystem import resolve_relative_path from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import DownloadScope, Embedding, FaceLandmark5, InferencePool, ModelOptions, ModelSet, VisionFrame from facefusion.types import DownloadScope, Embedding, FaceLandmark5, InferencePool, ModelOptions, ModelSet, VisionFrame
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -40,12 +40,15 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ 'arcface' ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ 'arcface' ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
@@ -53,10 +56,10 @@ def get_model_options() -> ModelOptions:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def calc_embedding(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Embedding, Embedding]: def calc_embedding(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Embedding, Embedding]:

View File

@@ -3,7 +3,7 @@ from typing import List
import numpy import numpy
from facefusion import state_manager from facefusion import state_manager
from facefusion.typing import Face, FaceSelectorOrder, FaceSet, Gender, Race from facefusion.types import Face, FaceSelectorOrder, FaceSet, Gender, Race
def find_similar_faces(faces : List[Face], reference_faces : FaceSet, face_distance : float) -> List[Face]: def find_similar_faces(faces : List[Face], reference_faces : FaceSet, face_distance : float) -> List[Face]:
@@ -21,6 +21,7 @@ def find_similar_faces(faces : List[Face], reference_faces : FaceSet, face_dista
def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool: def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
current_face_distance = calc_face_distance(face, reference_face) current_face_distance = calc_face_distance(face, reference_face)
current_face_distance = float(numpy.interp(current_face_distance, [ 0, 2 ], [ 0, 1 ]))
return current_face_distance < face_distance return current_face_distance < face_distance

View File

@@ -3,7 +3,7 @@ from typing import List, Optional
import numpy import numpy
from facefusion.typing import Face, FaceSet, FaceStore, VisionFrame from facefusion.types import Face, FaceSet, FaceStore, VisionFrame
FACE_STORE : FaceStore =\ FACE_STORE : FaceStore =\
{ {
@@ -34,7 +34,10 @@ def clear_static_faces() -> None:
def create_frame_hash(vision_frame : VisionFrame) -> Optional[str]: def create_frame_hash(vision_frame : VisionFrame) -> Optional[str]:
return hashlib.sha1(vision_frame.tobytes()).hexdigest() if numpy.any(vision_frame) else None if numpy.any(vision_frame):
frame_hash = hashlib.blake2b(vision_frame.tobytes(), digest_size = 16).hexdigest()
return frame_hash
return None
def get_reference_faces() -> Optional[FaceSet]: def get_reference_faces() -> Optional[FaceSet]:

View File

@@ -1,23 +1,23 @@
import os import os
import shutil
import subprocess import subprocess
import tempfile import tempfile
from typing import List, Optional from typing import List, Optional
import filetype
from tqdm import tqdm from tqdm import tqdm
from facefusion import logger, process_manager, state_manager, wording import facefusion.choices
from facefusion.filesystem import remove_file from facefusion import ffmpeg_builder, logger, process_manager, state_manager, wording
from facefusion.temp_helper import get_temp_file_path, get_temp_frame_paths, get_temp_frames_pattern from facefusion.filesystem import get_file_format, remove_file
from facefusion.typing import AudioBuffer, Fps, OutputVideoPreset, UpdateProgress from facefusion.temp_helper import get_temp_file_path, get_temp_frames_pattern
from facefusion.vision import count_trim_frame_total, detect_video_duration, restrict_video_fps from facefusion.types import AudioBuffer, Commands, EncoderSet, Fps, UpdateProgress
from facefusion.vision import detect_video_duration, detect_video_fps, predict_video_frame_total
def run_ffmpeg_with_progress(args: List[str], update_progress : UpdateProgress) -> subprocess.Popen[bytes]: def run_ffmpeg_with_progress(commands : Commands, update_progress : UpdateProgress) -> subprocess.Popen[bytes]:
log_level = state_manager.get_item('log_level') log_level = state_manager.get_item('log_level')
commands = [ shutil.which('ffmpeg'), '-hide_banner', '-nostats', '-loglevel', 'error', '-progress', '-' ] commands.extend(ffmpeg_builder.set_progress())
commands.extend(args) commands.extend(ffmpeg_builder.cast_stream())
commands = ffmpeg_builder.run(commands)
process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE) process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
while process_manager.is_processing(): while process_manager.is_processing():
@@ -40,10 +40,9 @@ def run_ffmpeg_with_progress(args: List[str], update_progress : UpdateProgress)
return process return process
def run_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]: def run_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
log_level = state_manager.get_item('log_level') log_level = state_manager.get_item('log_level')
commands = [ shutil.which('ffmpeg'), '-hide_banner', '-nostats', '-loglevel', 'error' ] commands = ffmpeg_builder.run(commands)
commands.extend(args)
process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE) process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
while process_manager.is_processing(): while process_manager.is_processing():
@@ -60,9 +59,8 @@ def run_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
return process return process
def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]: def open_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
commands = [ shutil.which('ffmpeg'), '-loglevel', 'quiet' ] commands = ffmpeg_builder.run(commands)
commands.extend(args)
return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE) return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
@@ -75,100 +73,84 @@ def log_debug(process : subprocess.Popen[bytes]) -> None:
logger.debug(error.strip(), __name__) logger.debug(error.strip(), __name__)
def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool: def get_available_encoder_set() -> EncoderSet:
extract_frame_total = count_trim_frame_total(target_path, trim_frame_start, trim_frame_end) available_encoder_set : EncoderSet =\
temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d') {
commands = [ '-i', target_path, '-s', str(temp_video_resolution), '-q:v', '0' ] 'audio': [],
'video': []
}
commands = ffmpeg_builder.chain(
ffmpeg_builder.get_encoders()
)
process = run_ffmpeg(commands)
if isinstance(trim_frame_start, int) and isinstance(trim_frame_end, int): while line := process.stdout.readline().decode().lower():
commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(temp_video_fps) ]) if line.startswith(' a'):
elif isinstance(trim_frame_start, int): audio_encoder = line.split()[1]
commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(temp_video_fps) ])
elif isinstance(trim_frame_end, int): if audio_encoder in facefusion.choices.output_audio_encoders:
commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(temp_video_fps) ]) index = facefusion.choices.output_audio_encoders.index(audio_encoder) #type:ignore[arg-type]
else: available_encoder_set['audio'].insert(index, audio_encoder) #type:ignore[arg-type]
commands.extend([ '-vf', 'fps=' + str(temp_video_fps) ]) if line.startswith(' v'):
commands.extend([ '-vsync', '0', temp_frames_pattern ]) video_encoder = line.split()[1]
if video_encoder in facefusion.choices.output_video_encoders:
index = facefusion.choices.output_video_encoders.index(video_encoder) #type:ignore[arg-type]
available_encoder_set['video'].insert(index, video_encoder) #type:ignore[arg-type]
return available_encoder_set
def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool:
extract_frame_total = predict_video_frame_total(target_path, temp_video_fps, trim_frame_start, trim_frame_end)
temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')
commands = ffmpeg_builder.chain(
ffmpeg_builder.set_input(target_path),
ffmpeg_builder.set_media_resolution(temp_video_resolution),
ffmpeg_builder.set_frame_quality(0),
ffmpeg_builder.select_frame_range(trim_frame_start, trim_frame_end, temp_video_fps),
ffmpeg_builder.prevent_frame_drop(),
ffmpeg_builder.set_output(temp_frames_pattern)
)
with tqdm(total = extract_frame_total, desc = wording.get('extracting'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: with tqdm(total = extract_frame_total, desc = wording.get('extracting'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
process = run_ffmpeg_with_progress(commands, lambda frame_number: progress.update(frame_number - progress.n)) process = run_ffmpeg_with_progress(commands, lambda frame_number: progress.update(frame_number - progress.n))
return process.returncode == 0 return process.returncode == 0
def merge_video(target_path : str, output_video_resolution : str, output_video_fps: Fps) -> bool:
output_video_encoder = state_manager.get_item('output_video_encoder')
output_video_quality = state_manager.get_item('output_video_quality')
output_video_preset = state_manager.get_item('output_video_preset')
merge_frame_total = len(get_temp_frame_paths(target_path))
temp_video_fps = restrict_video_fps(target_path, output_video_fps)
temp_file_path = get_temp_file_path(target_path)
temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')
is_webm = filetype.guess_mime(target_path) == 'video/webm'
if is_webm:
output_video_encoder = 'libvpx-vp9'
commands = [ '-r', str(temp_video_fps), '-i', temp_frames_pattern, '-s', str(output_video_resolution), '-c:v', output_video_encoder ]
if output_video_encoder in [ 'libx264', 'libx265' ]:
output_video_compression = round(51 - (output_video_quality * 0.51))
commands.extend([ '-crf', str(output_video_compression), '-preset', output_video_preset ])
if output_video_encoder in [ 'libvpx-vp9' ]:
output_video_compression = round(63 - (output_video_quality * 0.63))
commands.extend([ '-crf', str(output_video_compression) ])
if output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
output_video_compression = round(51 - (output_video_quality * 0.51))
commands.extend([ '-cq', str(output_video_compression), '-preset', map_nvenc_preset(output_video_preset) ])
if output_video_encoder in [ 'h264_amf', 'hevc_amf' ]:
output_video_compression = round(51 - (output_video_quality * 0.51))
commands.extend([ '-qp_i', str(output_video_compression), '-qp_p', str(output_video_compression), '-quality', map_amf_preset(output_video_preset) ])
if output_video_encoder in [ 'h264_videotoolbox', 'hevc_videotoolbox' ]:
commands.extend([ '-q:v', str(output_video_quality) ])
commands.extend([ '-vf', 'framerate=fps=' + str(output_video_fps), '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_file_path ])
with tqdm(total = merge_frame_total, desc = wording.get('merging'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
process = run_ffmpeg_with_progress(commands, lambda frame_number: progress.update(frame_number - progress.n))
return process.returncode == 0
def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
output_audio_encoder = state_manager.get_item('output_audio_encoder')
concat_video_path = tempfile.mktemp()
with open(concat_video_path, 'w') as concat_video_file:
for temp_output_path in temp_output_paths:
concat_video_file.write('file \'' + os.path.abspath(temp_output_path) + '\'' + os.linesep)
concat_video_file.flush()
concat_video_file.close()
commands = [ '-f', 'concat', '-safe', '0', '-i', concat_video_file.name, '-c:v', 'copy', '-c:a', output_audio_encoder, '-y', os.path.abspath(output_path) ]
process = run_ffmpeg(commands)
process.communicate()
remove_file(concat_video_path)
return process.returncode == 0
def copy_image(target_path : str, temp_image_resolution : str) -> bool: def copy_image(target_path : str, temp_image_resolution : str) -> bool:
temp_file_path = get_temp_file_path(target_path) temp_file_path = get_temp_file_path(target_path)
temp_image_compression = calc_image_compression(target_path, 100) commands = ffmpeg_builder.chain(
commands = [ '-i', target_path, '-s', str(temp_image_resolution), '-q:v', str(temp_image_compression), '-y', temp_file_path ] ffmpeg_builder.set_input(target_path),
ffmpeg_builder.set_media_resolution(temp_image_resolution),
ffmpeg_builder.set_image_quality(target_path, 100),
ffmpeg_builder.force_output(temp_file_path)
)
return run_ffmpeg(commands).returncode == 0 return run_ffmpeg(commands).returncode == 0
def finalize_image(target_path : str, output_path : str, output_image_resolution : str) -> bool: def finalize_image(target_path : str, output_path : str, output_image_resolution : str) -> bool:
output_image_quality = state_manager.get_item('output_image_quality') output_image_quality = state_manager.get_item('output_image_quality')
temp_file_path = get_temp_file_path(target_path) temp_file_path = get_temp_file_path(target_path)
output_image_compression = calc_image_compression(target_path, output_image_quality) commands = ffmpeg_builder.chain(
commands = [ '-i', temp_file_path, '-s', str(output_image_resolution), '-q:v', str(output_image_compression), '-y', output_path ] ffmpeg_builder.set_input(temp_file_path),
ffmpeg_builder.set_media_resolution(output_image_resolution),
ffmpeg_builder.set_image_quality(target_path, output_image_quality),
ffmpeg_builder.force_output(output_path)
)
return run_ffmpeg(commands).returncode == 0 return run_ffmpeg(commands).returncode == 0
def calc_image_compression(image_path : str, image_quality : int) -> int: def read_audio_buffer(target_path : str, audio_sample_rate : int, audio_sample_size : int, audio_channel_total : int) -> Optional[AudioBuffer]:
is_webp = filetype.guess_mime(image_path) == 'image/webp' commands = ffmpeg_builder.chain(
if is_webp: ffmpeg_builder.set_input(target_path),
image_quality = 100 - image_quality ffmpeg_builder.ignore_video_stream(),
return round(31 - (image_quality * 0.31)) ffmpeg_builder.set_audio_sample_rate(audio_sample_rate),
ffmpeg_builder.set_audio_sample_size(audio_sample_size),
ffmpeg_builder.set_audio_channel_total(audio_channel_total),
ffmpeg_builder.cast_stream()
)
def read_audio_buffer(target_path : str, sample_rate : int, channel_total : int) -> Optional[AudioBuffer]:
commands = [ '-i', target_path, '-vn', '-f', 's16le', '-acodec', 'pcm_s16le', '-ar', str(sample_rate), '-ac', str(channel_total), '-' ]
process = open_ffmpeg(commands) process = open_ffmpeg(commands)
audio_buffer, _ = process.communicate() audio_buffer, _ = process.communicate()
if process.returncode == 0: if process.returncode == 0:
@@ -176,55 +158,97 @@ def read_audio_buffer(target_path : str, sample_rate : int, channel_total : int)
return None return None
def restore_audio(target_path : str, output_path : str, output_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool: def restore_audio(target_path : str, output_path : str, trim_frame_start : int, trim_frame_end : int) -> bool:
output_audio_encoder = state_manager.get_item('output_audio_encoder') output_audio_encoder = state_manager.get_item('output_audio_encoder')
output_audio_quality = state_manager.get_item('output_audio_quality')
output_audio_volume = state_manager.get_item('output_audio_volume')
target_video_fps = detect_video_fps(target_path)
temp_file_path = get_temp_file_path(target_path) temp_file_path = get_temp_file_path(target_path)
temp_video_duration = detect_video_duration(temp_file_path) temp_video_duration = detect_video_duration(temp_file_path)
commands = [ '-i', temp_file_path ]
if isinstance(trim_frame_start, int): commands = ffmpeg_builder.chain(
start_time = trim_frame_start / output_video_fps ffmpeg_builder.set_input(temp_file_path),
commands.extend([ '-ss', str(start_time) ]) ffmpeg_builder.select_media_range(trim_frame_start, trim_frame_end, target_video_fps),
if isinstance(trim_frame_end, int): ffmpeg_builder.set_input(target_path),
end_time = trim_frame_end / output_video_fps ffmpeg_builder.copy_video_encoder(),
commands.extend([ '-to', str(end_time) ]) ffmpeg_builder.set_audio_encoder(output_audio_encoder),
commands.extend([ '-i', target_path, '-c:v', 'copy', '-c:a', output_audio_encoder, '-map', '0:v:0', '-map', '1:a:0', '-t', str(temp_video_duration), '-y', output_path ]) ffmpeg_builder.set_audio_quality(output_audio_encoder, output_audio_quality),
ffmpeg_builder.set_audio_volume(output_audio_volume),
ffmpeg_builder.select_media_stream('0:v:0'),
ffmpeg_builder.select_media_stream('1:a:0'),
ffmpeg_builder.set_video_duration(temp_video_duration),
ffmpeg_builder.force_output(output_path)
)
return run_ffmpeg(commands).returncode == 0 return run_ffmpeg(commands).returncode == 0
def replace_audio(target_path : str, audio_path : str, output_path : str) -> bool: def replace_audio(target_path : str, audio_path : str, output_path : str) -> bool:
output_audio_encoder = state_manager.get_item('output_audio_encoder') output_audio_encoder = state_manager.get_item('output_audio_encoder')
output_audio_quality = state_manager.get_item('output_audio_quality')
output_audio_volume = state_manager.get_item('output_audio_volume')
temp_file_path = get_temp_file_path(target_path) temp_file_path = get_temp_file_path(target_path)
temp_video_duration = detect_video_duration(temp_file_path) temp_video_duration = detect_video_duration(temp_file_path)
commands = [ '-i', temp_file_path, '-i', audio_path, '-c:v', 'copy', '-c:a', output_audio_encoder, '-t', str(temp_video_duration), '-y', output_path ]
commands = ffmpeg_builder.chain(
ffmpeg_builder.set_input(temp_file_path),
ffmpeg_builder.set_input(audio_path),
ffmpeg_builder.copy_video_encoder(),
ffmpeg_builder.set_audio_encoder(output_audio_encoder),
ffmpeg_builder.set_audio_quality(output_audio_encoder, output_audio_quality),
ffmpeg_builder.set_audio_volume(output_audio_volume),
ffmpeg_builder.set_video_duration(temp_video_duration),
ffmpeg_builder.force_output(output_path)
)
return run_ffmpeg(commands).returncode == 0 return run_ffmpeg(commands).returncode == 0
def map_nvenc_preset(output_video_preset : OutputVideoPreset) -> Optional[str]: def merge_video(target_path : str, temp_video_fps : Fps, output_video_resolution : str, output_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool:
if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast' ]: output_video_encoder = state_manager.get_item('output_video_encoder')
return 'fast' output_video_quality = state_manager.get_item('output_video_quality')
if output_video_preset == 'medium': output_video_preset = state_manager.get_item('output_video_preset')
return 'medium' merge_frame_total = predict_video_frame_total(target_path, output_video_fps, trim_frame_start, trim_frame_end)
if output_video_preset in [ 'slow', 'slower', 'veryslow' ]: temp_file_path = get_temp_file_path(target_path)
return 'slow' temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')
return None
if get_file_format(target_path) == 'webm':
output_video_encoder = 'libvpx-vp9'
commands = ffmpeg_builder.chain(
ffmpeg_builder.set_conditional_fps(temp_video_fps),
ffmpeg_builder.set_input(temp_frames_pattern),
ffmpeg_builder.set_media_resolution(output_video_resolution),
ffmpeg_builder.set_video_encoder(output_video_encoder),
ffmpeg_builder.set_video_quality(output_video_encoder, output_video_quality),
ffmpeg_builder.set_video_preset(output_video_encoder, output_video_preset),
ffmpeg_builder.set_video_fps(output_video_fps),
ffmpeg_builder.set_pixel_format(output_video_encoder),
ffmpeg_builder.set_video_colorspace('bt709'),
ffmpeg_builder.force_output(temp_file_path)
)
with tqdm(total = merge_frame_total, desc = wording.get('merging'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
process = run_ffmpeg_with_progress(commands, lambda frame_number: progress.update(frame_number - progress.n))
return process.returncode == 0
def map_amf_preset(output_video_preset : OutputVideoPreset) -> Optional[str]: def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast' ]: concat_video_path = tempfile.mktemp()
return 'speed'
if output_video_preset in [ 'faster', 'fast', 'medium' ]:
return 'balanced'
if output_video_preset in [ 'slow', 'slower', 'veryslow' ]:
return 'quality'
return None
with open(concat_video_path, 'w') as concat_video_file:
for temp_output_path in temp_output_paths:
concat_video_file.write('file \'' + os.path.abspath(temp_output_path) + '\'' + os.linesep)
concat_video_file.flush()
concat_video_file.close()
def map_qsv_preset(output_video_preset : OutputVideoPreset) -> Optional[str]: output_path = os.path.abspath(output_path)
if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast' ]: commands = ffmpeg_builder.chain(
return 'fast' ffmpeg_builder.unsafe_concat(),
if output_video_preset == 'medium': ffmpeg_builder.set_input(concat_video_file.name),
return 'medium' ffmpeg_builder.copy_video_encoder(),
if output_video_preset in [ 'slow', 'slower', 'veryslow' ]: ffmpeg_builder.copy_audio_encoder(),
return 'slow' ffmpeg_builder.force_output(output_path)
return None )
process = run_ffmpeg(commands)
process.communicate()
remove_file(concat_video_path)
return process.returncode == 0

View File

@@ -0,0 +1,240 @@
import itertools
import shutil
from typing import Optional
import numpy
from facefusion.filesystem import get_file_format
from facefusion.types import AudioEncoder, Commands, Duration, Fps, StreamMode, VideoEncoder, VideoPreset
def run(commands : Commands) -> Commands:
return [ shutil.which('ffmpeg'), '-loglevel', 'error' ] + commands
def chain(*commands : Commands) -> Commands:
return list(itertools.chain(*commands))
def get_encoders() -> Commands:
return [ '-encoders' ]
def set_progress() -> Commands:
return [ '-progress' ]
def set_input(input_path : str) -> Commands:
return [ '-i', input_path ]
def set_conditional_fps(conditional_fps : Fps) -> Commands:
return [ '-r', str(conditional_fps) ]
def set_output(output_path : str) -> Commands:
return [ output_path ]
def force_output(output_path : str) -> Commands:
return [ '-y', output_path ]
def cast_stream() -> Commands:
return [ '-' ]
def set_stream_mode(stream_mode : StreamMode) -> Commands:
if stream_mode == 'udp':
return [ '-f', 'mpegts' ]
if stream_mode == 'v4l2':
return [ '-f', 'v4l2' ]
return []
def unsafe_concat() -> Commands:
return [ '-f', 'concat', '-safe', '0' ]
def set_pixel_format(video_encoder : VideoEncoder) -> Commands:
if video_encoder == 'rawvideo':
return [ '-pix_fmt', 'rgb24' ]
return [ '-pix_fmt', 'yuv420p' ]
def set_frame_quality(frame_quality : int) -> Commands:
return [ '-q:v', str(frame_quality) ]
def select_frame_range(frame_start : int, frame_end : int, video_fps : Fps) -> Commands:
if isinstance(frame_start, int) and isinstance(frame_end, int):
return [ '-vf', 'trim=start_frame=' + str(frame_start) + ':end_frame=' + str(frame_end) + ',fps=' + str(video_fps) ]
if isinstance(frame_start, int):
return [ '-vf', 'trim=start_frame=' + str(frame_start) + ',fps=' + str(video_fps) ]
if isinstance(frame_end, int):
return [ '-vf', 'trim=end_frame=' + str(frame_end) + ',fps=' + str(video_fps) ]
return [ '-vf', 'fps=' + str(video_fps) ]
def prevent_frame_drop() -> Commands:
return [ '-vsync', '0' ]
def select_media_range(frame_start : int, frame_end : int, media_fps : Fps) -> Commands:
commands = []
if isinstance(frame_start, int):
commands.extend([ '-ss', str(frame_start / media_fps) ])
if isinstance(frame_end, int):
commands.extend([ '-to', str(frame_end / media_fps) ])
return commands
def select_media_stream(media_stream : str) -> Commands:
return [ '-map', media_stream ]
def set_media_resolution(video_resolution : str) -> Commands:
return [ '-s', video_resolution ]
def set_image_quality(image_path : str, image_quality : int) -> Commands:
if get_file_format(image_path) == 'webp':
image_compression = image_quality
else:
image_compression = round(31 - (image_quality * 0.31))
return [ '-q:v', str(image_compression) ]
def set_audio_encoder(audio_codec : str) -> Commands:
return [ '-c:a', audio_codec ]
def copy_audio_encoder() -> Commands:
return set_audio_encoder('copy')
def set_audio_sample_rate(audio_sample_rate : int) -> Commands:
return [ '-ar', str(audio_sample_rate) ]
def set_audio_sample_size(audio_sample_size : int) -> Commands:
if audio_sample_size == 16:
return [ '-f', 's16le' ]
if audio_sample_size == 32:
return [ '-f', 's32le' ]
return []
def set_audio_channel_total(audio_channel_total : int) -> Commands:
return [ '-ac', str(audio_channel_total) ]
def set_audio_quality(audio_encoder : AudioEncoder, audio_quality : int) -> Commands:
if audio_encoder == 'aac':
audio_compression = round(numpy.interp(audio_quality, [ 0, 100 ], [ 0.1, 2.0 ]), 1)
return [ '-q:a', str(audio_compression) ]
if audio_encoder == 'libmp3lame':
audio_compression = round(numpy.interp(audio_quality, [ 0, 100 ], [ 9, 0 ]))
return [ '-q:a', str(audio_compression) ]
if audio_encoder == 'libopus':
audio_bit_rate = round(numpy.interp(audio_quality, [ 0, 100 ], [ 64, 320 ]))
return [ '-b:a', str(audio_bit_rate) + 'k' ]
if audio_encoder == 'libvorbis':
audio_compression = round(numpy.interp(audio_quality, [ 0, 100 ], [ -1, 10 ]), 1)
return [ '-q:a', str(audio_compression) ]
return []
def set_audio_volume(audio_volume : int) -> Commands:
return [ '-filter:a', 'volume=' + str(audio_volume / 100) ]
def set_video_encoder(video_encoder : str) -> Commands:
return [ '-c:v', video_encoder ]
def copy_video_encoder() -> Commands:
return set_video_encoder('copy')
def set_video_quality(video_encoder : VideoEncoder, video_quality : int) -> Commands:
if video_encoder in [ 'libx264', 'libx265' ]:
video_compression = round(numpy.interp(video_quality, [ 0, 100 ], [ 51, 0 ]))
return [ '-crf', str(video_compression) ]
if video_encoder == 'libvpx-vp9':
video_compression = round(numpy.interp(video_quality, [ 0, 100 ], [ 63, 0 ]))
return [ '-crf', str(video_compression) ]
if video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
video_compression = round(numpy.interp(video_quality, [ 0, 100 ], [ 51, 0 ]))
return [ '-cq', str(video_compression) ]
if video_encoder in [ 'h264_amf', 'hevc_amf' ]:
video_compression = round(numpy.interp(video_quality, [ 0, 100 ], [ 51, 0 ]))
return [ '-qp_i', str(video_compression), '-qp_p', str(video_compression), '-qp_b', str(video_compression) ]
if video_encoder in [ 'h264_qsv', 'hevc_qsv' ]:
video_compression = round(numpy.interp(video_quality, [ 0, 100 ], [ 51, 0 ]))
return [ '-qp', str(video_compression) ]
if video_encoder in [ 'h264_videotoolbox', 'hevc_videotoolbox' ]:
video_bit_rate = round(numpy.interp(video_quality, [ 0, 100 ], [ 1024, 50512 ]))
return [ '-b:v', str(video_bit_rate) + 'k' ]
return []
def set_video_preset(video_encoder : VideoEncoder, video_preset : VideoPreset) -> Commands:
if video_encoder in [ 'libx264', 'libx265' ]:
return [ '-preset', video_preset ]
if video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
return [ '-preset', map_nvenc_preset(video_preset) ]
if video_encoder in [ 'h264_amf', 'hevc_amf' ]:
return [ '-quality', map_amf_preset(video_preset) ]
if video_encoder in [ 'h264_qsv', 'hevc_qsv' ]:
return [ '-preset', map_qsv_preset(video_preset) ]
return []
def set_video_colorspace(video_colorspace : str) -> Commands:
return [ '-colorspace', video_colorspace ]
def set_video_fps(video_fps : Fps) -> Commands:
return [ '-vf', 'framerate=fps=' + str(video_fps) ]
def set_video_duration(video_duration : Duration) -> Commands:
return [ '-t', str(video_duration) ]
def capture_video() -> Commands:
return [ '-f', 'rawvideo' ]
def ignore_video_stream() -> Commands:
return [ '-vn' ]
def map_nvenc_preset(video_preset : VideoPreset) -> Optional[str]:
if video_preset in [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast' ]:
return 'fast'
if video_preset == 'medium':
return 'medium'
if video_preset in [ 'slow', 'slower', 'veryslow' ]:
return 'slow'
return None
def map_amf_preset(video_preset : VideoPreset) -> Optional[str]:
if video_preset in [ 'ultrafast', 'superfast', 'veryfast' ]:
return 'speed'
if video_preset in [ 'faster', 'fast', 'medium' ]:
return 'balanced'
if video_preset in [ 'slow', 'slower', 'veryslow' ]:
return 'quality'
return None
def map_qsv_preset(video_preset : VideoPreset) -> Optional[str]:
if video_preset in [ 'ultrafast', 'superfast', 'veryfast' ]:
return 'veryfast'
if video_preset in [ 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ]:
return video_preset
return None

View File

@@ -1,16 +1,9 @@
import glob import glob
import os import os
import shutil import shutil
from pathlib import Path
from typing import List, Optional from typing import List, Optional
import filetype import facefusion.choices
from facefusion.common_helper import is_windows
from facefusion.typing import File
if is_windows():
import ctypes
def get_file_size(file_path : str) -> int: def get_file_size(file_path : str) -> int:
@@ -19,54 +12,95 @@ def get_file_size(file_path : str) -> int:
return 0 return 0
def same_file_extension(file_paths : List[str]) -> bool: def get_file_name(file_path : str) -> Optional[str]:
file_extensions : List[str] = [] file_name, _ = os.path.splitext(os.path.basename(file_path))
for file_path in file_paths: if file_name:
_, file_extension = os.path.splitext(file_path.lower()) return file_name
return None
if file_extensions and file_extension not in file_extensions:
return False def get_file_extension(file_path : str) -> Optional[str]:
file_extensions.append(file_extension) _, file_extension = os.path.splitext(file_path)
return True
if file_extension:
return file_extension.lower()
return None
def get_file_format(file_path : str) -> Optional[str]:
file_extension = get_file_extension(file_path)
if file_extension:
if file_extension == '.jpg':
return 'jpeg'
if file_extension == '.tif':
return 'tiff'
return file_extension.lstrip('.')
return None
def same_file_extension(first_file_path : str, second_file_path : str) -> bool:
first_file_extension = get_file_extension(first_file_path)
second_file_extension = get_file_extension(second_file_path)
if first_file_extension and second_file_extension:
return get_file_extension(first_file_path) == get_file_extension(second_file_path)
return False
def is_file(file_path : str) -> bool: def is_file(file_path : str) -> bool:
return bool(file_path and os.path.isfile(file_path)) if file_path:
return os.path.isfile(file_path)
def is_directory(directory_path : str) -> bool:
return bool(directory_path and os.path.isdir(directory_path))
def in_directory(file_path : str) -> bool:
if file_path and not is_directory(file_path):
return is_directory(os.path.dirname(file_path))
return False return False
def is_audio(audio_path : str) -> bool: def is_audio(audio_path : str) -> bool:
return is_file(audio_path) and filetype.helpers.is_audio(audio_path) return is_file(audio_path) and get_file_format(audio_path) in facefusion.choices.audio_formats
def has_audio(audio_paths : List[str]) -> bool: def has_audio(audio_paths : List[str]) -> bool:
if audio_paths: if audio_paths:
return any(is_audio(audio_path) for audio_path in audio_paths) return any(map(is_audio, audio_paths))
return False
def are_audios(audio_paths : List[str]) -> bool:
if audio_paths:
return all(map(is_audio, audio_paths))
return False return False
def is_image(image_path : str) -> bool: def is_image(image_path : str) -> bool:
return is_file(image_path) and filetype.helpers.is_image(image_path) return is_file(image_path) and get_file_format(image_path) in facefusion.choices.image_formats
def has_image(image_paths: List[str]) -> bool: def has_image(image_paths : List[str]) -> bool:
if image_paths: if image_paths:
return any(is_image(image_path) for image_path in image_paths) return any(is_image(image_path) for image_path in image_paths)
return False return False
def are_images(image_paths : List[str]) -> bool:
if image_paths:
return all(map(is_image, image_paths))
return False
def is_video(video_path : str) -> bool: def is_video(video_path : str) -> bool:
return is_file(video_path) and filetype.helpers.is_video(video_path) return is_file(video_path) and get_file_format(video_path) in facefusion.choices.video_formats
def has_video(video_paths : List[str]) -> bool:
if video_paths:
return any(map(is_video, video_paths))
return False
def are_videos(video_paths : List[str]) -> bool:
if video_paths:
return any(map(is_video, video_paths))
return False
def filter_audio_paths(paths : List[str]) -> List[str]: def filter_audio_paths(paths : List[str]) -> List[str]:
@@ -81,24 +115,6 @@ def filter_image_paths(paths : List[str]) -> List[str]:
return [] return []
def resolve_relative_path(path : str) -> str:
return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
def sanitize_path_for_windows(full_path : str) -> Optional[str]:
buffer_size = 0
while True:
unicode_buffer = ctypes.create_unicode_buffer(buffer_size)
buffer_limit = ctypes.windll.kernel32.GetShortPathNameW(full_path, unicode_buffer, buffer_size) #type:ignore[attr-defined]
if buffer_size > buffer_limit:
return unicode_buffer.value
if buffer_limit == 0:
return None
buffer_size = buffer_limit
def copy_file(file_path : str, move_path : str) -> bool: def copy_file(file_path : str, move_path : str) -> bool:
if is_file(file_path): if is_file(file_path):
shutil.copy(file_path, move_path) shutil.copy(file_path, move_path)
@@ -120,31 +136,18 @@ def remove_file(file_path : str) -> bool:
return False return False
def create_directory(directory_path : str) -> bool: def resolve_file_paths(directory_path : str) -> List[str]:
if directory_path and not is_file(directory_path): file_paths : List[str] = []
Path(directory_path).mkdir(parents = True, exist_ok = True)
return is_directory(directory_path)
return False
def list_directory(directory_path : str) -> Optional[List[File]]:
if is_directory(directory_path): if is_directory(directory_path):
file_paths = sorted(os.listdir(directory_path)) file_names_and_extensions = sorted(os.listdir(directory_path))
files: List[File] = []
for file_path in file_paths: for file_name_and_extension in file_names_and_extensions:
file_name, file_extension = os.path.splitext(file_path) if not file_name_and_extension.startswith(('.', '__')):
file_path = os.path.join(directory_path, file_name_and_extension)
file_paths.append(file_path)
if not file_name.startswith(('.', '__')): return file_paths
files.append(
{
'name': file_name,
'extension': file_extension,
'path': os.path.join(directory_path, file_path)
})
return files
return None
def resolve_file_pattern(file_pattern : str) -> List[str]: def resolve_file_pattern(file_pattern : str) -> List[str]:
@@ -153,8 +156,33 @@ def resolve_file_pattern(file_pattern : str) -> List[str]:
return [] return []
def is_directory(directory_path : str) -> bool:
if directory_path:
return os.path.isdir(directory_path)
return False
def in_directory(file_path : str) -> bool:
if file_path:
directory_path = os.path.dirname(file_path)
if directory_path:
return not is_directory(file_path) and is_directory(directory_path)
return False
def create_directory(directory_path : str) -> bool:
if directory_path and not is_file(directory_path):
os.makedirs(directory_path, exist_ok = True)
return is_directory(directory_path)
return False
def remove_directory(directory_path : str) -> bool: def remove_directory(directory_path : str) -> bool:
if is_directory(directory_path): if is_directory(directory_path):
shutil.rmtree(directory_path, ignore_errors = True) shutil.rmtree(directory_path, ignore_errors = True)
return not is_directory(directory_path) return not is_directory(directory_path)
return False return False
def resolve_relative_path(path : str) -> str:
return os.path.abspath(os.path.join(os.path.dirname(__file__), path))

View File

@@ -2,7 +2,7 @@ import os
import zlib import zlib
from typing import Optional from typing import Optional
from facefusion.filesystem import is_file from facefusion.filesystem import get_file_name, is_file
def create_hash(content : bytes) -> str: def create_hash(content : bytes) -> str:
@@ -13,8 +13,8 @@ def validate_hash(validate_path : str) -> bool:
hash_path = get_hash_path(validate_path) hash_path = get_hash_path(validate_path)
if is_file(hash_path): if is_file(hash_path):
with open(hash_path, 'r') as hash_file: with open(hash_path) as hash_file:
hash_content = hash_file.read().strip() hash_content = hash_file.read()
with open(validate_path, 'rb') as validate_file: with open(validate_path, 'rb') as validate_file:
validate_content = validate_file.read() validate_content = validate_file.read()
@@ -25,8 +25,8 @@ def validate_hash(validate_path : str) -> bool:
def get_hash_path(validate_path : str) -> Optional[str]: def get_hash_path(validate_path : str) -> Optional[str]:
if is_file(validate_path): if is_file(validate_path):
validate_directory_path, _ = os.path.split(validate_path) validate_directory_path, file_name_and_extension = os.path.split(validate_path)
validate_file_name, _ = os.path.splitext(_) validate_file_name = get_file_name(file_name_and_extension)
return os.path.join(validate_directory_path, validate_file_name + '.hash') return os.path.join(validate_directory_path, validate_file_name + '.hash')
return None return None

View File

@@ -1,3 +1,4 @@
import importlib
from time import sleep from time import sleep
from typing import List from typing import List
@@ -5,59 +6,69 @@ from onnxruntime import InferenceSession
from facefusion import process_manager, state_manager from facefusion import process_manager, state_manager
from facefusion.app_context import detect_app_context from facefusion.app_context import detect_app_context
from facefusion.execution import create_inference_execution_providers from facefusion.execution import create_inference_session_providers
from facefusion.thread_helper import thread_lock from facefusion.filesystem import is_file
from facefusion.typing import DownloadSet, ExecutionProvider, InferencePool, InferencePoolSet from facefusion.types import DownloadSet, ExecutionProvider, InferencePool, InferencePoolSet
INFERENCE_POOLS : InferencePoolSet =\ INFERENCE_POOL_SET : InferencePoolSet =\
{ {
'cli': {}, #type:ignore[typeddict-item] 'cli': {},
'ui': {} #type:ignore[typeddict-item] 'ui': {}
} }
def get_inference_pool(model_context : str, model_sources : DownloadSet) -> InferencePool: def get_inference_pool(module_name : str, model_names : List[str], model_source_set : DownloadSet) -> InferencePool:
global INFERENCE_POOLS while process_manager.is_checking():
sleep(0.5)
execution_device_id = state_manager.get_item('execution_device_id')
execution_providers = resolve_execution_providers(module_name)
app_context = detect_app_context()
inference_context = get_inference_context(module_name, model_names, execution_device_id, execution_providers)
with thread_lock(): if app_context == 'cli' and INFERENCE_POOL_SET.get('ui').get(inference_context):
while process_manager.is_checking(): INFERENCE_POOL_SET['cli'][inference_context] = INFERENCE_POOL_SET.get('ui').get(inference_context)
sleep(0.5) if app_context == 'ui' and INFERENCE_POOL_SET.get('cli').get(inference_context):
app_context = detect_app_context() INFERENCE_POOL_SET['ui'][inference_context] = INFERENCE_POOL_SET.get('cli').get(inference_context)
inference_context = get_inference_context(model_context) if not INFERENCE_POOL_SET.get(app_context).get(inference_context):
INFERENCE_POOL_SET[app_context][inference_context] = create_inference_pool(model_source_set, execution_device_id, execution_providers)
if app_context == 'cli' and INFERENCE_POOLS.get('ui').get(inference_context): return INFERENCE_POOL_SET.get(app_context).get(inference_context)
INFERENCE_POOLS['cli'][inference_context] = INFERENCE_POOLS.get('ui').get(inference_context)
if app_context == 'ui' and INFERENCE_POOLS.get('cli').get(inference_context):
INFERENCE_POOLS['ui'][inference_context] = INFERENCE_POOLS.get('cli').get(inference_context)
if not INFERENCE_POOLS.get(app_context).get(inference_context):
INFERENCE_POOLS[app_context][inference_context] = create_inference_pool(model_sources, state_manager.get_item('execution_device_id'), state_manager.get_item('execution_providers'))
return INFERENCE_POOLS.get(app_context).get(inference_context)
def create_inference_pool(model_sources : DownloadSet, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferencePool: def create_inference_pool(model_source_set : DownloadSet, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferencePool:
inference_pool : InferencePool = {} inference_pool : InferencePool = {}
for model_name in model_sources.keys(): for model_name in model_source_set.keys():
inference_pool[model_name] = create_inference_session(model_sources.get(model_name).get('path'), execution_device_id, execution_providers) model_path = model_source_set.get(model_name).get('path')
if is_file(model_path):
inference_pool[model_name] = create_inference_session(model_path, execution_device_id, execution_providers)
return inference_pool return inference_pool
def clear_inference_pool(model_context : str) -> None: def clear_inference_pool(module_name : str, model_names : List[str]) -> None:
global INFERENCE_POOLS execution_device_id = state_manager.get_item('execution_device_id')
execution_providers = resolve_execution_providers(module_name)
app_context = detect_app_context() app_context = detect_app_context()
inference_context = get_inference_context(model_context) inference_context = get_inference_context(module_name, model_names, execution_device_id, execution_providers)
if INFERENCE_POOLS.get(app_context).get(inference_context): if INFERENCE_POOL_SET.get(app_context).get(inference_context):
del INFERENCE_POOLS[app_context][inference_context] del INFERENCE_POOL_SET[app_context][inference_context]
def create_inference_session(model_path : str, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferenceSession: def create_inference_session(model_path : str, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferenceSession:
inference_execution_providers = create_inference_execution_providers(execution_device_id, execution_providers) inference_session_providers = create_inference_session_providers(execution_device_id, execution_providers)
return InferenceSession(model_path, providers = inference_execution_providers) return InferenceSession(model_path, providers = inference_session_providers)
def get_inference_context(model_context : str) -> str: def get_inference_context(module_name : str, model_names : List[str], execution_device_id : str, execution_providers : List[ExecutionProvider]) -> str:
inference_context = model_context + '.' + '_'.join(state_manager.get_item('execution_providers')) inference_context = '.'.join([ module_name ] + model_names + [ execution_device_id ] + list(execution_providers))
return inference_context return inference_context
def resolve_execution_providers(module_name : str) -> List[ExecutionProvider]:
module = importlib.import_module(module_name)
if hasattr(module, 'resolve_execution_providers'):
return getattr(module, 'resolve_execution_providers')()
return state_manager.get_item('execution_providers')

View File

@@ -3,31 +3,28 @@ import shutil
import signal import signal
import subprocess import subprocess
import sys import sys
import tempfile
from argparse import ArgumentParser, HelpFormatter from argparse import ArgumentParser, HelpFormatter
from typing import Dict, Tuple
from facefusion import metadata, wording from facefusion import metadata, wording
from facefusion.common_helper import is_linux, is_macos, is_windows from facefusion.common_helper import is_linux, is_windows
ONNXRUNTIMES : Dict[str, Tuple[str, str]] = {} ONNXRUNTIME_SET =\
{
if is_macos(): 'default': ('onnxruntime', '1.21.1')
ONNXRUNTIMES['default'] = ('onnxruntime', '1.20.1') }
else: if is_windows() or is_linux():
ONNXRUNTIMES['default'] = ('onnxruntime', '1.20.1') ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.21.1')
ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.20.1') ONNXRUNTIME_SET['openvino'] = ('onnxruntime-openvino', '1.21.0')
ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.20.0')
if is_linux():
ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.19.0')
if is_windows(): if is_windows():
ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.17.3') ONNXRUNTIME_SET['directml'] = ('onnxruntime-directml', '1.17.3')
if is_linux():
ONNXRUNTIME_SET['rocm'] = ('onnxruntime-rocm', '1.21.0')
def cli() -> None: def cli() -> None:
signal.signal(signal.SIGINT, lambda signal_number, frame: sys.exit(0)) signal.signal(signal.SIGINT, lambda signal_number, frame: sys.exit(0))
program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 50)) program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 50))
program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys(), required = True) program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIME_SET.keys(), required = True)
program.add_argument('--skip-conda', help = wording.get('help.skip_conda'), action = 'store_true') program.add_argument('--skip-conda', help = wording.get('help.skip_conda'), action = 'store_true')
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
run(program) run(program)
@@ -36,27 +33,27 @@ def cli() -> None:
def run(program : ArgumentParser) -> None: def run(program : ArgumentParser) -> None:
args = program.parse_args() args = program.parse_args()
has_conda = 'CONDA_PREFIX' in os.environ has_conda = 'CONDA_PREFIX' in os.environ
onnxruntime_name, onnxruntime_version = ONNXRUNTIMES.get(args.onnxruntime) onnxruntime_name, onnxruntime_version = ONNXRUNTIME_SET.get(args.onnxruntime)
if not args.skip_conda and not has_conda: if not args.skip_conda and not has_conda:
sys.stdout.write(wording.get('conda_not_activated') + os.linesep) sys.stdout.write(wording.get('conda_not_activated') + os.linesep)
sys.exit(1) sys.exit(1)
subprocess.call([ shutil.which('pip'), 'install', '-r', 'requirements.txt', '--force-reinstall' ]) with open('requirements.txt') as file:
for line in file.readlines():
__line__ = line.strip()
if not __line__.startswith('onnxruntime'):
subprocess.call([ shutil.which('pip'), 'install', line, '--force-reinstall' ])
if args.onnxruntime == 'rocm': if args.onnxruntime == 'rocm':
python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor) python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
if python_id in [ 'cp310', 'cp312' ]: if python_id in [ 'cp310', 'cp312' ]:
wheel_name = 'onnxruntime_rocm-' + onnxruntime_version + '-' + python_id + '-' + python_id + '-linux_x86_64.whl' wheel_name = 'onnxruntime_rocm-' + onnxruntime_version + '-' + python_id + '-' + python_id + '-linux_x86_64.whl'
wheel_path = os.path.join(tempfile.gettempdir(), wheel_name) wheel_url = 'https://repo.radeon.com/rocm/manylinux/rocm-rel-6.4/' + wheel_name
wheel_url = 'https://repo.radeon.com/rocm/manylinux/rocm-rel-6.3.1/' + wheel_name subprocess.call([ shutil.which('pip'), 'install', wheel_url, '--force-reinstall' ])
subprocess.call([ shutil.which('curl'), '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ])
subprocess.call([ shutil.which('pip'), 'uninstall', 'onnxruntime', wheel_path, '-y', '-q' ])
subprocess.call([ shutil.which('pip'), 'install', wheel_path, '--force-reinstall' ])
os.remove(wheel_path)
else: else:
subprocess.call([ shutil.which('pip'), 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
subprocess.call([ shutil.which('pip'), 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ]) subprocess.call([ shutil.which('pip'), 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ])
if args.onnxruntime == 'cuda' and has_conda: if args.onnxruntime == 'cuda' and has_conda:
@@ -89,5 +86,5 @@ def run(program : ArgumentParser) -> None:
subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'PATH=' + os.pathsep.join(library_paths) ]) subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'PATH=' + os.pathsep.join(library_paths) ])
if args.onnxruntime in [ 'directml', 'rocm' ]: if args.onnxruntime == 'directml':
subprocess.call([ shutil.which('pip'), 'install', 'numpy==1.26.4', '--force-reinstall' ]) subprocess.call([ shutil.which('pip'), 'install', 'numpy==1.26.4', '--force-reinstall' ])

View File

@@ -2,11 +2,14 @@ import os
from datetime import datetime from datetime import datetime
from typing import Optional from typing import Optional
from facefusion.filesystem import get_file_extension, get_file_name
def get_step_output_path(job_id : str, step_index : int, output_path : str) -> Optional[str]: def get_step_output_path(job_id : str, step_index : int, output_path : str) -> Optional[str]:
if output_path: if output_path:
output_directory_path, _ = os.path.split(output_path) output_directory_path, _ = os.path.split(output_path)
output_file_name, output_file_extension = os.path.splitext(_) output_file_name = get_file_name(_)
output_file_extension = get_file_extension(_)
return os.path.join(output_directory_path, output_file_name + '-' + job_id + '-' + str(step_index) + output_file_extension) return os.path.join(output_directory_path, output_file_name + '-' + job_id + '-' + str(step_index) + output_file_extension)
return None return None

View File

@@ -3,7 +3,7 @@ from typing import Optional, Tuple
from facefusion.date_helper import describe_time_ago from facefusion.date_helper import describe_time_ago
from facefusion.jobs import job_manager from facefusion.jobs import job_manager
from facefusion.typing import JobStatus, TableContents, TableHeaders from facefusion.types import JobStatus, TableContents, TableHeaders
def compose_job_list(job_status : JobStatus) -> Tuple[TableHeaders, TableContents]: def compose_job_list(job_status : JobStatus) -> Tuple[TableHeaders, TableContents]:

View File

@@ -4,10 +4,10 @@ from typing import List, Optional
import facefusion.choices import facefusion.choices
from facefusion.date_helper import get_current_date_time from facefusion.date_helper import get_current_date_time
from facefusion.filesystem import create_directory, is_directory, is_file, move_file, remove_directory, remove_file, resolve_file_pattern from facefusion.filesystem import create_directory, get_file_name, is_directory, is_file, move_file, remove_directory, remove_file, resolve_file_pattern
from facefusion.jobs.job_helper import get_step_output_path from facefusion.jobs.job_helper import get_step_output_path
from facefusion.json import read_json, write_json from facefusion.json import read_json, write_json
from facefusion.typing import Args, Job, JobSet, JobStatus, JobStep, JobStepStatus from facefusion.types import Args, Job, JobSet, JobStatus, JobStep, JobStepStatus
JOBS_PATH : Optional[str] = None JOBS_PATH : Optional[str] = None
@@ -48,14 +48,17 @@ def submit_job(job_id : str) -> bool:
return False return False
def submit_jobs() -> bool: def submit_jobs(halt_on_error : bool) -> bool:
drafted_job_ids = find_job_ids('drafted') drafted_job_ids = find_job_ids('drafted')
has_error = False
if drafted_job_ids: if drafted_job_ids:
for job_id in drafted_job_ids: for job_id in drafted_job_ids:
if not submit_job(job_id): if not submit_job(job_id):
return False has_error = True
return True if halt_on_error:
return False
return not has_error
return False return False
@@ -63,24 +66,27 @@ def delete_job(job_id : str) -> bool:
return delete_job_file(job_id) return delete_job_file(job_id)
def delete_jobs() -> bool: def delete_jobs(halt_on_error : bool) -> bool:
job_ids = find_job_ids('drafted') + find_job_ids('queued') + find_job_ids('failed') + find_job_ids('completed') job_ids = find_job_ids('drafted') + find_job_ids('queued') + find_job_ids('failed') + find_job_ids('completed')
has_error = False
if job_ids: if job_ids:
for job_id in job_ids: for job_id in job_ids:
if not delete_job(job_id): if not delete_job(job_id):
return False has_error = True
return True if halt_on_error:
return False
return not has_error
return False return False
def find_jobs(job_status : JobStatus) -> JobSet: def find_jobs(job_status : JobStatus) -> JobSet:
job_ids = find_job_ids(job_status) job_ids = find_job_ids(job_status)
jobs : JobSet = {} job_set : JobSet = {}
for job_id in job_ids: for job_id in job_ids:
jobs[job_id] = read_job_file(job_id) job_set[job_id] = read_job_file(job_id)
return jobs return job_set
def find_job_ids(job_status : JobStatus) -> List[str]: def find_job_ids(job_status : JobStatus) -> List[str]:
@@ -90,7 +96,7 @@ def find_job_ids(job_status : JobStatus) -> List[str]:
job_ids = [] job_ids = []
for job_path in job_paths: for job_path in job_paths:
job_id, _ = os.path.splitext(os.path.basename(job_path)) job_id = get_file_name(job_path)
job_ids.append(job_id) job_ids.append(job_id)
return job_ids return job_ids
@@ -182,7 +188,6 @@ def set_step_status(job_id : str, step_index : int, step_status : JobStepStatus)
if job: if job:
steps = job.get('steps') steps = job.get('steps')
if has_step(job_id, step_index): if has_step(job_id, step_index):
steps[step_index]['status'] = step_status steps[step_index]['status'] = step_status
return update_job_file(job_id, job) return update_job_file(job_id, job)

View File

@@ -1,7 +1,7 @@
from facefusion.ffmpeg import concat_video from facefusion.ffmpeg import concat_video
from facefusion.filesystem import is_image, is_video, move_file, remove_file from facefusion.filesystem import are_images, are_videos, move_file, remove_file
from facefusion.jobs import job_helper, job_manager from facefusion.jobs import job_helper, job_manager
from facefusion.typing import JobOutputSet, JobStep, ProcessStep from facefusion.types import JobOutputSet, JobStep, ProcessStep
def run_job(job_id : str, process_step : ProcessStep) -> bool: def run_job(job_id : str, process_step : ProcessStep) -> bool:
@@ -16,14 +16,17 @@ def run_job(job_id : str, process_step : ProcessStep) -> bool:
return False return False
def run_jobs(process_step : ProcessStep) -> bool: def run_jobs(process_step : ProcessStep, halt_on_error : bool) -> bool:
queued_job_ids = job_manager.find_job_ids('queued') queued_job_ids = job_manager.find_job_ids('queued')
has_error = False
if queued_job_ids: if queued_job_ids:
for job_id in queued_job_ids: for job_id in queued_job_ids:
if not run_job(job_id, process_step): if not run_job(job_id, process_step):
return False has_error = True
return True if halt_on_error:
return False
return not has_error
return False return False
@@ -35,14 +38,17 @@ def retry_job(job_id : str, process_step : ProcessStep) -> bool:
return False return False
def retry_jobs(process_step : ProcessStep) -> bool: def retry_jobs(process_step : ProcessStep, halt_on_error : bool) -> bool:
failed_job_ids = job_manager.find_job_ids('failed') failed_job_ids = job_manager.find_job_ids('failed')
has_error = False
if failed_job_ids: if failed_job_ids:
for job_id in failed_job_ids: for job_id in failed_job_ids:
if not retry_job(job_id, process_step): if not retry_job(job_id, process_step):
return False has_error = True
return True if halt_on_error:
return False
return not has_error
return False return False
@@ -73,10 +79,10 @@ def finalize_steps(job_id : str) -> bool:
output_set = collect_output_set(job_id) output_set = collect_output_set(job_id)
for output_path, temp_output_paths in output_set.items(): for output_path, temp_output_paths in output_set.items():
if all(map(is_video, temp_output_paths)): if are_videos(temp_output_paths):
if not concat_video(output_path, temp_output_paths): if not concat_video(output_path, temp_output_paths):
return False return False
if any(map(is_image, temp_output_paths)): if are_images(temp_output_paths):
for temp_output_path in temp_output_paths: for temp_output_path in temp_output_paths:
if not move_file(temp_output_path, output_path): if not move_file(temp_output_path, output_path):
return False return False
@@ -95,12 +101,12 @@ def clean_steps(job_id: str) -> bool:
def collect_output_set(job_id : str) -> JobOutputSet: def collect_output_set(job_id : str) -> JobOutputSet:
steps = job_manager.get_steps(job_id) steps = job_manager.get_steps(job_id)
output_set : JobOutputSet = {} job_output_set : JobOutputSet = {}
for index, step in enumerate(steps): for index, step in enumerate(steps):
output_path = step.get('args').get('output_path') output_path = step.get('args').get('output_path')
if output_path: if output_path:
step_output_path = job_manager.get_step_output_path(job_id, index, output_path) step_output_path = job_manager.get_step_output_path(job_id, index, output_path)
output_set.setdefault(output_path, []).append(step_output_path) job_output_set.setdefault(output_path, []).append(step_output_path)
return output_set return job_output_set

View File

@@ -1,6 +1,6 @@
from typing import List from typing import List
from facefusion.typing import JobStore from facefusion.types import JobStore
JOB_STORE : JobStore =\ JOB_STORE : JobStore =\
{ {

View File

@@ -3,13 +3,13 @@ from json import JSONDecodeError
from typing import Optional from typing import Optional
from facefusion.filesystem import is_file from facefusion.filesystem import is_file
from facefusion.typing import Content from facefusion.types import Content
def read_json(json_path : str) -> Optional[Content]: def read_json(json_path : str) -> Optional[Content]:
if is_file(json_path): if is_file(json_path):
try: try:
with open(json_path, 'r') as json_file: with open(json_path) as json_file:
return json.load(json_file) return json.load(json_file)
except JSONDecodeError: except JSONDecodeError:
pass pass

View File

@@ -1,9 +1,8 @@
from logging import Logger, basicConfig, getLogger from logging import Logger, basicConfig, getLogger
from typing import Tuple
import facefusion.choices import facefusion.choices
from facefusion.common_helper import get_first, get_last from facefusion.common_helper import get_first, get_last
from facefusion.typing import LogLevel, TableContents, TableHeaders from facefusion.types import LogLevel
def init(log_level : LogLevel) -> None: def init(log_level : LogLevel) -> None:
@@ -32,46 +31,15 @@ def error(message : str, module_name : str) -> None:
def create_message(message : str, module_name : str) -> str: def create_message(message : str, module_name : str) -> str:
scopes = module_name.split('.') module_names = module_name.split('.')
first_scope = get_first(scopes) first_module_name = get_first(module_names)
last_scope = get_last(scopes) last_module_name = get_last(module_names)
if first_scope and last_scope: if first_module_name and last_module_name:
return '[' + first_scope.upper() + '.' + last_scope.upper() + '] ' + message return '[' + first_module_name.upper() + '.' + last_module_name.upper() + '] ' + message
return message return message
def table(headers : TableHeaders, contents : TableContents) -> None:
package_logger = get_package_logger()
table_column, table_separator = create_table_parts(headers, contents)
package_logger.info(table_separator)
package_logger.info(table_column.format(*headers))
package_logger.info(table_separator)
for content in contents:
content = [ value if value else '' for value in content ]
package_logger.info(table_column.format(*content))
package_logger.info(table_separator)
def create_table_parts(headers : TableHeaders, contents : TableContents) -> Tuple[str, str]:
column_parts = []
separator_parts = []
widths = [ len(header) for header in headers ]
for content in contents:
for index, value in enumerate(content):
widths[index] = max(widths[index], len(str(value)))
for width in widths:
column_parts.append('{:<' + str(width) + '}')
separator_parts.append('-' * width)
return '| ' + ' | '.join(column_parts) + ' |', '+-' + '-+-'.join(separator_parts) + '-+'
def enable() -> None: def enable() -> None:
get_package_logger().disabled = False get_package_logger().disabled = False

View File

@@ -4,8 +4,8 @@ METADATA =\
{ {
'name': 'FaceFusion', 'name': 'FaceFusion',
'description': 'Industry leading face manipulation platform', 'description': 'Industry leading face manipulation platform',
'version': '3.1.2', 'version': '3.2.0',
'license': 'MIT', 'license': 'OpenRAIL-AS',
'author': 'Henry Ruhs', 'author': 'Henry Ruhs',
'url': 'https://facefusion.io' 'url': 'https://facefusion.io'
} }

View File

@@ -2,7 +2,7 @@ from functools import lru_cache
import onnx import onnx
from facefusion.typing import ModelInitializer from facefusion.types import ModelInitializer
@lru_cache(maxsize = None) @lru_cache(maxsize = None)

View File

@@ -1,6 +1,6 @@
from typing import List, Optional from typing import List, Optional
from facefusion.typing import Fps, Padding from facefusion.types import Fps, Padding
def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]: def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:

View File

@@ -1,6 +1,6 @@
from typing import Generator, List from typing import Generator, List
from facefusion.typing import ProcessState, QueuePayload from facefusion.types import ProcessState, QueuePayload
PROCESS_STATE : ProcessState = 'pending' PROCESS_STATE : ProcessState = 'pending'

View File

@@ -1,8 +1,8 @@
from typing import List, Sequence from typing import List, Sequence
from facefusion.common_helper import create_float_range, create_int_range from facefusion.common_helper import create_float_range, create_int_range
from facefusion.filesystem import list_directory, resolve_relative_path from facefusion.filesystem import get_file_name, resolve_file_paths, resolve_relative_path
from facefusion.processors.typing import AgeModifierModel, DeepSwapperModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel from facefusion.processors.types import AgeModifierModel, DeepSwapperModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ] age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ]
deep_swapper_models : List[DeepSwapperModel] =\ deep_swapper_models : List[DeepSwapperModel] =\
@@ -75,6 +75,7 @@ deep_swapper_models : List[DeepSwapperModel] =\
'druuzil/seth_macfarlane_384', 'druuzil/seth_macfarlane_384',
'druuzil/thomas_cruise_320', 'druuzil/thomas_cruise_320',
'druuzil/thomas_hanks_384', 'druuzil/thomas_hanks_384',
'druuzil/william_murray_384',
'edel/emma_roberts_224', 'edel/emma_roberts_224',
'edel/ivanka_trump_224', 'edel/ivanka_trump_224',
'edel/lize_dzjabrailova_224', 'edel/lize_dzjabrailova_224',
@@ -157,12 +158,12 @@ deep_swapper_models : List[DeepSwapperModel] =\
'rumateus/taylor_swift_224' 'rumateus/taylor_swift_224'
] ]
custom_model_files = list_directory(resolve_relative_path('../.assets/models/custom')) custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom'))
if custom_model_files: if custom_model_file_paths:
for model_file in custom_model_files: for model_file_path in custom_model_file_paths:
model_id = '/'.join([ 'custom', model_file.get('name') ]) model_id = '/'.join([ 'custom', get_file_name(model_file_path) ])
deep_swapper_models.append(model_id) deep_swapper_models.append(model_id)
expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ] expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ]

View File

@@ -9,7 +9,7 @@ from tqdm import tqdm
from facefusion import logger, state_manager, wording from facefusion import logger, state_manager, wording
from facefusion.exit_helper import hard_exit from facefusion.exit_helper import hard_exit
from facefusion.typing import ProcessFrames, QueuePayload from facefusion.types import ProcessFrames, QueuePayload
PROCESSORS_METHODS =\ PROCESSORS_METHODS =\
[ [

View File

@@ -3,7 +3,7 @@ from typing import Tuple
import numpy import numpy
import scipy import scipy
from facefusion.processors.typing import LivePortraitExpression, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitYaw from facefusion.processors.types import LivePortraitExpression, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitYaw
EXPRESSION_MIN = numpy.array( EXPRESSION_MIN = numpy.array(
[ [

View File

@@ -20,10 +20,10 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import AgeModifierDirection, AgeModifierInputs from facefusion.processors.types import AgeModifierDirection, AgeModifierInputs
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore from facefusion.thread_helper import thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import match_frame_color, read_image, read_static_image, write_image from facefusion.vision import match_frame_color, read_image, read_static_image, write_image
@@ -64,24 +64,27 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('age_modifier_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('age_modifier_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
age_modifier_model = state_manager.get_item('age_modifier_model') model_name = state_manager.get_item('age_modifier_model')
return create_static_model_set('full').get(age_modifier_model) return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--age-modifier-model', help = wording.get('help.age_modifier_model'), default = config.get_str_value('processors.age_modifier_model', 'styleganex_age'), choices = processors_choices.age_modifier_models) group_processors.add_argument('--age-modifier-model', help = wording.get('help.age_modifier_model'), default = config.get_str_value('processors', 'age_modifier_model', 'styleganex_age'), choices = processors_choices.age_modifier_models)
group_processors.add_argument('--age-modifier-direction', help = wording.get('help.age_modifier_direction'), type = int, default = config.get_int_value('processors.age_modifier_direction', '0'), choices = processors_choices.age_modifier_direction_range, metavar = create_int_metavar(processors_choices.age_modifier_direction_range)) group_processors.add_argument('--age-modifier-direction', help = wording.get('help.age_modifier_direction'), type = int, default = config.get_int_value('processors', 'age_modifier_direction', '0'), choices = processors_choices.age_modifier_direction_range, metavar = create_int_metavar(processors_choices.age_modifier_direction_range))
facefusion.jobs.job_store.register_step_keys([ 'age_modifier_model', 'age_modifier_direction' ]) facefusion.jobs.job_store.register_step_keys([ 'age_modifier_model', 'age_modifier_direction' ])
@@ -91,10 +94,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -104,7 +107,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -145,7 +148,7 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFra
crop_vision_frame = prepare_vision_frame(crop_vision_frame) crop_vision_frame = prepare_vision_frame(crop_vision_frame)
extend_vision_frame = prepare_vision_frame(extend_vision_frame) extend_vision_frame = prepare_vision_frame(extend_vision_frame)
age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [-100, 100], [2.5, -2.5])).astype(numpy.float32) age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [ -100, 100 ], [ 2.5, -2.5 ])).astype(numpy.float32)
extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction) extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction)
extend_vision_frame = normalize_extend_frame(extend_vision_frame) extend_vision_frame = normalize_extend_frame(extend_vision_frame)
extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame) extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame)

View File

@@ -17,12 +17,12 @@ from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_occlusion_mask, create_region_mask, create_static_box_mask from facefusion.face_masker import create_occlusion_mask, create_region_mask, create_static_box_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, list_directory, resolve_relative_path, same_file_extension from facefusion.filesystem import get_file_name, in_directory, is_image, is_video, resolve_file_paths, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import DeepSwapperInputs, DeepSwapperMorph from facefusion.processors.types import DeepSwapperInputs, DeepSwapperMorph
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore from facefusion.thread_helper import thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import conditional_match_frame_color, read_image, read_static_image, write_image from facefusion.vision import conditional_match_frame_color, read_image, read_static_image, write_image
@@ -101,6 +101,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
('druuzil', 'seth_macfarlane_384'), ('druuzil', 'seth_macfarlane_384'),
('druuzil', 'thomas_cruise_320'), ('druuzil', 'thomas_cruise_320'),
('druuzil', 'thomas_hanks_384'), ('druuzil', 'thomas_hanks_384'),
('druuzil', 'william_murray_384'),
('edel', 'emma_roberts_224'), ('edel', 'emma_roberts_224'),
('edel', 'ivanka_trump_224'), ('edel', 'ivanka_trump_224'),
('edel', 'lize_dzjabrailova_224'), ('edel', 'lize_dzjabrailova_224'),
@@ -216,12 +217,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'template': 'dfl_whole_face' 'template': 'dfl_whole_face'
} }
custom_model_files = list_directory(resolve_relative_path('../.assets/models/custom')) custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom'))
if custom_model_files: if custom_model_file_paths:
for model_file in custom_model_files: for model_file_path in custom_model_file_paths:
model_id = '/'.join([ 'custom', model_file.get('name') ]) model_id = '/'.join([ 'custom', get_file_name(model_file_path) ])
model_set[model_id] =\ model_set[model_id] =\
{ {
@@ -229,7 +230,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{ {
'deep_swapper': 'deep_swapper':
{ {
'path': resolve_relative_path(model_file.get('path')) 'path': resolve_relative_path(model_file_path)
} }
}, },
'template': 'dfl_whole_face' 'template': 'dfl_whole_face'
@@ -239,33 +240,37 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('deep_swapper_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('deep_swapper_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
deep_swapper_model = state_manager.get_item('deep_swapper_model') model_name = state_manager.get_item('deep_swapper_model')
return create_static_model_set('full').get(deep_swapper_model) return create_static_model_set('full').get(model_name)
def get_model_size() -> Size: def get_model_size() -> Size:
deep_swapper = get_inference_pool().get('deep_swapper') deep_swapper = get_inference_pool().get('deep_swapper')
deep_swapper_outputs = deep_swapper.get_outputs()
for deep_swapper_output in deep_swapper_outputs: for deep_swapper_input in deep_swapper.get_inputs():
return deep_swapper_output.shape[1:3] if deep_swapper_input.name == 'in_face:0':
return deep_swapper_input.shape[1:3]
return 0, 0 return 0, 0
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors.deep_swapper_model', 'iperov/elon_musk_224'), choices = processors_choices.deep_swapper_models) group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors', 'deep_swapper_model', 'iperov/elon_musk_224'), choices = processors_choices.deep_swapper_models)
group_processors.add_argument('--deep-swapper-morph', help = wording.get('help.deep_swapper_morph'), type = int, default = config.get_int_value('processors.deep_swapper_morph', '80'), choices = processors_choices.deep_swapper_morph_range, metavar = create_int_metavar(processors_choices.deep_swapper_morph_range)) group_processors.add_argument('--deep-swapper-morph', help = wording.get('help.deep_swapper_morph'), type = int, default = config.get_int_value('processors', 'deep_swapper_morph', '100'), choices = processors_choices.deep_swapper_morph_range, metavar = create_int_metavar(processors_choices.deep_swapper_morph_range))
facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model', 'deep_swapper_morph' ]) facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model', 'deep_swapper_morph' ])
@@ -275,11 +280,11 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
if model_hashes and model_sources: if model_hash_set and model_source_set:
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
return True return True
@@ -290,7 +295,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -362,6 +367,7 @@ def has_morph_input() -> bool:
for deep_swapper_input in deep_swapper.get_inputs(): for deep_swapper_input in deep_swapper.get_inputs():
if deep_swapper_input.name == 'morph_value:0': if deep_swapper_input.name == 'morph_value:0':
return True return True
return False return False

View File

@@ -19,12 +19,11 @@ from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_expression from facefusion.processors.live_portrait import create_rotation, limit_expression
from facefusion.processors.typing import ExpressionRestorerInputs from facefusion.processors.types import ExpressionRestorerInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.processors.typing import LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import get_video_frame, read_image, read_static_image, write_image from facefusion.vision import read_image, read_static_image, read_video_frame, write_image
@lru_cache(maxsize = None) @lru_cache(maxsize = None)
@@ -69,32 +68,35 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx')
} }
}, },
'template': 'arcface_128_v2', 'template': 'arcface_128',
'size': (512, 512) 'size': (512, 512)
} }
} }
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('expression_restorer_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('expression_restorer_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
expression_restorer_model = state_manager.get_item('expression_restorer_model') model_name = state_manager.get_item('expression_restorer_model')
return create_static_model_set('full').get(expression_restorer_model) return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--expression-restorer-model', help = wording.get('help.expression_restorer_model'), default = config.get_str_value('processors.expression_restorer_model', 'live_portrait'), choices = processors_choices.expression_restorer_models) group_processors.add_argument('--expression-restorer-model', help = wording.get('help.expression_restorer_model'), default = config.get_str_value('processors', 'expression_restorer_model', 'live_portrait'), choices = processors_choices.expression_restorer_models)
group_processors.add_argument('--expression-restorer-factor', help = wording.get('help.expression_restorer_factor'), type = int, default = config.get_int_value('processors.expression_restorer_factor', '80'), choices = processors_choices.expression_restorer_factor_range, metavar = create_int_metavar(processors_choices.expression_restorer_factor_range)) group_processors.add_argument('--expression-restorer-factor', help = wording.get('help.expression_restorer_factor'), type = int, default = config.get_int_value('processors', 'expression_restorer_factor', '80'), choices = processors_choices.expression_restorer_factor_range, metavar = create_int_metavar(processors_choices.expression_restorer_factor_range))
facefusion.jobs.job_store.register_step_keys([ 'expression_restorer_model','expression_restorer_factor' ]) facefusion.jobs.job_store.register_step_keys([ 'expression_restorer_model', 'expression_restorer_factor' ])
def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
@@ -103,10 +105,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -119,7 +121,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -265,7 +267,7 @@ def process_frames(source_path : List[str], queue_payloads : List[QueuePayload],
frame_number = queue_payload.get('frame_number') frame_number = queue_payload.get('frame_number')
if state_manager.get_item('trim_frame_start'): if state_manager.get_item('trim_frame_start'):
frame_number += state_manager.get_item('trim_frame_start') frame_number += state_manager.get_item('trim_frame_start')
source_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number) source_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number)
target_vision_path = queue_payload.get('frame_path') target_vision_path = queue_payload.get('frame_path')
target_vision_frame = read_image(target_vision_path) target_vision_frame = read_image(target_vision_path)
output_vision_frame = process_frame( output_vision_frame = process_frame(

View File

@@ -15,9 +15,9 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, same_file_extension from facefusion.filesystem import in_directory, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import FaceDebuggerInputs from facefusion.processors.types import FaceDebuggerInputs
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, Face, InferencePool, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, write_image from facefusion.vision import read_image, read_static_image, write_image
@@ -32,7 +32,7 @@ def clear_inference_pool() -> None:
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(processors_choices.face_debugger_items)), default = config.get_str_list('processors.face_debugger_items', 'face-landmark-5/68 face-mask'), choices = processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS') group_processors.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(processors_choices.face_debugger_items)), default = config.get_str_list('processors', 'face_debugger_items', 'face-landmark-5/68 face-mask'), choices = processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
facefusion.jobs.job_store.register_step_keys([ 'face_debugger_items' ]) facefusion.jobs.job_store.register_step_keys([ 'face_debugger_items' ])
@@ -48,7 +48,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -90,7 +90,7 @@ def debug_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFra
cv2.line(temp_vision_frame, (x1, y1), (x1, y2), primary_light_color, 3) cv2.line(temp_vision_frame, (x1, y1), (x1, y2), primary_light_color, 3)
if 'face-mask' in face_debugger_items: if 'face-mask' in face_debugger_items:
crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), 'arcface_128_v2', (512, 512)) crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), 'arcface_128', (512, 512))
inverse_matrix = cv2.invertAffineTransform(affine_matrix) inverse_matrix = cv2.invertAffineTransform(affine_matrix)
temp_size = temp_vision_frame.shape[:2][::-1] temp_size = temp_vision_frame.shape[:2][::-1]
crop_masks = [] crop_masks = []

View File

@@ -19,10 +19,10 @@ from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_euler_angles, limit_expression from facefusion.processors.live_portrait import create_rotation, limit_euler_angles, limit_expression
from facefusion.processors.typing import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw from facefusion.processors.types import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, write_image from facefusion.vision import read_image, read_static_image, write_image
@@ -105,37 +105,40 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('face_editor_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('face_editor_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
face_editor_model = state_manager.get_item('face_editor_model') model_name = state_manager.get_item('face_editor_model')
return create_static_model_set('full').get(face_editor_model) return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--face-editor-model', help = wording.get('help.face_editor_model'), default = config.get_str_value('processors.face_editor_model', 'live_portrait'), choices = processors_choices.face_editor_models) group_processors.add_argument('--face-editor-model', help = wording.get('help.face_editor_model'), default = config.get_str_value('processors', 'face_editor_model', 'live_portrait'), choices = processors_choices.face_editor_models)
group_processors.add_argument('--face-editor-eyebrow-direction', help = wording.get('help.face_editor_eyebrow_direction'), type = float, default = config.get_float_value('processors.face_editor_eyebrow_direction', '0'), choices = processors_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(processors_choices.face_editor_eyebrow_direction_range)) group_processors.add_argument('--face-editor-eyebrow-direction', help = wording.get('help.face_editor_eyebrow_direction'), type = float, default = config.get_float_value('processors', 'face_editor_eyebrow_direction', '0'), choices = processors_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(processors_choices.face_editor_eyebrow_direction_range))
group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = wording.get('help.face_editor_eye_gaze_horizontal'), type = float, default = config.get_float_value('processors.face_editor_eye_gaze_horizontal', '0'), choices = processors_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_horizontal_range)) group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = wording.get('help.face_editor_eye_gaze_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_horizontal', '0'), choices = processors_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_horizontal_range))
group_processors.add_argument('--face-editor-eye-gaze-vertical', help = wording.get('help.face_editor_eye_gaze_vertical'), type = float, default = config.get_float_value('processors.face_editor_eye_gaze_vertical', '0'), choices = processors_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_vertical_range)) group_processors.add_argument('--face-editor-eye-gaze-vertical', help = wording.get('help.face_editor_eye_gaze_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_vertical', '0'), choices = processors_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_vertical_range))
group_processors.add_argument('--face-editor-eye-open-ratio', help = wording.get('help.face_editor_eye_open_ratio'), type = float, default = config.get_float_value('processors.face_editor_eye_open_ratio', '0'), choices = processors_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_eye_open_ratio_range)) group_processors.add_argument('--face-editor-eye-open-ratio', help = wording.get('help.face_editor_eye_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_eye_open_ratio', '0'), choices = processors_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_eye_open_ratio_range))
group_processors.add_argument('--face-editor-lip-open-ratio', help = wording.get('help.face_editor_lip_open_ratio'), type = float, default = config.get_float_value('processors.face_editor_lip_open_ratio', '0'), choices = processors_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_lip_open_ratio_range)) group_processors.add_argument('--face-editor-lip-open-ratio', help = wording.get('help.face_editor_lip_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_lip_open_ratio', '0'), choices = processors_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_lip_open_ratio_range))
group_processors.add_argument('--face-editor-mouth-grim', help = wording.get('help.face_editor_mouth_grim'), type = float, default = config.get_float_value('processors.face_editor_mouth_grim', '0'), choices = processors_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_grim_range)) group_processors.add_argument('--face-editor-mouth-grim', help = wording.get('help.face_editor_mouth_grim'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_grim', '0'), choices = processors_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_grim_range))
group_processors.add_argument('--face-editor-mouth-pout', help = wording.get('help.face_editor_mouth_pout'), type = float, default = config.get_float_value('processors.face_editor_mouth_pout', '0'), choices = processors_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_pout_range)) group_processors.add_argument('--face-editor-mouth-pout', help = wording.get('help.face_editor_mouth_pout'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_pout', '0'), choices = processors_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_pout_range))
group_processors.add_argument('--face-editor-mouth-purse', help = wording.get('help.face_editor_mouth_purse'), type = float, default = config.get_float_value('processors.face_editor_mouth_purse', '0'), choices = processors_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_purse_range)) group_processors.add_argument('--face-editor-mouth-purse', help = wording.get('help.face_editor_mouth_purse'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_purse', '0'), choices = processors_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_purse_range))
group_processors.add_argument('--face-editor-mouth-smile', help = wording.get('help.face_editor_mouth_smile'), type = float, default = config.get_float_value('processors.face_editor_mouth_smile', '0'), choices = processors_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_smile_range)) group_processors.add_argument('--face-editor-mouth-smile', help = wording.get('help.face_editor_mouth_smile'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_smile', '0'), choices = processors_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_smile_range))
group_processors.add_argument('--face-editor-mouth-position-horizontal', help = wording.get('help.face_editor_mouth_position_horizontal'), type = float, default = config.get_float_value('processors.face_editor_mouth_position_horizontal', '0'), choices = processors_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_horizontal_range)) group_processors.add_argument('--face-editor-mouth-position-horizontal', help = wording.get('help.face_editor_mouth_position_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_horizontal', '0'), choices = processors_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_horizontal_range))
group_processors.add_argument('--face-editor-mouth-position-vertical', help = wording.get('help.face_editor_mouth_position_vertical'), type = float, default = config.get_float_value('processors.face_editor_mouth_position_vertical', '0'), choices = processors_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_vertical_range)) group_processors.add_argument('--face-editor-mouth-position-vertical', help = wording.get('help.face_editor_mouth_position_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_vertical', '0'), choices = processors_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_vertical_range))
group_processors.add_argument('--face-editor-head-pitch', help = wording.get('help.face_editor_head_pitch'), type = float, default = config.get_float_value('processors.face_editor_head_pitch', '0'), choices = processors_choices.face_editor_head_pitch_range, metavar = create_float_metavar(processors_choices.face_editor_head_pitch_range)) group_processors.add_argument('--face-editor-head-pitch', help = wording.get('help.face_editor_head_pitch'), type = float, default = config.get_float_value('processors', 'face_editor_head_pitch', '0'), choices = processors_choices.face_editor_head_pitch_range, metavar = create_float_metavar(processors_choices.face_editor_head_pitch_range))
group_processors.add_argument('--face-editor-head-yaw', help = wording.get('help.face_editor_head_yaw'), type = float, default = config.get_float_value('processors.face_editor_head_yaw', '0'), choices = processors_choices.face_editor_head_yaw_range, metavar = create_float_metavar(processors_choices.face_editor_head_yaw_range)) group_processors.add_argument('--face-editor-head-yaw', help = wording.get('help.face_editor_head_yaw'), type = float, default = config.get_float_value('processors', 'face_editor_head_yaw', '0'), choices = processors_choices.face_editor_head_yaw_range, metavar = create_float_metavar(processors_choices.face_editor_head_yaw_range))
group_processors.add_argument('--face-editor-head-roll', help = wording.get('help.face_editor_head_roll'), type = float, default = config.get_float_value('processors.face_editor_head_roll', '0'), choices = processors_choices.face_editor_head_roll_range, metavar = create_float_metavar(processors_choices.face_editor_head_roll_range)) group_processors.add_argument('--face-editor-head-roll', help = wording.get('help.face_editor_head_roll'), type = float, default = config.get_float_value('processors', 'face_editor_head_roll', '0'), choices = processors_choices.face_editor_head_roll_range, metavar = create_float_metavar(processors_choices.face_editor_head_roll_range))
facefusion.jobs.job_store.register_step_keys([ 'face_editor_model', 'face_editor_eyebrow_direction', 'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical', 'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio', 'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse', 'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal', 'face_editor_mouth_position_vertical', 'face_editor_head_pitch', 'face_editor_head_yaw', 'face_editor_head_roll' ]) facefusion.jobs.job_store.register_step_keys([ 'face_editor_model', 'face_editor_eyebrow_direction', 'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical', 'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio', 'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse', 'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal', 'face_editor_mouth_position_vertical', 'face_editor_head_pitch', 'face_editor_head_yaw', 'face_editor_head_roll' ])
@@ -158,10 +161,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -171,7 +174,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -360,7 +363,7 @@ def edit_lip_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : F
else: else:
lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 1.0 ] ]) lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 1.0 ] ])
lip_motion_points = lip_motion_points.reshape(1, -1).astype(numpy.float32) lip_motion_points = lip_motion_points.reshape(1, -1).astype(numpy.float32)
lip_motion_points = forward_retarget_lip(lip_motion_points) * numpy.abs(face_editor_lip_open_ratio) lip_motion_points = forward_retarget_lip(lip_motion_points) * numpy.abs(face_editor_lip_open_ratio)
lip_motion_points = lip_motion_points.reshape(-1, 21, 3) lip_motion_points = lip_motion_points.reshape(-1, 21, 3)
return lip_motion_points return lip_motion_points

View File

@@ -18,10 +18,10 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import FaceEnhancerInputs, FaceEnhancerWeight from facefusion.processors.types import FaceEnhancerInputs, FaceEnhancerWeight
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore from facefusion.thread_helper import thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, write_image from facefusion.vision import read_image, read_static_image, write_image
@@ -131,7 +131,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx') 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx')
} }
}, },
'template': 'arcface_128_v2', 'template': 'arcface_128',
'size': (256, 256) 'size': (256, 256)
}, },
'gpen_bfr_512': 'gpen_bfr_512':
@@ -222,25 +222,28 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('face_enhancer_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('face_enhancer_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
face_enhancer_model = state_manager.get_item('face_enhancer_model') model_name = state_manager.get_item('face_enhancer_model')
return create_static_model_set('full').get(face_enhancer_model) return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('processors.face_enhancer_model', 'gfpgan_1.4'), choices = processors_choices.face_enhancer_models) group_processors.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('processors', 'face_enhancer_model', 'gfpgan_1.4'), choices = processors_choices.face_enhancer_models)
group_processors.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('processors.face_enhancer_blend', '80'), choices = processors_choices.face_enhancer_blend_range, metavar = create_int_metavar(processors_choices.face_enhancer_blend_range)) group_processors.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('processors', 'face_enhancer_blend', '80'), choices = processors_choices.face_enhancer_blend_range, metavar = create_int_metavar(processors_choices.face_enhancer_blend_range))
group_processors.add_argument('--face-enhancer-weight', help = wording.get('help.face_enhancer_weight'), type = float, default = config.get_float_value('processors.face_enhancer_weight', '1.0'), choices = processors_choices.face_enhancer_weight_range, metavar = create_float_metavar(processors_choices.face_enhancer_weight_range)) group_processors.add_argument('--face-enhancer-weight', help = wording.get('help.face_enhancer_weight'), type = float, default = config.get_float_value('processors', 'face_enhancer_weight', '1.0'), choices = processors_choices.face_enhancer_weight_range, metavar = create_float_metavar(processors_choices.face_enhancer_weight_range))
facefusion.jobs.job_store.register_step_keys([ 'face_enhancer_model', 'face_enhancer_blend', 'face_enhancer_weight' ]) facefusion.jobs.job_store.register_step_keys([ 'face_enhancer_model', 'face_enhancer_blend', 'face_enhancer_weight' ])
@@ -251,10 +254,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -264,7 +267,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -329,6 +332,7 @@ def has_weight_input() -> bool:
for deep_swapper_input in face_enhancer.get_inputs(): for deep_swapper_input in face_enhancer.get_inputs():
if deep_swapper_input.name == 'weight': if deep_swapper_input.name == 'weight':
return True return True
return False return False

View File

@@ -21,10 +21,10 @@ from facefusion.filesystem import filter_image_paths, has_image, in_directory, i
from facefusion.model_helper import get_static_model_initializer from facefusion.model_helper import get_static_model_initializer
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.pixel_boost import explode_pixel_boost, implode_pixel_boost from facefusion.processors.pixel_boost import explode_pixel_boost, implode_pixel_boost
from facefusion.processors.typing import FaceSwapperInputs from facefusion.processors.types import FaceSwapperInputs
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Embedding, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Embedding, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, read_static_images, unpack_resolution, write_image from facefusion.vision import read_image, read_static_image, read_static_images, unpack_resolution, write_image
@@ -211,7 +211,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
} }
}, },
'type': 'inswapper', 'type': 'inswapper',
'template': 'arcface_128_v2', 'template': 'arcface_128',
'size': (128, 128), 'size': (128, 128),
'mean': [ 0.0, 0.0, 0.0 ], 'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ] 'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -235,7 +235,7 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
} }
}, },
'type': 'inswapper', 'type': 'inswapper',
'template': 'arcface_128_v2', 'template': 'arcface_128',
'size': (128, 128), 'size': (128, 128),
'mean': [ 0.0, 0.0, 0.0 ], 'mean': [ 0.0, 0.0, 0.0 ],
'standard_deviation': [ 1.0, 1.0, 1.0 ] 'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -336,29 +336,37 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ get_model_name() ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ get_model_name() ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
face_swapper_model = state_manager.get_item('face_swapper_model') model_name = get_model_name()
return create_static_model_set('full').get(model_name)
if has_execution_provider('coreml') and face_swapper_model == 'inswapper_128_fp16':
return create_static_model_set('full').get('inswapper_128') def get_model_name() -> str:
return create_static_model_set('full').get(face_swapper_model) model_name = state_manager.get_item('face_swapper_model')
if has_execution_provider('coreml') and model_name == 'inswapper_128_fp16':
return 'inswapper_128'
return model_name
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('processors.face_swapper_model', 'inswapper_128_fp16'), choices = processors_choices.face_swapper_models) group_processors.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('processors', 'face_swapper_model', 'inswapper_128_fp16'), choices = processors_choices.face_swapper_models)
known_args, _ = program.parse_known_args() known_args, _ = program.parse_known_args()
face_swapper_pixel_boost_choices = processors_choices.face_swapper_set.get(known_args.face_swapper_model) face_swapper_pixel_boost_choices = processors_choices.face_swapper_set.get(known_args.face_swapper_model)
group_processors.add_argument('--face-swapper-pixel-boost', help = wording.get('help.face_swapper_pixel_boost'), default = config.get_str_value('processors.face_swapper_pixel_boost', get_first(face_swapper_pixel_boost_choices)), choices = face_swapper_pixel_boost_choices) group_processors.add_argument('--face-swapper-pixel-boost', help = wording.get('help.face_swapper_pixel_boost'), default = config.get_str_value('processors', 'face_swapper_pixel_boost', get_first(face_swapper_pixel_boost_choices)), choices = face_swapper_pixel_boost_choices)
facefusion.jobs.job_store.register_step_keys([ 'face_swapper_model', 'face_swapper_pixel_boost' ]) facefusion.jobs.job_store.register_step_keys([ 'face_swapper_model', 'face_swapper_pixel_boost' ])
@@ -368,10 +376,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -390,7 +398,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True

View File

@@ -11,12 +11,13 @@ import facefusion.processors.core as processors
from facefusion import config, content_analyser, inference_manager, logger, process_manager, state_manager, wording from facefusion import config, content_analyser, inference_manager, logger, process_manager, state_manager, wording
from facefusion.common_helper import create_int_metavar from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import FrameColorizerInputs from facefusion.processors.types import FrameColorizerInputs
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore from facefusion.thread_helper import thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, ExecutionProvider, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, unpack_resolution, write_image from facefusion.vision import read_image, read_static_image, unpack_resolution, write_image
@@ -128,25 +129,34 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('frame_colorizer_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('frame_colorizer_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def resolve_execution_providers() -> List[ExecutionProvider]:
if has_execution_provider('coreml'):
return [ 'cpu' ]
return state_manager.get_item('execution_providers')
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
frame_colorizer_model = state_manager.get_item('frame_colorizer_model') model_name = state_manager.get_item('frame_colorizer_model')
return create_static_model_set('full').get(frame_colorizer_model) return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--frame-colorizer-model', help = wording.get('help.frame_colorizer_model'), default = config.get_str_value('processors.frame_colorizer_model', 'ddcolor'), choices = processors_choices.frame_colorizer_models) group_processors.add_argument('--frame-colorizer-model', help = wording.get('help.frame_colorizer_model'), default = config.get_str_value('processors', 'frame_colorizer_model', 'ddcolor'), choices = processors_choices.frame_colorizer_models)
group_processors.add_argument('--frame-colorizer-size', help = wording.get('help.frame_colorizer_size'), type = str, default = config.get_str_value('processors.frame_colorizer_size', '256x256'), choices = processors_choices.frame_colorizer_sizes) group_processors.add_argument('--frame-colorizer-size', help = wording.get('help.frame_colorizer_size'), type = str, default = config.get_str_value('processors', 'frame_colorizer_size', '256x256'), choices = processors_choices.frame_colorizer_sizes)
group_processors.add_argument('--frame-colorizer-blend', help = wording.get('help.frame_colorizer_blend'), type = int, default = config.get_int_value('processors.frame_colorizer_blend', '100'), choices = processors_choices.frame_colorizer_blend_range, metavar = create_int_metavar(processors_choices.frame_colorizer_blend_range)) group_processors.add_argument('--frame-colorizer-blend', help = wording.get('help.frame_colorizer_blend'), type = int, default = config.get_int_value('processors', 'frame_colorizer_blend', '100'), choices = processors_choices.frame_colorizer_blend_range, metavar = create_int_metavar(processors_choices.frame_colorizer_blend_range))
facefusion.jobs.job_store.register_step_keys([ 'frame_colorizer_model', 'frame_colorizer_blend', 'frame_colorizer_size' ]) facefusion.jobs.job_store.register_step_keys([ 'frame_colorizer_model', 'frame_colorizer_blend', 'frame_colorizer_size' ])
@@ -157,10 +167,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -170,7 +180,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True

View File

@@ -14,10 +14,10 @@ from facefusion.download import conditional_download_hashes, conditional_downloa
from facefusion.execution import has_execution_provider from facefusion.execution import has_execution_provider
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import FrameEnhancerInputs from facefusion.processors.types import FrameEnhancerInputs
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import create_tile_frames, merge_tile_frames, read_image, read_static_image, write_image from facefusion.vision import create_tile_frames, merge_tile_frames, read_image, read_static_image, write_image
@@ -386,32 +386,40 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ get_frame_enhancer_model() ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ get_frame_enhancer_model() ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
model_name = get_frame_enhancer_model()
return create_static_model_set('full').get(model_name)
def get_frame_enhancer_model() -> str:
frame_enhancer_model = state_manager.get_item('frame_enhancer_model') frame_enhancer_model = state_manager.get_item('frame_enhancer_model')
if has_execution_provider('coreml'): if has_execution_provider('coreml'):
if frame_enhancer_model == 'real_esrgan_x2_fp16': if frame_enhancer_model == 'real_esrgan_x2_fp16':
return create_static_model_set('full').get('real_esrgan_x2') return 'real_esrgan_x2'
if frame_enhancer_model == 'real_esrgan_x4_fp16': if frame_enhancer_model == 'real_esrgan_x4_fp16':
return create_static_model_set('full').get('real_esrgan_x4') return 'real_esrgan_x4'
if frame_enhancer_model == 'real_esrgan_x8_fp16': if frame_enhancer_model == 'real_esrgan_x8_fp16':
return create_static_model_set('full').get('real_esrgan_x8') return 'real_esrgan_x8'
return create_static_model_set('full').get(frame_enhancer_model) return frame_enhancer_model
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--frame-enhancer-model', help = wording.get('help.frame_enhancer_model'), default = config.get_str_value('processors.frame_enhancer_model', 'span_kendata_x4'), choices = processors_choices.frame_enhancer_models) group_processors.add_argument('--frame-enhancer-model', help = wording.get('help.frame_enhancer_model'), default = config.get_str_value('processors', 'frame_enhancer_model', 'span_kendata_x4'), choices = processors_choices.frame_enhancer_models)
group_processors.add_argument('--frame-enhancer-blend', help = wording.get('help.frame_enhancer_blend'), type = int, default = config.get_int_value('processors.frame_enhancer_blend', '80'), choices = processors_choices.frame_enhancer_blend_range, metavar = create_int_metavar(processors_choices.frame_enhancer_blend_range)) group_processors.add_argument('--frame-enhancer-blend', help = wording.get('help.frame_enhancer_blend'), type = int, default = config.get_int_value('processors', 'frame_enhancer_blend', '80'), choices = processors_choices.frame_enhancer_blend_range, metavar = create_int_metavar(processors_choices.frame_enhancer_blend_range))
facefusion.jobs.job_store.register_step_keys([ 'frame_enhancer_model', 'frame_enhancer_blend' ]) facefusion.jobs.job_store.register_step_keys([ 'frame_enhancer_model', 'frame_enhancer_blend' ])
@@ -421,10 +429,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -434,7 +442,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True
@@ -479,7 +487,7 @@ def forward(tile_vision_frame : VisionFrame) -> VisionFrame:
def prepare_tile_frame(vision_tile_frame : VisionFrame) -> VisionFrame: def prepare_tile_frame(vision_tile_frame : VisionFrame) -> VisionFrame:
vision_tile_frame = numpy.expand_dims(vision_tile_frame[:, :, ::-1], axis = 0) vision_tile_frame = numpy.expand_dims(vision_tile_frame[:, :, ::-1], axis = 0)
vision_tile_frame = vision_tile_frame.transpose(0, 3, 1, 2) vision_tile_frame = vision_tile_frame.transpose(0, 3, 1, 2)
vision_tile_frame = vision_tile_frame.astype(numpy.float32) / 255 vision_tile_frame = vision_tile_frame.astype(numpy.float32) / 255.0
return vision_tile_frame return vision_tile_frame

View File

@@ -19,10 +19,10 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces from facefusion.face_store import get_reference_faces
from facefusion.filesystem import filter_audio_paths, has_audio, in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.filesystem import filter_audio_paths, has_audio, in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import LipSyncerInputs from facefusion.processors.types import LipSyncerInputs
from facefusion.program_helper import find_argument_group from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import ApplyStateItem, Args, AudioFrame, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.types import ApplyStateItem, Args, AudioFrame, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, restrict_video_fps, write_image from facefusion.vision import read_image, read_static_image, restrict_video_fps, write_image
@@ -74,23 +74,26 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
def get_inference_pool() -> InferencePool: def get_inference_pool() -> InferencePool:
model_sources = get_model_options().get('sources') model_names = [ state_manager.get_item('lip_syncer_model') ]
return inference_manager.get_inference_pool(__name__, model_sources) model_source_set = get_model_options().get('sources')
return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None: def clear_inference_pool() -> None:
inference_manager.clear_inference_pool(__name__) model_names = [ state_manager.get_item('lip_syncer_model') ]
inference_manager.clear_inference_pool(__name__, model_names)
def get_model_options() -> ModelOptions: def get_model_options() -> ModelOptions:
lip_syncer_model = state_manager.get_item('lip_syncer_model') model_name = state_manager.get_item('lip_syncer_model')
return create_static_model_set('full').get(lip_syncer_model) return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None: def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors') group_processors = find_argument_group(program, 'processors')
if group_processors: if group_processors:
group_processors.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('processors.lip_syncer_model', 'wav2lip_gan_96'), choices = processors_choices.lip_syncer_models) group_processors.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('processors', 'lip_syncer_model', 'wav2lip_gan_96'), choices = processors_choices.lip_syncer_models)
facefusion.jobs.job_store.register_step_keys([ 'lip_syncer_model' ]) facefusion.jobs.job_store.register_step_keys([ 'lip_syncer_model' ])
@@ -99,10 +102,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
def pre_check() -> bool: def pre_check() -> bool:
model_hashes = get_model_options().get('hashes') model_hash_set = get_model_options().get('hashes')
model_sources = get_model_options().get('sources') model_source_set = get_model_options().get('sources')
return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool: def pre_process(mode : ProcessMode) -> bool:
@@ -115,7 +118,7 @@ def pre_process(mode : ProcessMode) -> bool:
if mode == 'output' and not in_directory(state_manager.get_item('output_path')): if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
return False return False
if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
return False return False
return True return True

View File

@@ -3,7 +3,7 @@ from typing import List
import numpy import numpy
from cv2.typing import Size from cv2.typing import Size
from facefusion.typing import VisionFrame from facefusion.types import VisionFrame
def implode_pixel_boost(crop_vision_frame : VisionFrame, pixel_boost_total : int, model_size : Size) -> VisionFrame: def implode_pixel_boost(crop_vision_frame : VisionFrame, pixel_boost_total : int, model_size : Size) -> VisionFrame:
@@ -13,6 +13,6 @@ def implode_pixel_boost(crop_vision_frame : VisionFrame, pixel_boost_total : int
def explode_pixel_boost(temp_vision_frames : List[VisionFrame], pixel_boost_total : int, model_size : Size, pixel_boost_size : Size) -> VisionFrame: def explode_pixel_boost(temp_vision_frames : List[VisionFrame], pixel_boost_total : int, model_size : Size, pixel_boost_size : Size) -> VisionFrame:
crop_vision_frame = numpy.stack(temp_vision_frames, axis = 0).reshape(pixel_boost_total, pixel_boost_total, model_size[0], model_size[1], 3) crop_vision_frame = numpy.stack(temp_vision_frames).reshape(pixel_boost_total, pixel_boost_total, model_size[0], model_size[1], 3)
crop_vision_frame = crop_vision_frame.transpose(2, 0, 3, 1, 4).reshape(pixel_boost_size[0], pixel_boost_size[1], 3) crop_vision_frame = crop_vision_frame.transpose(2, 0, 3, 1, 4).reshape(pixel_boost_size[0], pixel_boost_size[1], 3)
return crop_vision_frame return crop_vision_frame

View File

@@ -1,11 +1,11 @@
from typing import Any, Dict, List, Literal, TypedDict from typing import Any, Dict, List, Literal, TypeAlias, TypedDict
from numpy._typing import NDArray from numpy.typing import NDArray
from facefusion.typing import AppContext, AudioFrame, Face, FaceSet, VisionFrame from facefusion.types import AppContext, AudioFrame, Face, FaceSet, VisionFrame
AgeModifierModel = Literal['styleganex_age'] AgeModifierModel = Literal['styleganex_age']
DeepSwapperModel = str DeepSwapperModel : TypeAlias = str
ExpressionRestorerModel = Literal['live_portrait'] ExpressionRestorerModel = Literal['live_portrait']
FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race'] FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race']
FaceEditorModel = Literal['live_portrait'] FaceEditorModel = Literal['live_portrait']
@@ -15,7 +15,7 @@ FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldi
FrameEnhancerModel = Literal['clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4'] FrameEnhancerModel = Literal['clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4']
LipSyncerModel = Literal['wav2lip_96', 'wav2lip_gan_96'] LipSyncerModel = Literal['wav2lip_96', 'wav2lip_gan_96']
FaceSwapperSet = Dict[FaceSwapperModel, List[str]] FaceSwapperSet : TypeAlias = Dict[FaceSwapperModel, List[str]]
AgeModifierInputs = TypedDict('AgeModifierInputs', AgeModifierInputs = TypedDict('AgeModifierInputs',
{ {
@@ -141,17 +141,17 @@ ProcessorState = TypedDict('ProcessorState',
'frame_enhancer_blend' : int, 'frame_enhancer_blend' : int,
'lip_syncer_model' : LipSyncerModel 'lip_syncer_model' : LipSyncerModel
}) })
ProcessorStateSet = Dict[AppContext, ProcessorState] ProcessorStateSet : TypeAlias = Dict[AppContext, ProcessorState]
AgeModifierDirection = NDArray[Any] AgeModifierDirection : TypeAlias = NDArray[Any]
DeepSwapperMorph = NDArray[Any] DeepSwapperMorph : TypeAlias = NDArray[Any]
FaceEnhancerWeight = NDArray[Any] FaceEnhancerWeight : TypeAlias = NDArray[Any]
LivePortraitPitch = float LivePortraitPitch : TypeAlias = float
LivePortraitYaw = float LivePortraitYaw : TypeAlias = float
LivePortraitRoll = float LivePortraitRoll : TypeAlias = float
LivePortraitExpression = NDArray[Any] LivePortraitExpression : TypeAlias = NDArray[Any]
LivePortraitFeatureVolume = NDArray[Any] LivePortraitFeatureVolume : TypeAlias = NDArray[Any]
LivePortraitMotionPoints = NDArray[Any] LivePortraitMotionPoints : TypeAlias = NDArray[Any]
LivePortraitRotation = NDArray[Any] LivePortraitRotation : TypeAlias = NDArray[Any]
LivePortraitScale = NDArray[Any] LivePortraitScale : TypeAlias = NDArray[Any]
LivePortraitTranslation = NDArray[Any] LivePortraitTranslation : TypeAlias = NDArray[Any]

View File

@@ -3,9 +3,10 @@ from argparse import ArgumentParser, HelpFormatter
import facefusion.choices import facefusion.choices
from facefusion import config, metadata, state_manager, wording from facefusion import config, metadata, state_manager, wording
from facefusion.common_helper import create_float_metavar, create_int_metavar, get_last from facefusion.common_helper import create_float_metavar, create_int_metavar, get_first, get_last
from facefusion.execution import get_available_execution_providers from facefusion.execution import get_available_execution_providers
from facefusion.filesystem import list_directory from facefusion.ffmpeg import get_available_encoder_set
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.jobs import job_store from facefusion.jobs import job_store
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
@@ -30,7 +31,7 @@ def create_config_path_program() -> ArgumentParser:
def create_temp_path_program() -> ArgumentParser: def create_temp_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths') group_paths = program.add_argument_group('paths')
group_paths.add_argument('--temp-path', help = wording.get('help.temp_path'), default = config.get_str_value('paths.temp_path', tempfile.gettempdir())) group_paths.add_argument('--temp-path', help = wording.get('help.temp_path'), default = config.get_str_value('paths', 'temp_path', tempfile.gettempdir()))
job_store.register_job_keys([ 'temp_path' ]) job_store.register_job_keys([ 'temp_path' ])
return program return program
@@ -38,7 +39,7 @@ def create_temp_path_program() -> ArgumentParser:
def create_jobs_path_program() -> ArgumentParser: def create_jobs_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths') group_paths = program.add_argument_group('paths')
group_paths.add_argument('--jobs-path', help = wording.get('help.jobs_path'), default = config.get_str_value('paths.jobs_path', '.jobs')) group_paths.add_argument('--jobs-path', help = wording.get('help.jobs_path'), default = config.get_str_value('paths', 'jobs_path', '.jobs'))
job_store.register_job_keys([ 'jobs_path' ]) job_store.register_job_keys([ 'jobs_path' ])
return program return program
@@ -46,7 +47,7 @@ def create_jobs_path_program() -> ArgumentParser:
def create_source_paths_program() -> ArgumentParser: def create_source_paths_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths') group_paths = program.add_argument_group('paths')
group_paths.add_argument('-s', '--source-paths', help = wording.get('help.source_paths'), default = config.get_str_list('paths.source_paths'), nargs = '+') group_paths.add_argument('-s', '--source-paths', help = wording.get('help.source_paths'), default = config.get_str_list('paths', 'source_paths'), nargs = '+')
job_store.register_step_keys([ 'source_paths' ]) job_store.register_step_keys([ 'source_paths' ])
return program return program
@@ -54,7 +55,7 @@ def create_source_paths_program() -> ArgumentParser:
def create_target_path_program() -> ArgumentParser: def create_target_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths') group_paths = program.add_argument_group('paths')
group_paths.add_argument('-t', '--target-path', help = wording.get('help.target_path'), default = config.get_str_value('paths.target_path')) group_paths.add_argument('-t', '--target-path', help = wording.get('help.target_path'), default = config.get_str_value('paths', 'target_path'))
job_store.register_step_keys([ 'target_path' ]) job_store.register_step_keys([ 'target_path' ])
return program return program
@@ -62,7 +63,7 @@ def create_target_path_program() -> ArgumentParser:
def create_output_path_program() -> ArgumentParser: def create_output_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths') group_paths = program.add_argument_group('paths')
group_paths.add_argument('-o', '--output-path', help = wording.get('help.output_path'), default = config.get_str_value('paths.output_path')) group_paths.add_argument('-o', '--output-path', help = wording.get('help.output_path'), default = config.get_str_value('paths', 'output_path'))
job_store.register_step_keys([ 'output_path' ]) job_store.register_step_keys([ 'output_path' ])
return program return program
@@ -70,7 +71,7 @@ def create_output_path_program() -> ArgumentParser:
def create_source_pattern_program() -> ArgumentParser: def create_source_pattern_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_patterns = program.add_argument_group('patterns') group_patterns = program.add_argument_group('patterns')
group_patterns.add_argument('-s', '--source-pattern', help = wording.get('help.source_pattern'), default = config.get_str_value('patterns.source_pattern')) group_patterns.add_argument('-s', '--source-pattern', help = wording.get('help.source_pattern'), default = config.get_str_value('patterns', 'source_pattern'))
job_store.register_job_keys([ 'source_pattern' ]) job_store.register_job_keys([ 'source_pattern' ])
return program return program
@@ -78,7 +79,7 @@ def create_source_pattern_program() -> ArgumentParser:
def create_target_pattern_program() -> ArgumentParser: def create_target_pattern_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_patterns = program.add_argument_group('patterns') group_patterns = program.add_argument_group('patterns')
group_patterns.add_argument('-t', '--target-pattern', help = wording.get('help.target_pattern'), default = config.get_str_value('patterns.target_pattern')) group_patterns.add_argument('-t', '--target-pattern', help = wording.get('help.target_pattern'), default = config.get_str_value('patterns', 'target_pattern'))
job_store.register_job_keys([ 'target_pattern' ]) job_store.register_job_keys([ 'target_pattern' ])
return program return program
@@ -86,7 +87,7 @@ def create_target_pattern_program() -> ArgumentParser:
def create_output_pattern_program() -> ArgumentParser: def create_output_pattern_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_patterns = program.add_argument_group('patterns') group_patterns = program.add_argument_group('patterns')
group_patterns.add_argument('-o', '--output-pattern', help = wording.get('help.output_pattern'), default = config.get_str_value('patterns.output_pattern')) group_patterns.add_argument('-o', '--output-pattern', help = wording.get('help.output_pattern'), default = config.get_str_value('patterns', 'output_pattern'))
job_store.register_job_keys([ 'output_pattern' ]) job_store.register_job_keys([ 'output_pattern' ])
return program return program
@@ -94,12 +95,12 @@ def create_output_pattern_program() -> ArgumentParser:
def create_face_detector_program() -> ArgumentParser: def create_face_detector_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_face_detector = program.add_argument_group('face detector') group_face_detector = program.add_argument_group('face detector')
group_face_detector.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_detector.face_detector_model', 'yoloface'), choices = facefusion.choices.face_detector_models) group_face_detector.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_detector', 'face_detector_model', 'yolo_face'), choices = facefusion.choices.face_detector_models)
known_args, _ = program.parse_known_args() known_args, _ = program.parse_known_args()
face_detector_size_choices = facefusion.choices.face_detector_set.get(known_args.face_detector_model) face_detector_size_choices = facefusion.choices.face_detector_set.get(known_args.face_detector_model)
group_face_detector.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_detector.face_detector_size', get_last(face_detector_size_choices)), choices = face_detector_size_choices) group_face_detector.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_detector', 'face_detector_size', get_last(face_detector_size_choices)), choices = face_detector_size_choices)
group_face_detector.add_argument('--face-detector-angles', help = wording.get('help.face_detector_angles'), type = int, default = config.get_int_list('face_detector.face_detector_angles', '0'), choices = facefusion.choices.face_detector_angles, nargs = '+', metavar = 'FACE_DETECTOR_ANGLES') group_face_detector.add_argument('--face-detector-angles', help = wording.get('help.face_detector_angles'), type = int, default = config.get_int_list('face_detector', 'face_detector_angles', '0'), choices = facefusion.choices.face_detector_angles, nargs = '+', metavar = 'FACE_DETECTOR_ANGLES')
group_face_detector.add_argument('--face-detector-score', help = wording.get('help.face_detector_score'), type = float, default = config.get_float_value('face_detector.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_float_metavar(facefusion.choices.face_detector_score_range)) group_face_detector.add_argument('--face-detector-score', help = wording.get('help.face_detector_score'), type = float, default = config.get_float_value('face_detector', 'face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_float_metavar(facefusion.choices.face_detector_score_range))
job_store.register_step_keys([ 'face_detector_model', 'face_detector_angles', 'face_detector_size', 'face_detector_score' ]) job_store.register_step_keys([ 'face_detector_model', 'face_detector_angles', 'face_detector_size', 'face_detector_score' ])
return program return program
@@ -107,8 +108,8 @@ def create_face_detector_program() -> ArgumentParser:
def create_face_landmarker_program() -> ArgumentParser: def create_face_landmarker_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_face_landmarker = program.add_argument_group('face landmarker') group_face_landmarker = program.add_argument_group('face landmarker')
group_face_landmarker.add_argument('--face-landmarker-model', help = wording.get('help.face_landmarker_model'), default = config.get_str_value('face_landmarker.face_landmarker_model', '2dfan4'), choices = facefusion.choices.face_landmarker_models) group_face_landmarker.add_argument('--face-landmarker-model', help = wording.get('help.face_landmarker_model'), default = config.get_str_value('face_landmarker', 'face_landmarker_model', '2dfan4'), choices = facefusion.choices.face_landmarker_models)
group_face_landmarker.add_argument('--face-landmarker-score', help = wording.get('help.face_landmarker_score'), type = float, default = config.get_float_value('face_landmarker.face_landmarker_score', '0.5'), choices = facefusion.choices.face_landmarker_score_range, metavar = create_float_metavar(facefusion.choices.face_landmarker_score_range)) group_face_landmarker.add_argument('--face-landmarker-score', help = wording.get('help.face_landmarker_score'), type = float, default = config.get_float_value('face_landmarker', 'face_landmarker_score', '0.5'), choices = facefusion.choices.face_landmarker_score_range, metavar = create_float_metavar(facefusion.choices.face_landmarker_score_range))
job_store.register_step_keys([ 'face_landmarker_model', 'face_landmarker_score' ]) job_store.register_step_keys([ 'face_landmarker_model', 'face_landmarker_score' ])
return program return program
@@ -116,15 +117,15 @@ def create_face_landmarker_program() -> ArgumentParser:
def create_face_selector_program() -> ArgumentParser: def create_face_selector_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_face_selector = program.add_argument_group('face selector') group_face_selector = program.add_argument_group('face selector')
group_face_selector.add_argument('--face-selector-mode', help = wording.get('help.face_selector_mode'), default = config.get_str_value('face_selector.face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes) group_face_selector.add_argument('--face-selector-mode', help = wording.get('help.face_selector_mode'), default = config.get_str_value('face_selector', 'face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes)
group_face_selector.add_argument('--face-selector-order', help = wording.get('help.face_selector_order'), default = config.get_str_value('face_selector.face_selector_order', 'large-small'), choices = facefusion.choices.face_selector_orders) group_face_selector.add_argument('--face-selector-order', help = wording.get('help.face_selector_order'), default = config.get_str_value('face_selector', 'face_selector_order', 'large-small'), choices = facefusion.choices.face_selector_orders)
group_face_selector.add_argument('--face-selector-age-start', help = wording.get('help.face_selector_age_start'), type = int, default = config.get_int_value('face_selector.face_selector_age_start'), choices = facefusion.choices.face_selector_age_range, metavar = create_int_metavar(facefusion.choices.face_selector_age_range)) group_face_selector.add_argument('--face-selector-age-start', help = wording.get('help.face_selector_age_start'), type = int, default = config.get_int_value('face_selector', 'face_selector_age_start'), choices = facefusion.choices.face_selector_age_range, metavar = create_int_metavar(facefusion.choices.face_selector_age_range))
group_face_selector.add_argument('--face-selector-age-end', help = wording.get('help.face_selector_age_end'), type = int, default = config.get_int_value('face_selector.face_selector_age_end'), choices = facefusion.choices.face_selector_age_range, metavar = create_int_metavar(facefusion.choices.face_selector_age_range)) group_face_selector.add_argument('--face-selector-age-end', help = wording.get('help.face_selector_age_end'), type = int, default = config.get_int_value('face_selector', 'face_selector_age_end'), choices = facefusion.choices.face_selector_age_range, metavar = create_int_metavar(facefusion.choices.face_selector_age_range))
group_face_selector.add_argument('--face-selector-gender', help = wording.get('help.face_selector_gender'), default = config.get_str_value('face_selector.face_selector_gender'), choices = facefusion.choices.face_selector_genders) group_face_selector.add_argument('--face-selector-gender', help = wording.get('help.face_selector_gender'), default = config.get_str_value('face_selector', 'face_selector_gender'), choices = facefusion.choices.face_selector_genders)
group_face_selector.add_argument('--face-selector-race', help = wording.get('help.face_selector_race'), default = config.get_str_value('face_selector.face_selector_race'), choices = facefusion.choices.face_selector_races) group_face_selector.add_argument('--face-selector-race', help = wording.get('help.face_selector_race'), default = config.get_str_value('face_selector', 'face_selector_race'), choices = facefusion.choices.face_selector_races)
group_face_selector.add_argument('--reference-face-position', help = wording.get('help.reference_face_position'), type = int, default = config.get_int_value('face_selector.reference_face_position', '0')) group_face_selector.add_argument('--reference-face-position', help = wording.get('help.reference_face_position'), type = int, default = config.get_int_value('face_selector', 'reference_face_position', '0'))
group_face_selector.add_argument('--reference-face-distance', help = wording.get('help.reference_face_distance'), type = float, default = config.get_float_value('face_selector.reference_face_distance', '0.6'), choices = facefusion.choices.reference_face_distance_range, metavar = create_float_metavar(facefusion.choices.reference_face_distance_range)) group_face_selector.add_argument('--reference-face-distance', help = wording.get('help.reference_face_distance'), type = float, default = config.get_float_value('face_selector', 'reference_face_distance', '0.3'), choices = facefusion.choices.reference_face_distance_range, metavar = create_float_metavar(facefusion.choices.reference_face_distance_range))
group_face_selector.add_argument('--reference-frame-number', help = wording.get('help.reference_frame_number'), type = int, default = config.get_int_value('face_selector.reference_frame_number', '0')) group_face_selector.add_argument('--reference-frame-number', help = wording.get('help.reference_frame_number'), type = int, default = config.get_int_value('face_selector', 'reference_frame_number', '0'))
job_store.register_step_keys([ 'face_selector_mode', 'face_selector_order', 'face_selector_gender', 'face_selector_race', 'face_selector_age_start', 'face_selector_age_end', 'reference_face_position', 'reference_face_distance', 'reference_frame_number' ]) job_store.register_step_keys([ 'face_selector_mode', 'face_selector_order', 'face_selector_gender', 'face_selector_race', 'face_selector_age_start', 'face_selector_age_end', 'reference_face_position', 'reference_face_distance', 'reference_frame_number' ])
return program return program
@@ -132,12 +133,12 @@ def create_face_selector_program() -> ArgumentParser:
def create_face_masker_program() -> ArgumentParser: def create_face_masker_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_face_masker = program.add_argument_group('face masker') group_face_masker = program.add_argument_group('face masker')
group_face_masker.add_argument('--face-occluder-model', help = wording.get('help.face_occluder_model'), default = config.get_str_value('face_detector.face_occluder_model', 'xseg_1'), choices = facefusion.choices.face_occluder_models) group_face_masker.add_argument('--face-occluder-model', help = wording.get('help.face_occluder_model'), default = config.get_str_value('face_masker', 'face_occluder_model', 'xseg_1'), choices = facefusion.choices.face_occluder_models)
group_face_masker.add_argument('--face-parser-model', help = wording.get('help.face_parser_model'), default = config.get_str_value('face_detector.face_parser_model', 'bisenet_resnet_34'), choices = facefusion.choices.face_parser_models) group_face_masker.add_argument('--face-parser-model', help = wording.get('help.face_parser_model'), default = config.get_str_value('face_masker', 'face_parser_model', 'bisenet_resnet_34'), choices = facefusion.choices.face_parser_models)
group_face_masker.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_masker.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES') group_face_masker.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_masker', 'face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
group_face_masker.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_masker.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_float_metavar(facefusion.choices.face_mask_blur_range)) group_face_masker.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_masker', 'face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_float_metavar(facefusion.choices.face_mask_blur_range))
group_face_masker.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_masker.face_mask_padding', '0 0 0 0'), nargs = '+') group_face_masker.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_masker', 'face_mask_padding', '0 0 0 0'), nargs = '+')
group_face_masker.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_masker.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS') group_face_masker.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_masker', 'face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
job_store.register_step_keys([ 'face_occluder_model', 'face_parser_model', 'face_mask_types', 'face_mask_blur', 'face_mask_padding', 'face_mask_regions' ]) job_store.register_step_keys([ 'face_occluder_model', 'face_parser_model', 'face_mask_types', 'face_mask_blur', 'face_mask_padding', 'face_mask_regions' ])
return program return program
@@ -145,35 +146,37 @@ def create_face_masker_program() -> ArgumentParser:
def create_frame_extraction_program() -> ArgumentParser: def create_frame_extraction_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_frame_extraction = program.add_argument_group('frame extraction') group_frame_extraction = program.add_argument_group('frame extraction')
group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('help.trim_frame_start'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_start')) group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('help.trim_frame_start'), type = int, default = facefusion.config.get_int_value('frame_extraction', 'trim_frame_start'))
group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('help.trim_frame_end'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_end')) group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('help.trim_frame_end'), type = int, default = facefusion.config.get_int_value('frame_extraction', 'trim_frame_end'))
group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('help.temp_frame_format'), default = config.get_str_value('frame_extraction.temp_frame_format', 'png'), choices = facefusion.choices.temp_frame_formats) group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('help.temp_frame_format'), default = config.get_str_value('frame_extraction', 'temp_frame_format', 'png'), choices = facefusion.choices.temp_frame_formats)
group_frame_extraction.add_argument('--keep-temp', help = wording.get('help.keep_temp'), action = 'store_true', default = config.get_bool_value('frame_extraction.keep_temp')) group_frame_extraction.add_argument('--keep-temp', help = wording.get('help.keep_temp'), action = 'store_true', default = config.get_bool_value('frame_extraction', 'keep_temp'))
job_store.register_step_keys([ 'trim_frame_start', 'trim_frame_end', 'temp_frame_format', 'keep_temp' ]) job_store.register_step_keys([ 'trim_frame_start', 'trim_frame_end', 'temp_frame_format', 'keep_temp' ])
return program return program
def create_output_creation_program() -> ArgumentParser: def create_output_creation_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
available_encoder_set = get_available_encoder_set()
group_output_creation = program.add_argument_group('output creation') group_output_creation = program.add_argument_group('output creation')
group_output_creation.add_argument('--output-image-quality', help = wording.get('help.output_image_quality'), type = int, default = config.get_int_value('output_creation.output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_int_metavar(facefusion.choices.output_image_quality_range)) group_output_creation.add_argument('--output-image-quality', help = wording.get('help.output_image_quality'), type = int, default = config.get_int_value('output_creation', 'output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_int_metavar(facefusion.choices.output_image_quality_range))
group_output_creation.add_argument('--output-image-resolution', help = wording.get('help.output_image_resolution'), default = config.get_str_value('output_creation.output_image_resolution')) group_output_creation.add_argument('--output-image-resolution', help = wording.get('help.output_image_resolution'), default = config.get_str_value('output_creation', 'output_image_resolution'))
group_output_creation.add_argument('--output-audio-encoder', help = wording.get('help.output_audio_encoder'), default = config.get_str_value('output_creation.output_audio_encoder', 'aac'), choices = facefusion.choices.output_audio_encoders) group_output_creation.add_argument('--output-audio-encoder', help = wording.get('help.output_audio_encoder'), default = config.get_str_value('output_creation', 'output_audio_encoder', get_first(available_encoder_set.get('audio'))), choices = available_encoder_set.get('audio'))
group_output_creation.add_argument('--output-video-encoder', help = wording.get('help.output_video_encoder'), default = config.get_str_value('output_creation.output_video_encoder', 'libx264'), choices = facefusion.choices.output_video_encoders) group_output_creation.add_argument('--output-audio-quality', help = wording.get('help.output_audio_quality'), type = int, default = config.get_int_value('output_creation', 'output_audio_quality', '80'), choices = facefusion.choices.output_audio_quality_range, metavar = create_int_metavar(facefusion.choices.output_audio_quality_range))
group_output_creation.add_argument('--output-video-preset', help = wording.get('help.output_video_preset'), default = config.get_str_value('output_creation.output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets) group_output_creation.add_argument('--output-audio-volume', help = wording.get('help.output_audio_volume'), type = int, default = config.get_int_value('output_creation', 'output_audio_volume', '100'), choices = facefusion.choices.output_audio_volume_range, metavar = create_int_metavar(facefusion.choices.output_audio_volume_range))
group_output_creation.add_argument('--output-video-quality', help = wording.get('help.output_video_quality'), type = int, default = config.get_int_value('output_creation.output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_int_metavar(facefusion.choices.output_video_quality_range)) group_output_creation.add_argument('--output-video-encoder', help = wording.get('help.output_video_encoder'), default = config.get_str_value('output_creation', 'output_video_encoder', get_first(available_encoder_set.get('video'))), choices = available_encoder_set.get('video'))
group_output_creation.add_argument('--output-video-resolution', help = wording.get('help.output_video_resolution'), default = config.get_str_value('output_creation.output_video_resolution')) group_output_creation.add_argument('--output-video-preset', help = wording.get('help.output_video_preset'), default = config.get_str_value('output_creation', 'output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets)
group_output_creation.add_argument('--output-video-fps', help = wording.get('help.output_video_fps'), type = float, default = config.get_str_value('output_creation.output_video_fps')) group_output_creation.add_argument('--output-video-quality', help = wording.get('help.output_video_quality'), type = int, default = config.get_int_value('output_creation', 'output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_int_metavar(facefusion.choices.output_video_quality_range))
group_output_creation.add_argument('--skip-audio', help = wording.get('help.skip_audio'), action = 'store_true', default = config.get_bool_value('output_creation.skip_audio')) group_output_creation.add_argument('--output-video-resolution', help = wording.get('help.output_video_resolution'), default = config.get_str_value('output_creation', 'output_video_resolution'))
job_store.register_step_keys([ 'output_image_quality', 'output_image_resolution', 'output_audio_encoder', 'output_video_encoder', 'output_video_preset', 'output_video_quality', 'output_video_resolution', 'output_video_fps', 'skip_audio' ]) group_output_creation.add_argument('--output-video-fps', help = wording.get('help.output_video_fps'), type = float, default = config.get_str_value('output_creation', 'output_video_fps'))
job_store.register_step_keys([ 'output_image_quality', 'output_image_resolution', 'output_audio_encoder', 'output_audio_quality', 'output_audio_volume', 'output_video_encoder', 'output_video_preset', 'output_video_quality', 'output_video_resolution', 'output_video_fps' ])
return program return program
def create_processors_program() -> ArgumentParser: def create_processors_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
group_processors = program.add_argument_group('processors') group_processors = program.add_argument_group('processors')
group_processors.add_argument('--processors', help = wording.get('help.processors').format(choices = ', '.join(available_processors)), default = config.get_str_list('processors.processors', 'face_swapper'), nargs = '+') group_processors.add_argument('--processors', help = wording.get('help.processors').format(choices = ', '.join(available_processors)), default = config.get_str_list('processors', 'processors', 'face_swapper'), nargs = '+')
job_store.register_step_keys([ 'processors' ]) job_store.register_step_keys([ 'processors' ])
for processor_module in get_processors_modules(available_processors): for processor_module in get_processors_modules(available_processors):
processor_module.register_args(program) processor_module.register_args(program)
@@ -182,11 +185,11 @@ def create_processors_program() -> ArgumentParser:
def create_uis_program() -> ArgumentParser: def create_uis_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
available_ui_layouts = [ file.get('name') for file in list_directory('facefusion/uis/layouts') ] available_ui_layouts = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/uis/layouts') ]
group_uis = program.add_argument_group('uis') group_uis = program.add_argument_group('uis')
group_uis.add_argument('--open-browser', help = wording.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis.open_browser')) group_uis.add_argument('--open-browser', help = wording.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis', 'open_browser'))
group_uis.add_argument('--ui-layouts', help = wording.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layouts', 'default'), nargs = '+') group_uis.add_argument('--ui-layouts', help = wording.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis', 'ui_layouts', 'default'), nargs = '+')
group_uis.add_argument('--ui-workflow', help = wording.get('help.ui_workflow'), default = config.get_str_value('uis.ui_workflow', 'instant_runner'), choices = facefusion.choices.ui_workflows) group_uis.add_argument('--ui-workflow', help = wording.get('help.ui_workflow'), default = config.get_str_value('uis', 'ui_workflow', 'instant_runner'), choices = facefusion.choices.ui_workflows)
return program return program
@@ -194,19 +197,18 @@ def create_execution_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
available_execution_providers = get_available_execution_providers() available_execution_providers = get_available_execution_providers()
group_execution = program.add_argument_group('execution') group_execution = program.add_argument_group('execution')
group_execution.add_argument('--execution-device-id', help = wording.get('help.execution_device_id'), default = config.get_str_value('execution.execution_device_id', '0')) group_execution.add_argument('--execution-device-id', help = wording.get('help.execution_device_id'), default = config.get_str_value('execution', 'execution_device_id', '0'))
group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(available_execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = available_execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS') group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(available_execution_providers)), default = config.get_str_list('execution', 'execution_providers', get_first(available_execution_providers)), choices = available_execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_int_metavar(facefusion.choices.execution_thread_count_range)) group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution', 'execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_int_metavar(facefusion.choices.execution_thread_count_range))
group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_int_metavar(facefusion.choices.execution_queue_count_range)) group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution', 'execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_int_metavar(facefusion.choices.execution_queue_count_range))
job_store.register_job_keys([ 'execution_device_id', 'execution_providers', 'execution_thread_count', 'execution_queue_count' ]) job_store.register_job_keys([ 'execution_device_id', 'execution_providers', 'execution_thread_count', 'execution_queue_count' ])
return program return program
def create_download_providers_program() -> ArgumentParser: def create_download_providers_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
download_providers = list(facefusion.choices.download_provider_set.keys())
group_download = program.add_argument_group('download') group_download = program.add_argument_group('download')
group_download.add_argument('--download-providers', help = wording.get('help.download_providers').format(choices = ', '.join(download_providers)), default = config.get_str_list('download.download_providers', ' '.join(facefusion.choices.download_providers)), choices = download_providers, nargs = '+', metavar = 'DOWNLOAD_PROVIDERS') group_download.add_argument('--download-providers', help = wording.get('help.download_providers').format(choices = ', '.join(facefusion.choices.download_providers)), default = config.get_str_list('download', 'download_providers', ' '.join(facefusion.choices.download_providers)), choices = facefusion.choices.download_providers, nargs = '+', metavar = 'DOWNLOAD_PROVIDERS')
job_store.register_job_keys([ 'download_providers' ]) job_store.register_job_keys([ 'download_providers' ])
return program return program
@@ -214,7 +216,7 @@ def create_download_providers_program() -> ArgumentParser:
def create_download_scope_program() -> ArgumentParser: def create_download_scope_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_download = program.add_argument_group('download') group_download = program.add_argument_group('download')
group_download.add_argument('--download-scope', help = wording.get('help.download_scope'), default = config.get_str_value('download.download_scope', 'lite'), choices = facefusion.choices.download_scopes) group_download.add_argument('--download-scope', help = wording.get('help.download_scope'), default = config.get_str_value('download', 'download_scope', 'lite'), choices = facefusion.choices.download_scopes)
job_store.register_job_keys([ 'download_scope' ]) job_store.register_job_keys([ 'download_scope' ])
return program return program
@@ -222,21 +224,28 @@ def create_download_scope_program() -> ArgumentParser:
def create_memory_program() -> ArgumentParser: def create_memory_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
group_memory = program.add_argument_group('memory') group_memory = program.add_argument_group('memory')
group_memory.add_argument('--video-memory-strategy', help = wording.get('help.video_memory_strategy'), default = config.get_str_value('memory.video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies) group_memory.add_argument('--video-memory-strategy', help = wording.get('help.video_memory_strategy'), default = config.get_str_value('memory', 'video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies)
group_memory.add_argument('--system-memory-limit', help = wording.get('help.system_memory_limit'), type = int, default = config.get_int_value('memory.system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_int_metavar(facefusion.choices.system_memory_limit_range)) group_memory.add_argument('--system-memory-limit', help = wording.get('help.system_memory_limit'), type = int, default = config.get_int_value('memory', 'system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_int_metavar(facefusion.choices.system_memory_limit_range))
job_store.register_job_keys([ 'video_memory_strategy', 'system_memory_limit' ]) job_store.register_job_keys([ 'video_memory_strategy', 'system_memory_limit' ])
return program return program
def create_misc_program() -> ArgumentParser: def create_log_level_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
log_level_keys = list(facefusion.choices.log_level_set.keys())
group_misc = program.add_argument_group('misc') group_misc = program.add_argument_group('misc')
group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = log_level_keys) group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc', 'log_level', 'info'), choices = facefusion.choices.log_levels)
job_store.register_job_keys([ 'log_level' ]) job_store.register_job_keys([ 'log_level' ])
return program return program
def create_halt_on_error_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_misc = program.add_argument_group('misc')
group_misc.add_argument('--halt-on-error', help = wording.get('help.halt_on_error'), action = 'store_true', default = config.get_bool_value('misc', 'halt_on_error'))
job_store.register_job_keys([ 'halt_on_error' ])
return program
def create_job_id_program() -> ArgumentParser: def create_job_id_program() -> ArgumentParser:
program = ArgumentParser(add_help = False) program = ArgumentParser(add_help = False)
program.add_argument('job_id', help = wording.get('help.job_id')) program.add_argument('job_id', help = wording.get('help.job_id'))
@@ -257,11 +266,11 @@ def create_step_index_program() -> ArgumentParser:
def collect_step_program() -> ArgumentParser: def collect_step_program() -> ArgumentParser:
return ArgumentParser(parents= [ create_face_detector_program(), create_face_landmarker_program(), create_face_selector_program(), create_face_masker_program(), create_frame_extraction_program(), create_output_creation_program(), create_processors_program() ], add_help = False) return ArgumentParser(parents = [ create_face_detector_program(), create_face_landmarker_program(), create_face_selector_program(), create_face_masker_program(), create_frame_extraction_program(), create_output_creation_program(), create_processors_program() ], add_help = False)
def collect_job_program() -> ArgumentParser: def collect_job_program() -> ArgumentParser:
return ArgumentParser(parents= [ create_execution_program(), create_download_providers_program(), create_memory_program(), create_misc_program() ], add_help = False) return ArgumentParser(parents = [ create_execution_program(), create_download_providers_program(), create_memory_program(), create_log_level_program() ], add_help = False)
def create_program() -> ArgumentParser: def create_program() -> ArgumentParser:
@@ -273,24 +282,24 @@ def create_program() -> ArgumentParser:
sub_program.add_parser('run', help = wording.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('run', help = wording.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('batch-run', help = wording.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('batch-run', help = wording.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('force-download', help = wording.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('force-download', help = wording.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
# job manager # job manager
sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit', help = wording.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-submit', help = wording.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit-all', help = wording.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-submit-all', help = wording.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-delete', help = wording.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-delete', help = wording.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-delete-all', help = wording.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-delete-all', help = wording.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-add-step', help = wording.get('help.job_add_step'), parents = [ create_job_id_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-add-step', help = wording.get('help.job_add_step'), parents = [ create_job_id_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remix-step', help = wording.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-remix-step', help = wording.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-insert-step', help = wording.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-insert-step', help = wording.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remove-step', help = wording.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-remove-step', help = wording.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
# job runner # job runner
sub_program.add_parser('job-run', help = wording.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-run', help = wording.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-run-all', help = wording.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-run-all', help = wording.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry', help = wording.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-retry', help = wording.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry-all', help = wording.get('help.job_retry_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large) sub_program.add_parser('job-retry-all', help = wording.get('help.job_retry_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
return ArgumentParser(parents = [ program ], formatter_class = create_help_formatter_small, add_help = True) return ArgumentParser(parents = [ program ], formatter_class = create_help_formatter_small)
def apply_config_path(program : ArgumentParser) -> None: def apply_config_path(program : ArgumentParser) -> None:

View File

@@ -1,37 +1,37 @@
from typing import Any, Union from typing import Any, Union
from facefusion.app_context import detect_app_context from facefusion.app_context import detect_app_context
from facefusion.processors.typing import ProcessorState, ProcessorStateKey from facefusion.processors.types import ProcessorState, ProcessorStateKey, ProcessorStateSet
from facefusion.typing import State, StateKey, StateSet from facefusion.types import State, StateKey, StateSet
STATES : Union[StateSet, ProcessorState] =\ STATE_SET : Union[StateSet, ProcessorStateSet] =\
{ {
'cli': {}, #type:ignore[typeddict-item] 'cli': {}, #type:ignore[assignment]
'ui': {} #type:ignore[typeddict-item] 'ui': {} #type:ignore[assignment]
} }
def get_state() -> Union[State, ProcessorState]: def get_state() -> Union[State, ProcessorState]:
app_context = detect_app_context() app_context = detect_app_context()
return STATES.get(app_context) #type:ignore return STATE_SET.get(app_context)
def init_item(key : Union[StateKey, ProcessorStateKey], value : Any) -> None: def init_item(key : Union[StateKey, ProcessorStateKey], value : Any) -> None:
STATES['cli'][key] = value #type:ignore STATE_SET['cli'][key] = value #type:ignore[literal-required]
STATES['ui'][key] = value #type:ignore STATE_SET['ui'][key] = value #type:ignore[literal-required]
def get_item(key : Union[StateKey, ProcessorStateKey]) -> Any: def get_item(key : Union[StateKey, ProcessorStateKey]) -> Any:
return get_state().get(key) #type:ignore return get_state().get(key) #type:ignore[literal-required]
def set_item(key : Union[StateKey, ProcessorStateKey], value : Any) -> None: def set_item(key : Union[StateKey, ProcessorStateKey], value : Any) -> None:
app_context = detect_app_context() app_context = detect_app_context()
STATES[app_context][key] = value #type:ignore STATE_SET[app_context][key] = value #type:ignore[literal-required]
def sync_item(key : Union[StateKey, ProcessorStateKey]) -> None: def sync_item(key : Union[StateKey, ProcessorStateKey]) -> None:
STATES['cli'][key] = STATES.get('ui').get(key) #type:ignore STATE_SET['cli'][key] = STATE_SET.get('ui').get(key) #type:ignore[literal-required]
def clear_item(key : Union[StateKey, ProcessorStateKey]) -> None: def clear_item(key : Union[StateKey, ProcessorStateKey]) -> None:

View File

@@ -4,7 +4,7 @@ import numpy
from facefusion import logger, state_manager from facefusion import logger, state_manager
from facefusion.face_store import get_face_store from facefusion.face_store import get_face_store
from facefusion.typing import FaceSet from facefusion.types import FaceSet
def create_statistics(static_faces : FaceSet) -> Dict[str, Any]: def create_statistics(static_faces : FaceSet) -> Dict[str, Any]:

View File

@@ -2,12 +2,12 @@ import os
from typing import List from typing import List
from facefusion import state_manager from facefusion import state_manager
from facefusion.filesystem import create_directory, move_file, remove_directory, resolve_file_pattern from facefusion.filesystem import create_directory, get_file_extension, get_file_name, move_file, remove_directory, resolve_file_pattern
def get_temp_file_path(file_path : str) -> str: def get_temp_file_path(file_path : str) -> str:
_, temp_file_extension = os.path.splitext(os.path.basename(file_path))
temp_directory_path = get_temp_directory_path(file_path) temp_directory_path = get_temp_directory_path(file_path)
temp_file_extension = get_file_extension(file_path)
return os.path.join(temp_directory_path, 'temp' + temp_file_extension) return os.path.join(temp_directory_path, 'temp' + temp_file_extension)
@@ -16,8 +16,18 @@ def move_temp_file(file_path : str, move_path : str) -> bool:
return move_file(temp_file_path, move_path) return move_file(temp_file_path, move_path)
def resolve_temp_frame_paths(target_path : str) -> List[str]:
temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
return resolve_file_pattern(temp_frames_pattern)
def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
temp_directory_path = get_temp_directory_path(target_path)
return os.path.join(temp_directory_path, temp_frame_prefix + '.' + state_manager.get_item('temp_frame_format'))
def get_temp_directory_path(file_path : str) -> str: def get_temp_directory_path(file_path : str) -> str:
temp_file_name, _ = os.path.splitext(os.path.basename(file_path)) temp_file_name = get_file_name(file_path)
return os.path.join(state_manager.get_item('temp_path'), 'facefusion', temp_file_name) return os.path.join(state_manager.get_item('temp_path'), 'facefusion', temp_file_name)
@@ -31,13 +41,3 @@ def clear_temp_directory(file_path : str) -> bool:
temp_directory_path = get_temp_directory_path(file_path) temp_directory_path = get_temp_directory_path(file_path)
return remove_directory(temp_directory_path) return remove_directory(temp_directory_path)
return True return True
def get_temp_frame_paths(target_path : str) -> List[str]:
temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
return resolve_file_pattern(temp_frames_pattern)
def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
temp_directory_path = get_temp_directory_path(target_path)
return os.path.join(temp_directory_path, temp_frame_prefix + '.' + state_manager.get_item('temp_frame_format'))

View File

@@ -1,20 +1,20 @@
from collections import namedtuple from collections import namedtuple
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, TypedDict from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, TypeAlias, TypedDict
import numpy import numpy
from numpy.typing import NDArray from numpy.typing import NDArray
from onnxruntime import InferenceSession from onnxruntime import InferenceSession
Scale = float Scale : TypeAlias = float
Score = float Score : TypeAlias = float
Angle = int Angle : TypeAlias = int
Detection = NDArray[Any] Detection : TypeAlias = NDArray[Any]
Prediction = NDArray[Any] Prediction : TypeAlias = NDArray[Any]
BoundingBox = NDArray[Any] BoundingBox : TypeAlias = NDArray[Any]
FaceLandmark5 = NDArray[Any] FaceLandmark5 : TypeAlias = NDArray[Any]
FaceLandmark68 = NDArray[Any] FaceLandmark68 : TypeAlias = NDArray[Any]
FaceLandmarkSet = TypedDict('FaceLandmarkSet', FaceLandmarkSet = TypedDict('FaceLandmarkSet',
{ {
'5' : FaceLandmark5, #type:ignore[valid-type] '5' : FaceLandmark5, #type:ignore[valid-type]
@@ -27,9 +27,9 @@ FaceScoreSet = TypedDict('FaceScoreSet',
'detector' : Score, 'detector' : Score,
'landmarker' : Score 'landmarker' : Score
}) })
Embedding = NDArray[numpy.float64] Embedding : TypeAlias = NDArray[numpy.float64]
Gender = Literal['female', 'male'] Gender = Literal['female', 'male']
Age = range Age : TypeAlias = range
Race = Literal['white', 'black', 'latino', 'asian', 'indian', 'arabic'] Race = Literal['white', 'black', 'latino', 'asian', 'indian', 'arabic']
Face = namedtuple('Face', Face = namedtuple('Face',
[ [
@@ -43,34 +43,34 @@ Face = namedtuple('Face',
'age', 'age',
'race' 'race'
]) ])
FaceSet = Dict[str, List[Face]] FaceSet : TypeAlias = Dict[str, List[Face]]
FaceStore = TypedDict('FaceStore', FaceStore = TypedDict('FaceStore',
{ {
'static_faces' : FaceSet, 'static_faces' : FaceSet,
'reference_faces' : FaceSet 'reference_faces' : FaceSet
}) })
VisionFrame = NDArray[Any] VisionFrame : TypeAlias = NDArray[Any]
Mask = NDArray[Any] Mask : TypeAlias = NDArray[Any]
Points = NDArray[Any] Points : TypeAlias = NDArray[Any]
Distance = NDArray[Any] Distance : TypeAlias = NDArray[Any]
Matrix = NDArray[Any] Matrix : TypeAlias = NDArray[Any]
Anchors = NDArray[Any] Anchors : TypeAlias = NDArray[Any]
Translation = NDArray[Any] Translation : TypeAlias = NDArray[Any]
AudioBuffer = bytes AudioBuffer : TypeAlias = bytes
Audio = NDArray[Any] Audio : TypeAlias = NDArray[Any]
AudioChunk = NDArray[Any] AudioChunk : TypeAlias = NDArray[Any]
AudioFrame = NDArray[Any] AudioFrame : TypeAlias = NDArray[Any]
Spectrogram = NDArray[Any] Spectrogram : TypeAlias = NDArray[Any]
Mel = NDArray[Any] Mel : TypeAlias = NDArray[Any]
MelFilterBank = NDArray[Any] MelFilterBank : TypeAlias = NDArray[Any]
Fps = float Fps : TypeAlias = float
Duration = float Duration : TypeAlias = float
Padding = Tuple[int, int, int, int] Padding : TypeAlias = Tuple[int, int, int, int]
Orientation = Literal['landscape', 'portrait'] Orientation = Literal['landscape', 'portrait']
Resolution = Tuple[int, int] Resolution : TypeAlias = Tuple[int, int]
ProcessState = Literal['checking', 'processing', 'stopping', 'pending'] ProcessState = Literal['checking', 'processing', 'stopping', 'pending']
QueuePayload = TypedDict('QueuePayload', QueuePayload = TypedDict('QueuePayload',
@@ -78,46 +78,65 @@ QueuePayload = TypedDict('QueuePayload',
'frame_number' : int, 'frame_number' : int,
'frame_path' : str 'frame_path' : str
}) })
Args = Dict[str, Any] Args : TypeAlias = Dict[str, Any]
UpdateProgress = Callable[[int], None] UpdateProgress : TypeAlias = Callable[[int], None]
ProcessFrames = Callable[[List[str], List[QueuePayload], UpdateProgress], None] ProcessFrames : TypeAlias = Callable[[List[str], List[QueuePayload], UpdateProgress], None]
ProcessStep = Callable[[str, int, Args], bool] ProcessStep : TypeAlias = Callable[[str, int, Args], bool]
Content = Dict[str, Any] Content : TypeAlias = Dict[str, Any]
WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'dfl_whole_face', 'ffhq_512', 'mtcnn_512', 'styleganex_384'] Commands : TypeAlias = List[str]
WarpTemplateSet = Dict[WarpTemplate, NDArray[Any]]
WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128', 'dfl_whole_face', 'ffhq_512', 'mtcnn_512', 'styleganex_384']
WarpTemplateSet : TypeAlias = Dict[WarpTemplate, NDArray[Any]]
ProcessMode = Literal['output', 'preview', 'stream'] ProcessMode = Literal['output', 'preview', 'stream']
ErrorCode = Literal[0, 1, 2, 3, 4] ErrorCode = Literal[0, 1, 2, 3, 4]
LogLevel = Literal['error', 'warn', 'info', 'debug'] LogLevel = Literal['error', 'warn', 'info', 'debug']
LogLevelSet = Dict[LogLevel, int] LogLevelSet : TypeAlias = Dict[LogLevel, int]
TableHeaders = List[str] TableHeaders = List[str]
TableContents = List[List[Any]] TableContents = List[List[Any]]
FaceDetectorModel = Literal['many', 'retinaface', 'scrfd', 'yoloface'] FaceDetectorModel = Literal['many', 'retinaface', 'scrfd', 'yolo_face']
FaceLandmarkerModel = Literal['many', '2dfan4', 'peppa_wutz'] FaceLandmarkerModel = Literal['many', '2dfan4', 'peppa_wutz']
FaceDetectorSet = Dict[FaceDetectorModel, List[str]] FaceDetectorSet : TypeAlias = Dict[FaceDetectorModel, List[str]]
FaceSelectorMode = Literal['many', 'one', 'reference'] FaceSelectorMode = Literal['many', 'one', 'reference']
FaceSelectorOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best'] FaceSelectorOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
FaceOccluderModel = Literal['xseg_1', 'xseg_2'] FaceOccluderModel = Literal['xseg_1', 'xseg_2', 'xseg_3']
FaceParserModel = Literal['bisenet_resnet_18', 'bisenet_resnet_34'] FaceParserModel = Literal['bisenet_resnet_18', 'bisenet_resnet_34']
FaceMaskType = Literal['box', 'occlusion', 'region'] FaceMaskType = Literal['box', 'occlusion', 'region']
FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip'] FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip']
FaceMaskRegionSet = Dict[FaceMaskRegion, int] FaceMaskRegionSet : TypeAlias = Dict[FaceMaskRegion, int]
TempFrameFormat = Literal['bmp', 'jpg', 'png']
OutputAudioEncoder = Literal['aac', 'libmp3lame', 'libopus', 'libvorbis']
OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf','h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox']
OutputVideoPreset = Literal['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow']
ModelOptions = Dict[str, Any] AudioFormat = Literal['flac', 'm4a', 'mp3', 'ogg', 'opus', 'wav']
ModelSet = Dict[str, ModelOptions] ImageFormat = Literal['bmp', 'jpeg', 'png', 'tiff', 'webp']
ModelInitializer = NDArray[Any] VideoFormat = Literal['avi', 'm4v', 'mkv', 'mov', 'mp4', 'webm']
TempFrameFormat = Literal['bmp', 'jpeg', 'png', 'tiff']
AudioTypeSet : TypeAlias = Dict[AudioFormat, str]
ImageTypeSet : TypeAlias = Dict[ImageFormat, str]
VideoTypeSet : TypeAlias = Dict[VideoFormat, str]
AudioEncoder = Literal['flac', 'aac', 'libmp3lame', 'libopus', 'libvorbis', 'pcm_s16le', 'pcm_s32le']
VideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox', 'rawvideo']
EncoderSet = TypedDict('EncoderSet',
{
'audio' : List[AudioEncoder],
'video' : List[VideoEncoder]
})
VideoPreset = Literal['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow']
WebcamMode = Literal['inline', 'udp', 'v4l2']
StreamMode = Literal['udp', 'v4l2']
ModelOptions : TypeAlias = Dict[str, Any]
ModelSet : TypeAlias = Dict[str, ModelOptions]
ModelInitializer : TypeAlias = NDArray[Any]
ExecutionProvider = Literal['cpu', 'coreml', 'cuda', 'directml', 'openvino', 'rocm', 'tensorrt'] ExecutionProvider = Literal['cpu', 'coreml', 'cuda', 'directml', 'openvino', 'rocm', 'tensorrt']
ExecutionProviderValue = Literal['CPUExecutionProvider', 'CoreMLExecutionProvider', 'CUDAExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'ROCMExecutionProvider', 'TensorrtExecutionProvider'] ExecutionProviderValue = Literal['CPUExecutionProvider', 'CoreMLExecutionProvider', 'CUDAExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'ROCMExecutionProvider', 'TensorrtExecutionProvider']
ExecutionProviderSet = Dict[ExecutionProvider, ExecutionProviderValue] ExecutionProviderSet : TypeAlias = Dict[ExecutionProvider, ExecutionProviderValue]
InferenceSessionProvider : TypeAlias = Any
ValueAndUnit = TypedDict('ValueAndUnit', ValueAndUnit = TypedDict('ValueAndUnit',
{ {
'value' : int, 'value' : int,
@@ -161,31 +180,23 @@ ExecutionDevice = TypedDict('ExecutionDevice',
DownloadProvider = Literal['github', 'huggingface'] DownloadProvider = Literal['github', 'huggingface']
DownloadProviderValue = TypedDict('DownloadProviderValue', DownloadProviderValue = TypedDict('DownloadProviderValue',
{ {
'url' : str, 'urls' : List[str],
'path' : str 'path' : str
}) })
DownloadProviderSet = Dict[DownloadProvider, DownloadProviderValue] DownloadProviderSet : TypeAlias = Dict[DownloadProvider, DownloadProviderValue]
DownloadScope = Literal['lite', 'full'] DownloadScope = Literal['lite', 'full']
Download = TypedDict('Download', Download = TypedDict('Download',
{ {
'url' : str, 'url' : str,
'path' : str 'path' : str
}) })
DownloadSet = Dict[str, Download] DownloadSet : TypeAlias = Dict[str, Download]
VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant'] VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant']
File = TypedDict('File',
{
'name' : str,
'extension' : str,
'path': str
})
AppContext = Literal['cli', 'ui'] AppContext = Literal['cli', 'ui']
InferencePool = Dict[str, InferenceSession] InferencePool : TypeAlias = Dict[str, InferenceSession]
InferencePoolSet = Dict[AppContext, Dict[str, InferencePool]] InferencePoolSet : TypeAlias = Dict[AppContext, Dict[str, InferencePool]]
UiWorkflow = Literal['instant_runner', 'job_runner', 'job_manager'] UiWorkflow = Literal['instant_runner', 'job_runner', 'job_manager']
@@ -194,7 +205,7 @@ JobStore = TypedDict('JobStore',
'job_keys' : List[str], 'job_keys' : List[str],
'step_keys' : List[str] 'step_keys' : List[str]
}) })
JobOutputSet = Dict[str, List[str]] JobOutputSet : TypeAlias = Dict[str, List[str]]
JobStatus = Literal['drafted', 'queued', 'completed', 'failed'] JobStatus = Literal['drafted', 'queued', 'completed', 'failed']
JobStepStatus = Literal['drafted', 'queued', 'started', 'completed', 'failed'] JobStepStatus = Literal['drafted', 'queued', 'started', 'completed', 'failed']
JobStep = TypedDict('JobStep', JobStep = TypedDict('JobStep',
@@ -209,9 +220,9 @@ Job = TypedDict('Job',
'date_updated' : Optional[str], 'date_updated' : Optional[str],
'steps' : List[JobStep] 'steps' : List[JobStep]
}) })
JobSet = Dict[str, Job] JobSet : TypeAlias = Dict[str, Job]
ApplyStateItem = Callable[[Any, Any], None] ApplyStateItem : TypeAlias = Callable[[Any, Any], None]
StateKey = Literal\ StateKey = Literal\
[ [
'command', 'command',
@@ -252,12 +263,13 @@ StateKey = Literal\
'output_image_quality', 'output_image_quality',
'output_image_resolution', 'output_image_resolution',
'output_audio_encoder', 'output_audio_encoder',
'output_audio_quality',
'output_audio_volume',
'output_video_encoder', 'output_video_encoder',
'output_video_preset', 'output_video_preset',
'output_video_quality', 'output_video_quality',
'output_video_resolution', 'output_video_resolution',
'output_video_fps', 'output_video_fps',
'skip_audio',
'processors', 'processors',
'open_browser', 'open_browser',
'ui_layouts', 'ui_layouts',
@@ -271,6 +283,7 @@ StateKey = Literal\
'video_memory_strategy', 'video_memory_strategy',
'system_memory_limit', 'system_memory_limit',
'log_level', 'log_level',
'halt_on_error',
'job_id', 'job_id',
'job_status', 'job_status',
'step_index' 'step_index'
@@ -314,13 +327,14 @@ State = TypedDict('State',
'keep_temp' : bool, 'keep_temp' : bool,
'output_image_quality' : int, 'output_image_quality' : int,
'output_image_resolution' : str, 'output_image_resolution' : str,
'output_audio_encoder' : OutputAudioEncoder, 'output_audio_encoder' : AudioEncoder,
'output_video_encoder' : OutputVideoEncoder, 'output_audio_quality' : int,
'output_video_preset' : OutputVideoPreset, 'output_audio_volume' : int,
'output_video_encoder' : VideoEncoder,
'output_video_preset' : VideoPreset,
'output_video_quality' : int, 'output_video_quality' : int,
'output_video_resolution' : str, 'output_video_resolution' : str,
'output_video_fps' : float, 'output_video_fps' : float,
'skip_audio' : bool,
'processors' : List[str], 'processors' : List[str],
'open_browser' : bool, 'open_browser' : bool,
'ui_layouts' : List[str], 'ui_layouts' : List[str],
@@ -334,8 +348,10 @@ State = TypedDict('State',
'video_memory_strategy' : VideoMemoryStrategy, 'video_memory_strategy' : VideoMemoryStrategy,
'system_memory_limit' : int, 'system_memory_limit' : int,
'log_level' : LogLevel, 'log_level' : LogLevel,
'halt_on_error' : bool,
'job_id' : str, 'job_id' : str,
'job_status' : JobStatus, 'job_status' : JobStatus,
'step_index' : int 'step_index' : int
}) })
StateSet = Dict[AppContext, State] StateSet : TypeAlias = Dict[AppContext, State]

View File

@@ -1,9 +1,13 @@
:root:root:root:root .gradio-container :root:root:root:root .gradio-container
{ {
max-width: 110em;
overflow: unset; overflow: unset;
} }
:root:root:root:root main
{
max-width: 110em;
}
:root:root:root:root input[type="number"] :root:root:root:root input[type="number"]
{ {
appearance: textfield; appearance: textfield;
@@ -65,6 +69,12 @@
min-height: unset; min-height: unset;
} }
:root:root:root:root .box-face-selector .empty,
:root:root:root:root .box-face-selector .gallery-container
{
min-height: 7.375rem;
}
:root:root:root:root .tab-wrapper :root:root:root:root .tab-wrapper
{ {
padding: 0 0.625rem; padding: 0 0.625rem;

View File

@@ -1,11 +1,9 @@
from typing import List from typing import List
from facefusion.uis.typing import JobManagerAction, JobRunnerAction, WebcamMode from facefusion.uis.types import JobManagerAction, JobRunnerAction
job_manager_actions : List[JobManagerAction] = [ 'job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ] job_manager_actions : List[JobManagerAction] = [ 'job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]
job_runner_actions : List[JobRunnerAction] = [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ] job_runner_actions : List[JobRunnerAction] = [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]
common_options : List[str] = [ 'keep-temp', 'skip-audio' ] common_options : List[str] = [ 'keep-temp' ]
webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_float_step from facefusion.common_helper import calc_float_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import AgeModifierModel from facefusion.processors.types import AgeModifierModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
AGE_MODIFIER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None AGE_MODIFIER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -9,14 +9,13 @@ import gradio
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.core import conditional_process from facefusion.core import conditional_process
from facefusion.filesystem import is_video from facefusion.filesystem import get_file_extension, is_video
from facefusion.memory import limit_system_memory from facefusion.memory import limit_system_memory
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
from facefusion.vision import count_video_frame_total, detect_video_fps, detect_video_resolution, pack_resolution from facefusion.vision import count_video_frame_total, detect_video_fps, detect_video_resolution, pack_resolution
BENCHMARK_BENCHMARKS_DATAFRAME : Optional[gradio.Dataframe] = None BENCHMARK_BENCHMARKS_DATAFRAME : Optional[gradio.Dataframe] = None
BENCHMARK_START_BUTTON : Optional[gradio.Button] = None BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
BENCHMARKS : Dict[str, str] =\ BENCHMARKS : Dict[str, str] =\
{ {
'240p': '.assets/examples/target-240p.mp4', '240p': '.assets/examples/target-240p.mp4',
@@ -32,7 +31,6 @@ BENCHMARKS : Dict[str, str] =\
def render() -> None: def render() -> None:
global BENCHMARK_BENCHMARKS_DATAFRAME global BENCHMARK_BENCHMARKS_DATAFRAME
global BENCHMARK_START_BUTTON global BENCHMARK_START_BUTTON
global BENCHMARK_CLEAR_BUTTON
BENCHMARK_BENCHMARKS_DATAFRAME = gradio.Dataframe( BENCHMARK_BENCHMARKS_DATAFRAME = gradio.Dataframe(
headers = headers =
@@ -72,8 +70,8 @@ def listen() -> None:
def suggest_output_path(target_path : str) -> Optional[str]: def suggest_output_path(target_path : str) -> Optional[str]:
if is_video(target_path): if is_video(target_path):
_, target_extension = os.path.splitext(target_path) target_file_extension = get_file_extension(target_path)
return os.path.join(tempfile.gettempdir(), hashlib.sha1().hexdigest()[:8] + target_extension) return os.path.join(tempfile.gettempdir(), hashlib.sha1().hexdigest()[:8] + target_file_extension)
return None return None
@@ -81,8 +79,8 @@ def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[
state_manager.init_item('source_paths', [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ]) state_manager.init_item('source_paths', [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ])
state_manager.init_item('face_landmarker_score', 0) state_manager.init_item('face_landmarker_score', 0)
state_manager.init_item('temp_frame_format', 'bmp') state_manager.init_item('temp_frame_format', 'bmp')
state_manager.init_item('output_audio_volume', 0)
state_manager.init_item('output_video_preset', 'ultrafast') state_manager.init_item('output_video_preset', 'ultrafast')
state_manager.init_item('skip_audio', True)
state_manager.sync_item('execution_providers') state_manager.sync_item('execution_providers')
state_manager.sync_item('execution_thread_count') state_manager.sync_item('execution_thread_count')
state_manager.sync_item('execution_queue_count') state_manager.sync_item('execution_queue_count')

View File

@@ -15,8 +15,6 @@ def render() -> None:
if state_manager.get_item('keep_temp'): if state_manager.get_item('keep_temp'):
common_options.append('keep-temp') common_options.append('keep-temp')
if state_manager.get_item('skip_audio'):
common_options.append('skip-audio')
COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup( COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup(
label = wording.get('uis.common_options_checkbox_group'), label = wording.get('uis.common_options_checkbox_group'),
@@ -31,6 +29,4 @@ def listen() -> None:
def update(common_options : List[str]) -> None: def update(common_options : List[str]) -> None:
keep_temp = 'keep-temp' in common_options keep_temp = 'keep-temp' in common_options
skip_audio = 'skip-audio' in common_options
state_manager.set_item('keep_temp', keep_temp) state_manager.set_item('keep_temp', keep_temp)
state_manager.set_item('skip_audio', skip_audio)

View File

@@ -6,8 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_int_step from facefusion.common_helper import calc_int_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.deep_swapper import has_morph_input from facefusion.processors.types import DeepSwapperModel
from facefusion.processors.typing import DeepSwapperModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -31,7 +30,7 @@ def render() -> None:
step = calc_int_step(processors_choices.deep_swapper_morph_range), step = calc_int_step(processors_choices.deep_swapper_morph_range),
minimum = processors_choices.deep_swapper_morph_range[0], minimum = processors_choices.deep_swapper_morph_range[0],
maximum = processors_choices.deep_swapper_morph_range[-1], maximum = processors_choices.deep_swapper_morph_range[-1],
visible = has_deep_swapper and has_morph_input() visible = has_deep_swapper and load_processor_module('deep_swapper').get_inference_pool() and load_processor_module('deep_swapper').has_morph_input()
) )
register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN) register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN)
register_ui_component('deep_swapper_morph_slider', DEEP_SWAPPER_MORPH_SLIDER) register_ui_component('deep_swapper_morph_slider', DEEP_SWAPPER_MORPH_SLIDER)
@@ -48,7 +47,7 @@ def listen() -> None:
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]: def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
has_deep_swapper = 'deep_swapper' in processors has_deep_swapper = 'deep_swapper' in processors
return gradio.Dropdown(visible = has_deep_swapper), gradio.Slider(visible = has_deep_swapper and has_morph_input()) return gradio.Dropdown(visible = has_deep_swapper), gradio.Slider(visible = has_deep_swapper and load_processor_module('deep_swapper').get_inference_pool() and load_processor_module('deep_swapper').has_morph_input())
def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> Tuple[gradio.Dropdown, gradio.Slider]: def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> Tuple[gradio.Dropdown, gradio.Slider]:
@@ -57,7 +56,7 @@ def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> Tuple[gr
state_manager.set_item('deep_swapper_model', deep_swapper_model) state_manager.set_item('deep_swapper_model', deep_swapper_model)
if deep_swapper_module.pre_check(): if deep_swapper_module.pre_check():
return gradio.Dropdown(value = state_manager.get_item('deep_swapper_model')), gradio.Slider(visible = has_morph_input()) return gradio.Dropdown(value = state_manager.get_item('deep_swapper_model')), gradio.Slider(visible = deep_swapper_module.has_morph_input())
return gradio.Dropdown(), gradio.Slider() return gradio.Dropdown(), gradio.Slider()

View File

@@ -4,9 +4,9 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording
from facefusion.filesystem import list_directory from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
from facefusion.typing import DownloadProvider from facefusion.types import DownloadProvider
DOWNLOAD_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None DOWNLOAD_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
@@ -36,7 +36,7 @@ def update_download_providers(download_providers : List[DownloadProvider]) -> gr
face_masker, face_masker,
voice_extractor voice_extractor
] ]
available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
processor_modules = get_processors_modules(available_processors) processor_modules = get_processors_modules(available_processors)
for module in common_modules + processor_modules: for module in common_modules + processor_modules:

View File

@@ -4,9 +4,9 @@ import gradio
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording
from facefusion.execution import get_available_execution_providers from facefusion.execution import get_available_execution_providers
from facefusion.filesystem import list_directory from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
from facefusion.typing import ExecutionProvider from facefusion.types import ExecutionProvider
EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
@@ -36,7 +36,7 @@ def update_execution_providers(execution_providers : List[ExecutionProvider]) ->
face_recognizer, face_recognizer,
voice_extractor voice_extractor
] ]
available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
processor_modules = get_processors_modules(available_processors) processor_modules = get_processors_modules(available_processors)
for module in common_modules + processor_modules: for module in common_modules + processor_modules:

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_float_step from facefusion.common_helper import calc_float_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import ExpressionRestorerModel from facefusion.processors.types import ExpressionRestorerModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
EXPRESSION_RESTORER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None EXPRESSION_RESTORER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -4,7 +4,7 @@ import gradio
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.typing import FaceDebuggerItem from facefusion.processors.types import FaceDebuggerItem
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None

View File

@@ -5,9 +5,9 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import face_detector, state_manager, wording from facefusion import face_detector, state_manager, wording
from facefusion.common_helper import calc_float_step, get_last from facefusion.common_helper import calc_float_step, get_last
from facefusion.typing import Angle, FaceDetectorModel, Score from facefusion.types import Angle, FaceDetectorModel, Score
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
from facefusion.uis.typing import ComponentOptions from facefusion.uis.types import ComponentOptions
FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_float_step from facefusion.common_helper import calc_float_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import FaceEditorModel from facefusion.processors.types import FaceEditorModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
FACE_EDITOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_EDITOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -6,8 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_float_step, calc_int_step from facefusion.common_helper import calc_float_step, calc_int_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.face_enhancer import has_weight_input from facefusion.processors.types import FaceEnhancerModel
from facefusion.processors.typing import FaceEnhancerModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -41,7 +40,7 @@ def render() -> None:
step = calc_float_step(processors_choices.face_enhancer_weight_range), step = calc_float_step(processors_choices.face_enhancer_weight_range),
minimum = processors_choices.face_enhancer_weight_range[0], minimum = processors_choices.face_enhancer_weight_range[0],
maximum = processors_choices.face_enhancer_weight_range[-1], maximum = processors_choices.face_enhancer_weight_range[-1],
visible = has_face_enhancer and has_weight_input() visible = has_face_enhancer and load_processor_module('face_enhancer').get_inference_pool() and load_processor_module('face_enhancer').has_weight_input()
) )
register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN) register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER) register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
@@ -60,7 +59,7 @@ def listen() -> None:
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.Slider]: def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.Slider]:
has_face_enhancer = 'face_enhancer' in processors has_face_enhancer = 'face_enhancer' in processors
return gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer and has_weight_input()) return gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer and load_processor_module('face_enhancer').get_inference_pool() and load_processor_module('face_enhancer').has_weight_input())
def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> Tuple[gradio.Dropdown, gradio.Slider]: def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> Tuple[gradio.Dropdown, gradio.Slider]:
@@ -69,7 +68,7 @@ def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> Tuple
state_manager.set_item('face_enhancer_model', face_enhancer_model) state_manager.set_item('face_enhancer_model', face_enhancer_model)
if face_enhancer_module.pre_check(): if face_enhancer_module.pre_check():
return gradio.Dropdown(value = state_manager.get_item('face_enhancer_model')), gradio.Slider(visible = has_weight_input()) return gradio.Dropdown(value = state_manager.get_item('face_enhancer_model')), gradio.Slider(visible = face_enhancer_module.has_weight_input())
return gradio.Dropdown(), gradio.Slider() return gradio.Dropdown(), gradio.Slider()

View File

@@ -5,7 +5,7 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import face_landmarker, state_manager, wording from facefusion import face_landmarker, state_manager, wording
from facefusion.common_helper import calc_float_step from facefusion.common_helper import calc_float_step
from facefusion.typing import FaceLandmarkerModel, Score from facefusion.types import FaceLandmarkerModel, Score
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
FACE_LANDMARKER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_LANDMARKER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -5,7 +5,7 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import face_masker, state_manager, wording from facefusion import face_masker, state_manager, wording
from facefusion.common_helper import calc_float_step, calc_int_step from facefusion.common_helper import calc_float_step, calc_int_step
from facefusion.typing import FaceMaskRegion, FaceMaskType, FaceOccluderModel, FaceParserModel from facefusion.types import FaceMaskRegion, FaceMaskType, FaceOccluderModel, FaceParserModel
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
FACE_OCCLUDER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_OCCLUDER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -10,11 +10,11 @@ from facefusion.face_analyser import get_many_faces
from facefusion.face_selector import sort_and_filter_faces from facefusion.face_selector import sort_and_filter_faces
from facefusion.face_store import clear_reference_faces, clear_static_faces from facefusion.face_store import clear_reference_faces, clear_static_faces
from facefusion.filesystem import is_image, is_video from facefusion.filesystem import is_image, is_video
from facefusion.typing import FaceSelectorMode, FaceSelectorOrder, Gender, Race, VisionFrame from facefusion.types import FaceSelectorMode, FaceSelectorOrder, Gender, Race, VisionFrame
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
from facefusion.uis.typing import ComponentOptions from facefusion.uis.types import ComponentOptions
from facefusion.uis.ui_helper import convert_str_none from facefusion.uis.ui_helper import convert_str_none
from facefusion.vision import get_video_frame, normalize_frame_color, read_static_image from facefusion.vision import normalize_frame_color, read_static_image, read_video_frame
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None FACE_SELECTOR_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -38,15 +38,16 @@ def render() -> None:
{ {
'label': wording.get('uis.reference_face_gallery'), 'label': wording.get('uis.reference_face_gallery'),
'object_fit': 'cover', 'object_fit': 'cover',
'columns': 8, 'columns': 7,
'allow_preview': False, 'allow_preview': False,
'elem_classes': 'box-face-selector',
'visible': 'reference' in state_manager.get_item('face_selector_mode') 'visible': 'reference' in state_manager.get_item('face_selector_mode')
} }
if is_image(state_manager.get_item('target_path')): if is_image(state_manager.get_item('target_path')):
reference_frame = read_static_image(state_manager.get_item('target_path')) reference_frame = read_static_image(state_manager.get_item('target_path'))
reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame) reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame)
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame) reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame)
FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown( FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.face_selector_mode_dropdown'), label = wording.get('uis.face_selector_mode_dropdown'),
@@ -112,7 +113,7 @@ def listen() -> None:
'target_image', 'target_image',
'target_video' 'target_video'
]): ]):
for method in [ 'upload', 'change', 'clear' ]: for method in [ 'change', 'clear' ]:
getattr(ui_component, method)(update_reference_face_position) getattr(ui_component, method)(update_reference_face_position)
getattr(ui_component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) getattr(ui_component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
@@ -130,8 +131,9 @@ def listen() -> None:
preview_frame_slider = get_ui_component('preview_frame_slider') preview_frame_slider = get_ui_component('preview_frame_slider')
if preview_frame_slider: if preview_frame_slider:
preview_frame_slider.release(update_reference_frame_number, inputs = preview_frame_slider) for method in [ 'change', 'release' ]:
preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) getattr(preview_frame_slider, method)(update_reference_frame_number, inputs = preview_frame_slider, show_progress = 'hidden')
getattr(preview_frame_slider, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY, show_progress = 'hidden')
def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]: def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
@@ -197,7 +199,7 @@ def update_reference_position_gallery() -> gradio.Gallery:
temp_vision_frame = read_static_image(state_manager.get_item('target_path')) temp_vision_frame = read_static_image(state_manager.get_item('target_path'))
gallery_vision_frames = extract_gallery_frames(temp_vision_frame) gallery_vision_frames = extract_gallery_frames(temp_vision_frame)
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
gallery_vision_frames = extract_gallery_frames(temp_vision_frame) gallery_vision_frames = extract_gallery_frames(temp_vision_frame)
if gallery_vision_frames: if gallery_vision_frames:
return gradio.Gallery(value = gallery_vision_frames) return gradio.Gallery(value = gallery_vision_frames)

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import get_first from facefusion.common_helper import get_first
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import FaceSwapperModel from facefusion.processors.types import FaceSwapperModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_int_step from facefusion.common_helper import calc_int_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import FrameColorizerModel from facefusion.processors.types import FrameColorizerModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import calc_int_step from facefusion.common_helper import calc_int_step
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import FrameEnhancerModel from facefusion.processors.types import FrameEnhancerModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -9,7 +9,7 @@ from facefusion.core import process_step
from facefusion.filesystem import is_directory, is_image, is_video from facefusion.filesystem import is_directory, is_image, is_video
from facefusion.jobs import job_helper, job_manager, job_runner, job_store from facefusion.jobs import job_helper, job_manager, job_runner, job_store
from facefusion.temp_helper import clear_temp_directory from facefusion.temp_helper import clear_temp_directory
from facefusion.typing import Args, UiWorkflow from facefusion.types import Args, UiWorkflow
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
from facefusion.uis.ui_helper import suggest_output_path from facefusion.uis.ui_helper import suggest_output_path
@@ -92,7 +92,7 @@ def create_and_run_job(step_args : Args) -> bool:
job_id = job_helper.suggest_job_id('ui') job_id = job_helper.suggest_job_id('ui')
for key in job_store.get_job_keys(): for key in job_store.get_job_keys():
state_manager.sync_item(key) #type:ignore state_manager.sync_item(key) #type:ignore[arg-type]
return job_manager.create_job(job_id) and job_manager.add_step(job_id, step_args) and job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step) return job_manager.create_job(job_id) and job_manager.add_step(job_id, step_args) and job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step)

View File

@@ -6,7 +6,7 @@ import facefusion.choices
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.common_helper import get_first from facefusion.common_helper import get_first
from facefusion.jobs import job_list, job_manager from facefusion.jobs import job_list, job_manager
from facefusion.typing import JobStatus from facefusion.types import JobStatus
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
JOB_LIST_JOBS_DATAFRAME : Optional[gradio.Dataframe] = None JOB_LIST_JOBS_DATAFRAME : Optional[gradio.Dataframe] = None

View File

@@ -6,7 +6,7 @@ import facefusion.choices
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.common_helper import get_first from facefusion.common_helper import get_first
from facefusion.jobs import job_manager from facefusion.jobs import job_manager
from facefusion.typing import JobStatus from facefusion.types import JobStatus
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
JOB_LIST_JOB_STATUS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None JOB_LIST_JOB_STATUS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None

View File

@@ -7,10 +7,10 @@ from facefusion.args import collect_step_args
from facefusion.common_helper import get_first, get_last from facefusion.common_helper import get_first, get_last
from facefusion.filesystem import is_directory from facefusion.filesystem import is_directory
from facefusion.jobs import job_manager from facefusion.jobs import job_manager
from facefusion.typing import UiWorkflow from facefusion.types import UiWorkflow
from facefusion.uis import choices as uis_choices from facefusion.uis import choices as uis_choices
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
from facefusion.uis.typing import JobManagerAction from facefusion.uis.types import JobManagerAction
from facefusion.uis.ui_helper import convert_int_none, convert_str_none, suggest_output_path from facefusion.uis.ui_helper import convert_int_none, convert_str_none, suggest_output_path
JOB_MANAGER_WRAPPER : Optional[gradio.Column] = None JOB_MANAGER_WRAPPER : Optional[gradio.Column] = None
@@ -89,6 +89,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
if is_directory(step_args.get('output_path')): if is_directory(step_args.get('output_path')):
step_args['output_path'] = suggest_output_path(step_args.get('output_path'), state_manager.get_item('target_path')) step_args['output_path'] = suggest_output_path(step_args.get('output_path'), state_manager.get_item('target_path'))
if job_action == 'job-create': if job_action == 'job-create':
if created_job_id and job_manager.create_job(created_job_id): if created_job_id and job_manager.create_job(created_job_id):
updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
@@ -97,6 +98,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
return gradio.Dropdown(value = 'job-add-step'), gradio.Textbox(visible = False), gradio.Dropdown(value = created_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown() return gradio.Dropdown(value = 'job-add-step'), gradio.Textbox(visible = False), gradio.Dropdown(value = created_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown()
else: else:
logger.error(wording.get('job_not_created').format(job_id = created_job_id), __name__) logger.error(wording.get('job_not_created').format(job_id = created_job_id), __name__)
if job_action == 'job-submit': if job_action == 'job-submit':
if selected_job_id and job_manager.submit_job(selected_job_id): if selected_job_id and job_manager.submit_job(selected_job_id):
updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
@@ -105,6 +107,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True), gradio.Dropdown() return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True), gradio.Dropdown()
else: else:
logger.error(wording.get('job_not_submitted').format(job_id = selected_job_id), __name__) logger.error(wording.get('job_not_submitted').format(job_id = selected_job_id), __name__)
if job_action == 'job-delete': if job_action == 'job-delete':
if selected_job_id and job_manager.delete_job(selected_job_id): if selected_job_id and job_manager.delete_job(selected_job_id):
updated_job_ids = job_manager.find_job_ids('drafted') + job_manager.find_job_ids('queued') + job_manager.find_job_ids('failed') + job_manager.find_job_ids('completed') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('drafted') + job_manager.find_job_ids('queued') + job_manager.find_job_ids('failed') + job_manager.find_job_ids('completed') or [ 'none' ]
@@ -113,6 +116,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True), gradio.Dropdown() return gradio.Dropdown(), gradio.Textbox(), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True), gradio.Dropdown()
else: else:
logger.error(wording.get('job_not_deleted').format(job_id = selected_job_id), __name__) logger.error(wording.get('job_not_deleted').format(job_id = selected_job_id), __name__)
if job_action == 'job-add-step': if job_action == 'job-add-step':
if selected_job_id and job_manager.add_step(selected_job_id, step_args): if selected_job_id and job_manager.add_step(selected_job_id, step_args):
state_manager.set_item('output_path', output_path) state_manager.set_item('output_path', output_path)
@@ -121,6 +125,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
else: else:
state_manager.set_item('output_path', output_path) state_manager.set_item('output_path', output_path)
logger.error(wording.get('job_step_not_added').format(job_id = selected_job_id), __name__) logger.error(wording.get('job_step_not_added').format(job_id = selected_job_id), __name__)
if job_action == 'job-remix-step': if job_action == 'job-remix-step':
if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.remix_step(selected_job_id, selected_step_index, step_args): if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.remix_step(selected_job_id, selected_step_index, step_args):
updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item] updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item]
@@ -131,6 +136,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
else: else:
state_manager.set_item('output_path', output_path) state_manager.set_item('output_path', output_path)
logger.error(wording.get('job_remix_step_not_added').format(job_id = selected_job_id, step_index = selected_step_index), __name__) logger.error(wording.get('job_remix_step_not_added').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
if job_action == 'job-insert-step': if job_action == 'job-insert-step':
if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.insert_step(selected_job_id, selected_step_index, step_args): if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.insert_step(selected_job_id, selected_step_index, step_args):
updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item] updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item]
@@ -141,6 +147,7 @@ def apply(job_action : JobManagerAction, created_job_id : str, selected_job_id :
else: else:
state_manager.set_item('output_path', output_path) state_manager.set_item('output_path', output_path)
logger.error(wording.get('job_step_not_inserted').format(job_id = selected_job_id, step_index = selected_step_index), __name__) logger.error(wording.get('job_step_not_inserted').format(job_id = selected_job_id, step_index = selected_step_index), __name__)
if job_action == 'job-remove-step': if job_action == 'job-remove-step':
if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.remove_step(selected_job_id, selected_step_index): if selected_job_id and job_manager.has_step(selected_job_id, selected_step_index) and job_manager.remove_step(selected_job_id, selected_step_index):
updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item] updated_step_choices = get_step_choices(selected_job_id) or [ 'none' ] #type:ignore[list-item]
@@ -160,16 +167,19 @@ def get_step_choices(job_id : str) -> List[int]:
def update(job_action : JobManagerAction, selected_job_id : str) -> Tuple[gradio.Textbox, gradio.Dropdown, gradio.Dropdown]: def update(job_action : JobManagerAction, selected_job_id : str) -> Tuple[gradio.Textbox, gradio.Dropdown, gradio.Dropdown]:
if job_action == 'job-create': if job_action == 'job-create':
return gradio.Textbox(value = None, visible = True), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False) return gradio.Textbox(value = None, visible = True), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False)
if job_action == 'job-delete': if job_action == 'job-delete':
updated_job_ids = job_manager.find_job_ids('drafted') + job_manager.find_job_ids('queued') + job_manager.find_job_ids('failed') + job_manager.find_job_ids('completed') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('drafted') + job_manager.find_job_ids('queued') + job_manager.find_job_ids('failed') + job_manager.find_job_ids('completed') or [ 'none' ]
updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids) updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids)
return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(visible = False) return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(visible = False)
if job_action in [ 'job-submit', 'job-add-step' ]: if job_action in [ 'job-submit', 'job-add-step' ]:
updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids) updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids)
return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(visible = False) return gradio.Textbox(visible = False), gradio.Dropdown(value = updated_job_id, choices = updated_job_ids, visible = True), gradio.Dropdown(visible = False)
if job_action in [ 'job-remix-step', 'job-insert-step', 'job-remove-step' ]: if job_action in [ 'job-remix-step', 'job-insert-step', 'job-remove-step' ]:
updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('drafted') or [ 'none' ]
updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids) updated_job_id = selected_job_id if selected_job_id in updated_job_ids else get_last(updated_job_ids)

View File

@@ -7,10 +7,10 @@ from facefusion import logger, process_manager, state_manager, wording
from facefusion.common_helper import get_first, get_last from facefusion.common_helper import get_first, get_last
from facefusion.core import process_step from facefusion.core import process_step
from facefusion.jobs import job_manager, job_runner, job_store from facefusion.jobs import job_manager, job_runner, job_store
from facefusion.typing import UiWorkflow from facefusion.types import UiWorkflow
from facefusion.uis import choices as uis_choices from facefusion.uis import choices as uis_choices
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
from facefusion.uis.typing import JobRunnerAction from facefusion.uis.types import JobRunnerAction
from facefusion.uis.ui_helper import convert_str_none from facefusion.uis.ui_helper import convert_str_none
JOB_RUNNER_WRAPPER : Optional[gradio.Column] = None JOB_RUNNER_WRAPPER : Optional[gradio.Column] = None
@@ -84,7 +84,7 @@ def run(job_action : JobRunnerAction, job_id : str) -> Tuple[gradio.Button, grad
job_id = convert_str_none(job_id) job_id = convert_str_none(job_id)
for key in job_store.get_job_keys(): for key in job_store.get_job_keys():
state_manager.sync_item(key) #type:ignore state_manager.sync_item(key) #type:ignore[arg-type]
if job_action == 'job-run': if job_action == 'job-run':
logger.info(wording.get('running_job').format(job_id = job_id), __name__) logger.info(wording.get('running_job').format(job_id = job_id), __name__)
@@ -95,12 +95,15 @@ def run(job_action : JobRunnerAction, job_id : str) -> Tuple[gradio.Button, grad
updated_job_ids = job_manager.find_job_ids('queued') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('queued') or [ 'none' ]
return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids) return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids)
if job_action == 'job-run-all': if job_action == 'job-run-all':
logger.info(wording.get('running_jobs'), __name__) logger.info(wording.get('running_jobs'), __name__)
if job_runner.run_jobs(process_step): halt_on_error = False
if job_runner.run_jobs(process_step, halt_on_error):
logger.info(wording.get('processing_jobs_succeed'), __name__) logger.info(wording.get('processing_jobs_succeed'), __name__)
else: else:
logger.info(wording.get('processing_jobs_failed'), __name__) logger.info(wording.get('processing_jobs_failed'), __name__)
if job_action == 'job-retry': if job_action == 'job-retry':
logger.info(wording.get('retrying_job').format(job_id = job_id), __name__) logger.info(wording.get('retrying_job').format(job_id = job_id), __name__)
if job_id and job_runner.retry_job(job_id, process_step): if job_id and job_runner.retry_job(job_id, process_step):
@@ -110,9 +113,11 @@ def run(job_action : JobRunnerAction, job_id : str) -> Tuple[gradio.Button, grad
updated_job_ids = job_manager.find_job_ids('failed') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('failed') or [ 'none' ]
return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids) return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids)
if job_action == 'job-retry-all': if job_action == 'job-retry-all':
logger.info(wording.get('retrying_jobs'), __name__) logger.info(wording.get('retrying_jobs'), __name__)
if job_runner.retry_jobs(process_step): halt_on_error = False
if job_runner.retry_jobs(process_step, halt_on_error):
logger.info(wording.get('processing_jobs_succeed'), __name__) logger.info(wording.get('processing_jobs_succeed'), __name__)
else: else:
logger.info(wording.get('processing_jobs_failed'), __name__) logger.info(wording.get('processing_jobs_failed'), __name__)
@@ -129,6 +134,7 @@ def update_job_action(job_action : JobRunnerAction) -> gradio.Dropdown:
updated_job_ids = job_manager.find_job_ids('queued') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('queued') or [ 'none' ]
return gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True) return gradio.Dropdown(value = get_last(updated_job_ids), choices = updated_job_ids, visible = True)
if job_action == 'job-retry': if job_action == 'job-retry':
updated_job_ids = job_manager.find_job_ids('failed') or [ 'none' ] updated_job_ids = job_manager.find_job_ids('failed') or [ 'none' ]

View File

@@ -5,7 +5,7 @@ import gradio
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.processors import choices as processors_choices from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module from facefusion.processors.core import load_processor_module
from facefusion.processors.typing import LipSyncerModel from facefusion.processors.types import LipSyncerModel
from facefusion.uis.core import get_ui_component, register_ui_component from facefusion.uis.core import get_ui_component, register_ui_component
LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None

View File

@@ -5,7 +5,7 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.common_helper import calc_int_step from facefusion.common_helper import calc_int_step
from facefusion.typing import VideoMemoryStrategy from facefusion.types import VideoMemoryStrategy
VIDEO_MEMORY_STRATEGY_DROPDOWN : Optional[gradio.Dropdown] = None VIDEO_MEMORY_STRATEGY_DROPDOWN : Optional[gradio.Dropdown] = None
SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None

View File

@@ -5,14 +5,17 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.common_helper import calc_int_step from facefusion.common_helper import calc_int_step
from facefusion.ffmpeg import get_available_encoder_set
from facefusion.filesystem import is_image, is_video from facefusion.filesystem import is_image, is_video
from facefusion.typing import Fps, OutputAudioEncoder, OutputVideoEncoder, OutputVideoPreset from facefusion.types import AudioEncoder, Fps, VideoEncoder, VideoPreset
from facefusion.uis.core import get_ui_components, register_ui_component from facefusion.uis.core import get_ui_components, register_ui_component
from facefusion.vision import create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_fps, detect_video_resolution, pack_resolution from facefusion.vision import create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_fps, detect_video_resolution, pack_resolution
OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None
OUTPUT_IMAGE_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_IMAGE_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_AUDIO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_AUDIO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_AUDIO_QUALITY_SLIDER : Optional[gradio.Slider] = None
OUTPUT_AUDIO_VOLUME_SLIDER : Optional[gradio.Slider] = None
OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_VIDEO_PRESET_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_PRESET_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_VIDEO_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -24,6 +27,8 @@ def render() -> None:
global OUTPUT_IMAGE_QUALITY_SLIDER global OUTPUT_IMAGE_QUALITY_SLIDER
global OUTPUT_IMAGE_RESOLUTION_DROPDOWN global OUTPUT_IMAGE_RESOLUTION_DROPDOWN
global OUTPUT_AUDIO_ENCODER_DROPDOWN global OUTPUT_AUDIO_ENCODER_DROPDOWN
global OUTPUT_AUDIO_QUALITY_SLIDER
global OUTPUT_AUDIO_VOLUME_SLIDER
global OUTPUT_VIDEO_ENCODER_DROPDOWN global OUTPUT_VIDEO_ENCODER_DROPDOWN
global OUTPUT_VIDEO_PRESET_DROPDOWN global OUTPUT_VIDEO_PRESET_DROPDOWN
global OUTPUT_VIDEO_RESOLUTION_DROPDOWN global OUTPUT_VIDEO_RESOLUTION_DROPDOWN
@@ -32,6 +37,7 @@ def render() -> None:
output_image_resolutions = [] output_image_resolutions = []
output_video_resolutions = [] output_video_resolutions = []
available_encoder_set = get_available_encoder_set()
if is_image(state_manager.get_item('target_path')): if is_image(state_manager.get_item('target_path')):
output_image_resolution = detect_image_resolution(state_manager.get_item('target_path')) output_image_resolution = detect_image_resolution(state_manager.get_item('target_path'))
output_image_resolutions = create_image_resolutions(output_image_resolution) output_image_resolutions = create_image_resolutions(output_image_resolution)
@@ -54,13 +60,29 @@ def render() -> None:
) )
OUTPUT_AUDIO_ENCODER_DROPDOWN = gradio.Dropdown( OUTPUT_AUDIO_ENCODER_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.output_audio_encoder_dropdown'), label = wording.get('uis.output_audio_encoder_dropdown'),
choices = facefusion.choices.output_audio_encoders, choices = available_encoder_set.get('audio'),
value = state_manager.get_item('output_audio_encoder'), value = state_manager.get_item('output_audio_encoder'),
visible = is_video(state_manager.get_item('target_path')) visible = is_video(state_manager.get_item('target_path'))
) )
OUTPUT_AUDIO_QUALITY_SLIDER = gradio.Slider(
label = wording.get('uis.output_audio_quality_slider'),
value = state_manager.get_item('output_audio_quality'),
step = calc_int_step(facefusion.choices.output_audio_quality_range),
minimum = facefusion.choices.output_audio_quality_range[0],
maximum = facefusion.choices.output_audio_quality_range[-1],
visible = is_video(state_manager.get_item('target_path'))
)
OUTPUT_AUDIO_VOLUME_SLIDER = gradio.Slider(
label = wording.get('uis.output_audio_volume_slider'),
value = state_manager.get_item('output_audio_volume'),
step = calc_int_step(facefusion.choices.output_audio_volume_range),
minimum = facefusion.choices.output_audio_volume_range[0],
maximum = facefusion.choices.output_audio_volume_range[-1],
visible = is_video(state_manager.get_item('target_path'))
)
OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.output_video_encoder_dropdown'), label = wording.get('uis.output_video_encoder_dropdown'),
choices = facefusion.choices.output_video_encoders, choices = available_encoder_set.get('video'),
value = state_manager.get_item('output_video_encoder'), value = state_manager.get_item('output_video_encoder'),
visible = is_video(state_manager.get_item('target_path')) visible = is_video(state_manager.get_item('target_path'))
) )
@@ -99,6 +121,8 @@ def listen() -> None:
OUTPUT_IMAGE_QUALITY_SLIDER.release(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER) OUTPUT_IMAGE_QUALITY_SLIDER.release(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
OUTPUT_IMAGE_RESOLUTION_DROPDOWN.change(update_output_image_resolution, inputs = OUTPUT_IMAGE_RESOLUTION_DROPDOWN) OUTPUT_IMAGE_RESOLUTION_DROPDOWN.change(update_output_image_resolution, inputs = OUTPUT_IMAGE_RESOLUTION_DROPDOWN)
OUTPUT_AUDIO_ENCODER_DROPDOWN.change(update_output_audio_encoder, inputs = OUTPUT_AUDIO_ENCODER_DROPDOWN) OUTPUT_AUDIO_ENCODER_DROPDOWN.change(update_output_audio_encoder, inputs = OUTPUT_AUDIO_ENCODER_DROPDOWN)
OUTPUT_AUDIO_QUALITY_SLIDER.release(update_output_audio_quality, inputs = OUTPUT_AUDIO_QUALITY_SLIDER)
OUTPUT_AUDIO_VOLUME_SLIDER.release(update_output_audio_volume, inputs = OUTPUT_AUDIO_VOLUME_SLIDER)
OUTPUT_VIDEO_ENCODER_DROPDOWN.change(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) OUTPUT_VIDEO_ENCODER_DROPDOWN.change(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
OUTPUT_VIDEO_PRESET_DROPDOWN.change(update_output_video_preset, inputs = OUTPUT_VIDEO_PRESET_DROPDOWN) OUTPUT_VIDEO_PRESET_DROPDOWN.change(update_output_video_preset, inputs = OUTPUT_VIDEO_PRESET_DROPDOWN)
OUTPUT_VIDEO_QUALITY_SLIDER.release(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER) OUTPUT_VIDEO_QUALITY_SLIDER.release(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
@@ -110,23 +134,23 @@ def listen() -> None:
'target_image', 'target_image',
'target_video' 'target_video'
]): ]):
for method in [ 'upload', 'change', 'clear' ]: for method in [ 'change', 'clear' ]:
getattr(ui_component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_RESOLUTION_DROPDOWN, OUTPUT_AUDIO_ENCODER_DROPDOWN, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_FPS_SLIDER ]) getattr(ui_component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_RESOLUTION_DROPDOWN, OUTPUT_AUDIO_ENCODER_DROPDOWN, OUTPUT_AUDIO_QUALITY_SLIDER, OUTPUT_AUDIO_VOLUME_SLIDER, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_FPS_SLIDER ])
def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider]: def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider]:
if is_image(state_manager.get_item('target_path')): if is_image(state_manager.get_item('target_path')):
output_image_resolution = detect_image_resolution(state_manager.get_item('target_path')) output_image_resolution = detect_image_resolution(state_manager.get_item('target_path'))
output_image_resolutions = create_image_resolutions(output_image_resolution) output_image_resolutions = create_image_resolutions(output_image_resolution)
state_manager.set_item('output_image_resolution', pack_resolution(output_image_resolution)) state_manager.set_item('output_image_resolution', pack_resolution(output_image_resolution))
return gradio.Slider(visible = True), gradio.Dropdown(value = state_manager.get_item('output_image_resolution'), choices = output_image_resolutions, visible = True), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False) return gradio.Slider(visible = True), gradio.Dropdown(value = state_manager.get_item('output_image_resolution'), choices = output_image_resolutions, visible = True), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False)
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
output_video_resolution = detect_video_resolution(state_manager.get_item('target_path')) output_video_resolution = detect_video_resolution(state_manager.get_item('target_path'))
output_video_resolutions = create_video_resolutions(output_video_resolution) output_video_resolutions = create_video_resolutions(output_video_resolution)
state_manager.set_item('output_video_resolution', pack_resolution(output_video_resolution)) state_manager.set_item('output_video_resolution', pack_resolution(output_video_resolution))
state_manager.set_item('output_video_fps', detect_video_fps(state_manager.get_item('target_path'))) state_manager.set_item('output_video_fps', detect_video_fps(state_manager.get_item('target_path')))
return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = True), gradio.Dropdown(visible = True), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Dropdown(value = state_manager.get_item('output_video_resolution'), choices = output_video_resolutions, visible = True), gradio.Slider(value = state_manager.get_item('output_video_fps'), visible = True) return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Slider(visible = True), gradio.Dropdown(visible = True), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Dropdown(value = state_manager.get_item('output_video_resolution'), choices = output_video_resolutions, visible = True), gradio.Slider(value = state_manager.get_item('output_video_fps'), visible = True)
return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False) return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False)
def update_output_image_quality(output_image_quality : float) -> None: def update_output_image_quality(output_image_quality : float) -> None:
@@ -137,15 +161,23 @@ def update_output_image_resolution(output_image_resolution : str) -> None:
state_manager.set_item('output_image_resolution', output_image_resolution) state_manager.set_item('output_image_resolution', output_image_resolution)
def update_output_audio_encoder(output_audio_encoder : OutputAudioEncoder) -> None: def update_output_audio_encoder(output_audio_encoder : AudioEncoder) -> None:
state_manager.set_item('output_audio_encoder', output_audio_encoder) state_manager.set_item('output_audio_encoder', output_audio_encoder)
def update_output_video_encoder(output_video_encoder : OutputVideoEncoder) -> None: def update_output_audio_quality(output_audio_quality : float) -> None:
state_manager.set_item('output_audio_quality', int(output_audio_quality))
def update_output_audio_volume(output_audio_volume: float) -> None:
state_manager.set_item('output_audio_volume', int(output_audio_volume))
def update_output_video_encoder(output_video_encoder : VideoEncoder) -> None:
state_manager.set_item('output_video_encoder', output_video_encoder) state_manager.set_item('output_video_encoder', output_video_encoder)
def update_output_video_preset(output_video_preset : OutputVideoPreset) -> None: def update_output_video_preset(output_video_preset : VideoPreset) -> None:
state_manager.set_item('output_video_preset', output_video_preset) state_manager.set_item('output_video_preset', output_video_preset)

View File

@@ -15,10 +15,10 @@ from facefusion.face_selector import sort_faces_by_order
from facefusion.face_store import clear_reference_faces, clear_static_faces, get_reference_faces from facefusion.face_store import clear_reference_faces, clear_static_faces, get_reference_faces
from facefusion.filesystem import filter_audio_paths, is_image, is_video from facefusion.filesystem import filter_audio_paths, is_image, is_video
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
from facefusion.typing import AudioFrame, Face, FaceSet, VisionFrame from facefusion.types import AudioFrame, Face, FaceSet, VisionFrame
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
from facefusion.uis.typing import ComponentOptions from facefusion.uis.types import ComponentOptions
from facefusion.vision import count_video_frame_total, detect_frame_orientation, get_video_frame, normalize_frame_color, read_static_image, read_static_images, resize_frame_resolution from facefusion.vision import count_video_frame_total, detect_frame_orientation, normalize_frame_color, read_static_image, read_static_images, read_video_frame, restrict_frame
PREVIEW_IMAGE : Optional[gradio.Image] = None PREVIEW_IMAGE : Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
@@ -60,7 +60,7 @@ def render() -> None:
preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ] preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
preview_image_options['value'] = normalize_frame_color(preview_vision_frame) preview_image_options['value'] = normalize_frame_color(preview_vision_frame)
preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ] preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]
@@ -88,7 +88,7 @@ def listen() -> None:
'target_image', 'target_image',
'target_video' 'target_video'
]): ]):
for method in [ 'upload', 'change', 'clear' ]: for method in [ 'change', 'clear' ]:
getattr(ui_component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) getattr(ui_component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
for ui_component in get_ui_components( for ui_component in get_ui_components(
@@ -96,7 +96,7 @@ def listen() -> None:
'target_image', 'target_image',
'target_video' 'target_video'
]): ]):
for method in [ 'upload', 'change', 'clear' ]: for method in [ 'change', 'clear' ]:
getattr(ui_component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER) getattr(ui_component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
for ui_component in get_ui_components( for ui_component in get_ui_components(
@@ -184,8 +184,8 @@ def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
def slide_preview_image(frame_number : int = 0) -> gradio.Image: def slide_preview_image(frame_number : int = 0) -> gradio.Image:
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path'), frame_number)) preview_vision_frame = normalize_frame_color(read_video_frame(state_manager.get_item('target_path'), frame_number))
preview_vision_frame = resize_frame_resolution(preview_vision_frame, (1024, 1024)) preview_vision_frame = restrict_frame(preview_vision_frame, (1024, 1024))
return gradio.Image(value = preview_vision_frame) return gradio.Image(value = preview_vision_frame)
return gradio.Image(value = None) return gradio.Image(value = None)
@@ -222,7 +222,7 @@ def update_preview_image(frame_number : int = 0) -> gradio.Image:
return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]) return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ])
if is_video(state_manager.get_item('target_path')): if is_video(state_manager.get_item('target_path')):
temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number) temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number)
preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
preview_vision_frame = normalize_frame_color(preview_vision_frame) preview_vision_frame = normalize_frame_color(preview_vision_frame)
return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]) return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ])
@@ -237,7 +237,7 @@ def update_preview_frame_slider() -> gradio.Slider:
def process_preview_frame(reference_faces : FaceSet, source_face : Face, source_audio_frame : AudioFrame, target_vision_frame : VisionFrame) -> VisionFrame: def process_preview_frame(reference_faces : FaceSet, source_face : Face, source_audio_frame : AudioFrame, target_vision_frame : VisionFrame) -> VisionFrame:
target_vision_frame = resize_frame_resolution(target_vision_frame, (1024, 1024)) target_vision_frame = restrict_frame(target_vision_frame, (1024, 1024))
source_vision_frame = target_vision_frame.copy() source_vision_frame = target_vision_frame.copy()
if analyse_frame(target_vision_frame): if analyse_frame(target_vision_frame):
return cv2.GaussianBlur(target_vision_frame, (99, 99), 0) return cv2.GaussianBlur(target_vision_frame, (99, 99), 0)

View File

@@ -3,7 +3,7 @@ from typing import List, Optional
import gradio import gradio
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.filesystem import list_directory from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
@@ -39,5 +39,5 @@ def update_processors(processors : List[str]) -> gradio.CheckboxGroup:
def sort_processors(processors : List[str]) -> List[str]: def sort_processors(processors : List[str]) -> List[str]:
available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
return sorted(available_processors, key = lambda processor : processors.index(processor) if processor in processors else len(processors)) return sorted(available_processors, key = lambda processor : processors.index(processor) if processor in processors else len(processors))

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.common_helper import get_first from facefusion.common_helper import get_first
from facefusion.filesystem import filter_audio_paths, filter_image_paths, has_audio, has_image from facefusion.filesystem import filter_audio_paths, filter_image_paths, has_audio, has_image
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
from facefusion.uis.typing import File from facefusion.uis.types import File
SOURCE_FILE : Optional[gradio.File] = None SOURCE_FILE : Optional[gradio.File] = None
SOURCE_AUDIO : Optional[gradio.Audio] = None SOURCE_AUDIO : Optional[gradio.Audio] = None
@@ -23,11 +23,6 @@ def render() -> None:
SOURCE_FILE = gradio.File( SOURCE_FILE = gradio.File(
label = wording.get('uis.source_file'), label = wording.get('uis.source_file'),
file_count = 'multiple', file_count = 'multiple',
file_types =
[
'audio',
'image'
],
value = state_manager.get_item('source_paths') if has_source_audio or has_source_image else None value = state_manager.get_item('source_paths') if has_source_audio or has_source_image else None
) )
source_file_names = [ source_file_value.get('path') for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None source_file_names = [ source_file_value.get('path') for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None
@@ -55,10 +50,12 @@ def update(files : List[File]) -> Tuple[gradio.Audio, gradio.Image]:
file_names = [ file.name for file in files ] if files else None file_names = [ file.name for file in files ] if files else None
has_source_audio = has_audio(file_names) has_source_audio = has_audio(file_names)
has_source_image = has_image(file_names) has_source_image = has_image(file_names)
if has_source_audio or has_source_image: if has_source_audio or has_source_image:
source_audio_path = get_first(filter_audio_paths(file_names)) source_audio_path = get_first(filter_audio_paths(file_names))
source_image_path = get_first(filter_image_paths(file_names)) source_image_path = get_first(filter_image_paths(file_names))
state_manager.set_item('source_paths', file_names) state_manager.set_item('source_paths', file_names)
return gradio.Audio(value = source_audio_path, visible = has_source_audio), gradio.Image(value = source_image_path, visible = has_source_image) return gradio.Audio(value = source_audio_path, visible = has_source_audio), gradio.Image(value = source_image_path, visible = has_source_image)
state_manager.clear_item('source_paths') state_manager.clear_item('source_paths')
return gradio.Audio(value = None, visible = False), gradio.Image(value = None, visible = False) return gradio.Audio(value = None, visible = False), gradio.Image(value = None, visible = False)

View File

@@ -4,12 +4,9 @@ import gradio
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.face_store import clear_reference_faces, clear_static_faces from facefusion.face_store import clear_reference_faces, clear_static_faces
from facefusion.filesystem import get_file_size, is_image, is_video from facefusion.filesystem import is_image, is_video
from facefusion.uis.core import register_ui_component from facefusion.uis.core import register_ui_component
from facefusion.uis.typing import ComponentOptions, File from facefusion.uis.types import ComponentOptions, File
from facefusion.vision import get_video_frame, normalize_frame_color
FILE_SIZE_LIMIT = 512 * 1024 * 1024
TARGET_FILE : Optional[gradio.File] = None TARGET_FILE : Optional[gradio.File] = None
TARGET_IMAGE : Optional[gradio.Image] = None TARGET_IMAGE : Optional[gradio.Image] = None
@@ -25,12 +22,6 @@ def render() -> None:
is_target_video = is_video(state_manager.get_item('target_path')) is_target_video = is_video(state_manager.get_item('target_path'))
TARGET_FILE = gradio.File( TARGET_FILE = gradio.File(
label = wording.get('uis.target_file'), label = wording.get('uis.target_file'),
file_count = 'single',
file_types =
[
'image',
'video'
],
value = state_manager.get_item('target_path') if is_target_image or is_target_video else None value = state_manager.get_item('target_path') if is_target_image or is_target_video else None
) )
target_image_options : ComponentOptions =\ target_image_options : ComponentOptions =\
@@ -47,13 +38,8 @@ def render() -> None:
target_image_options['value'] = TARGET_FILE.value.get('path') target_image_options['value'] = TARGET_FILE.value.get('path')
target_image_options['visible'] = True target_image_options['visible'] = True
if is_target_video: if is_target_video:
if get_file_size(state_manager.get_item('target_path')) > FILE_SIZE_LIMIT: target_video_options['value'] = TARGET_FILE.value.get('path')
preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path'))) target_video_options['visible'] = True
target_image_options['value'] = preview_vision_frame
target_image_options['visible'] = True
else:
target_video_options['value'] = TARGET_FILE.value.get('path')
target_video_options['visible'] = True
TARGET_IMAGE = gradio.Image(**target_image_options) TARGET_IMAGE = gradio.Image(**target_image_options)
TARGET_VIDEO = gradio.Video(**target_video_options) TARGET_VIDEO = gradio.Video(**target_video_options)
register_ui_component('target_image', TARGET_IMAGE) register_ui_component('target_image', TARGET_IMAGE)
@@ -67,14 +53,14 @@ def listen() -> None:
def update(file : File) -> Tuple[gradio.Image, gradio.Video]: def update(file : File) -> Tuple[gradio.Image, gradio.Video]:
clear_reference_faces() clear_reference_faces()
clear_static_faces() clear_static_faces()
if file and is_image(file.name): if file and is_image(file.name):
state_manager.set_item('target_path', file.name) state_manager.set_item('target_path', file.name)
return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False) return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False)
if file and is_video(file.name): if file and is_video(file.name):
state_manager.set_item('target_path', file.name) state_manager.set_item('target_path', file.name)
if get_file_size(file.name) > FILE_SIZE_LIMIT:
preview_vision_frame = normalize_frame_color(get_video_frame(file.name))
return gradio.Image(value = preview_vision_frame, visible = True), gradio.Video(value = None, visible = False)
return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True) return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True)
state_manager.clear_item('target_path') state_manager.clear_item('target_path')
return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False) return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False)

View File

@@ -5,7 +5,7 @@ import gradio
import facefusion.choices import facefusion.choices
from facefusion import state_manager, wording from facefusion import state_manager, wording
from facefusion.filesystem import is_video from facefusion.filesystem import is_video
from facefusion.typing import TempFrameFormat from facefusion.types import TempFrameFormat
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -27,7 +27,7 @@ def listen() -> None:
target_video = get_ui_component('target_video') target_video = get_ui_component('target_video')
if target_video: if target_video:
for method in [ 'upload', 'change', 'clear' ]: for method in [ 'change', 'clear' ]:
getattr(target_video, method)(remote_update, outputs = TEMP_FRAME_FORMAT_DROPDOWN) getattr(target_video, method)(remote_update, outputs = TEMP_FRAME_FORMAT_DROPDOWN)

View File

@@ -9,7 +9,7 @@ from tqdm import tqdm
import facefusion.choices import facefusion.choices
from facefusion import logger, state_manager, wording from facefusion import logger, state_manager, wording
from facefusion.typing import LogLevel from facefusion.types import LogLevel
LOG_LEVEL_DROPDOWN : Optional[gradio.Dropdown] = None LOG_LEVEL_DROPDOWN : Optional[gradio.Dropdown] = None
TERMINAL_TEXTBOX : Optional[gradio.Textbox] = None TERMINAL_TEXTBOX : Optional[gradio.Textbox] = None
@@ -38,8 +38,6 @@ def render() -> None:
def listen() -> None: def listen() -> None:
global LOG_LEVEL_DROPDOWN
LOG_LEVEL_DROPDOWN.change(update_log_level, inputs = LOG_LEVEL_DROPDOWN) LOG_LEVEL_DROPDOWN.change(update_log_level, inputs = LOG_LEVEL_DROPDOWN)
logger.get_package_logger().addHandler(LOG_HANDLER) logger.get_package_logger().addHandler(LOG_HANDLER)
tqdm.update = tqdm_update tqdm.update = tqdm_update
@@ -78,5 +76,5 @@ def create_tqdm_output(self : tqdm) -> Optional[str]:
def read_logs() -> str: def read_logs() -> str:
LOG_BUFFER.seek(0) LOG_BUFFER.seek(0)
logs = LOG_BUFFER.read().rstrip() logs = LOG_BUFFER.read().strip()
return logs return logs

View File

@@ -6,7 +6,7 @@ from facefusion import state_manager, wording
from facefusion.face_store import clear_static_faces from facefusion.face_store import clear_static_faces
from facefusion.filesystem import is_video from facefusion.filesystem import is_video
from facefusion.uis.core import get_ui_components from facefusion.uis.core import get_ui_components
from facefusion.uis.typing import ComponentOptions from facefusion.uis.types import ComponentOptions
from facefusion.vision import count_video_frame_total from facefusion.vision import count_video_frame_total
TRIM_FRAME_RANGE_SLIDER : Optional[RangeSlider] = None TRIM_FRAME_RANGE_SLIDER : Optional[RangeSlider] = None
@@ -39,7 +39,7 @@ def listen() -> None:
'target_image', 'target_image',
'target_video' 'target_video'
]): ]):
for method in [ 'upload', 'change', 'clear' ]: for method in [ 'change', 'clear' ]:
getattr(ui_component, method)(remote_update, outputs = [ TRIM_FRAME_RANGE_SLIDER ]) getattr(ui_component, method)(remote_update, outputs = [ TRIM_FRAME_RANGE_SLIDER ])

View File

@@ -8,17 +8,16 @@ import cv2
import gradio import gradio
from tqdm import tqdm from tqdm import tqdm
from facefusion import logger, state_manager, wording from facefusion import ffmpeg_builder, logger, state_manager, wording
from facefusion.audio import create_empty_audio_frame from facefusion.audio import create_empty_audio_frame
from facefusion.common_helper import get_first, is_windows from facefusion.common_helper import is_windows
from facefusion.content_analyser import analyse_stream from facefusion.content_analyser import analyse_stream
from facefusion.face_analyser import get_average_face, get_many_faces from facefusion.face_analyser import get_average_face, get_many_faces
from facefusion.ffmpeg import open_ffmpeg from facefusion.ffmpeg import open_ffmpeg
from facefusion.filesystem import filter_image_paths from facefusion.filesystem import filter_image_paths, is_directory
from facefusion.processors.core import get_processors_modules from facefusion.processors.core import get_processors_modules
from facefusion.typing import Face, Fps, VisionFrame from facefusion.types import Face, Fps, StreamMode, VisionFrame, WebcamMode
from facefusion.uis.core import get_ui_component from facefusion.uis.core import get_ui_component
from facefusion.uis.typing import StreamMode, WebcamMode
from facefusion.vision import normalize_frame_color, read_static_images, unpack_resolution from facefusion.vision import normalize_frame_color, read_static_images, unpack_resolution
WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None
@@ -164,17 +163,32 @@ def process_stream_frame(source_face : Face, target_vision_frame : VisionFrame)
def open_stream(stream_mode : StreamMode, stream_resolution : str, stream_fps : Fps) -> subprocess.Popen[bytes]: def open_stream(stream_mode : StreamMode, stream_resolution : str, stream_fps : Fps) -> subprocess.Popen[bytes]:
commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', stream_resolution, '-r', str(stream_fps), '-i', '-'] commands = ffmpeg_builder.chain(
ffmpeg_builder.capture_video(),
ffmpeg_builder.set_media_resolution(stream_resolution),
ffmpeg_builder.set_conditional_fps(stream_fps)
)
if stream_mode == 'udp': if stream_mode == 'udp':
commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ]) commands.extend(ffmpeg_builder.set_input('-'))
commands.extend(ffmpeg_builder.set_stream_mode('udp'))
commands.extend(ffmpeg_builder.set_output('udp://localhost:27000?pkt_size=1316'))
if stream_mode == 'v4l2': if stream_mode == 'v4l2':
try: device_directory_path = '/sys/devices/virtual/video4linux'
device_name = get_first(os.listdir('/sys/devices/virtual/video4linux'))
if device_name: commands.extend(ffmpeg_builder.set_input('-'))
commands.extend([ '-f', 'v4l2', '/dev/' + device_name ]) commands.extend(ffmpeg_builder.set_stream_mode('v4l2'))
except FileNotFoundError: if is_directory(device_directory_path):
device_names = os.listdir(device_directory_path)
for device_name in device_names:
device_path = '/dev/' + device_name
commands.extend(ffmpeg_builder.set_output(device_path))
else:
logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__) logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__)
return open_ffmpeg(commands) return open_ffmpeg(commands)

Some files were not shown because too many files have changed in this diff Show More