* Mark as NEXT

* Reduce caching to avoid RAM explosion

* Reduce caching to avoid RAM explosion

* Update dependencies

* add face-detector-pad-factor

* update facefusion.ini

* fix test

* change pad to margin

* fix order

* add prepare margin

* use 50% max margin

* Minor fixes part2

* Minor fixes part3

* Minor fixes part4

* Minor fixes part1

* Downgrade onnxruntime as of BiRefNet broken on CPU

add test

update

update facefusion.ini

add birefnet

* rename models

add more models

* Fix versions

* Add .claude to gitignore

* add normalize color

add 4 channel

add colors

* workflows

* cleanup

* cleanup

* cleanup

* cleanup

* add more models (#961)

* Fix naming

* changes

* Fix style and mock Gradio

* Fix style and mock Gradio

* Fix style and mock Gradio

* apply clamp

* remove clamp

* Add normalizer test

* Introduce sanitizer for the rescue (#963)

* Introduce sanitizer for the rescue

* Introduce sanitizer for the rescue

* Introduce sanitizer for the rescue

* prepare ffmpeg for alpha support

* Some cleanup

* Some cleanup

* Fix CI

* List as TypeAlias is not allowed (#967)

* List as TypeAlias is not allowed

* List as TypeAlias is not allowed

* List as TypeAlias is not allowed

* List as TypeAlias is not allowed

* Add mpeg and mxf support (#968)

* Add mpeg support

* Add mxf support

* Adjust fix_xxx_encoder for the new formats

* Extend output pattern for batch-run (#969)

* Extend output pattern for batch-run

* Add {target_extension} to allowed mixed files

* Catch invalid output pattern keys

* alpha support

* cleanup

* cleanup

* add ProcessorOutputs type

* fix preview and streamer, support alpha for background_remover

* Refactor/open close processors (#972)

* Introduce open/close processors

* Add locales for translator

* Introduce __autoload__ for translator

* More cleanup

* Fix import issues

* Resolve the scope situation for locals

* Fix installer by not using translator

* Fixes after merge

* Fixes after merge

* Fix translator keys in ui

* Use LOCALS in installer

* Update and partial fix DirectML

* Use latest onnxruntime

* Fix performance

* Fix lint issues

* fix mask

* fix lint

* fix lint

* Remove default from translator.get()

* remove 'framerate='

* fix test

* Rename and reorder models

* Align naming

* add alpha preview

* fix frame-by-frame

* Add alpha effect via css

* preview support alpha channel

* fix preview modes

* Use official assets repositories

* Add support for u2net_cloth

* fix naming

* Add more models

* Add vendor, license and year direct to the models

* Add vendor, license and year direct to the models

* Update dependencies, Minor CSS adjustment

* Ready for 3.5.0

* Fix naming

* Update about messages

* Fix return

* Use groups to show/hide

* Update preview

* Conditional merge mask

* Conditional merge mask

* Fix import order

---------

Co-authored-by: harisreedhar <h4harisreedhar.s.s@gmail.com>
Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
This commit is contained in:
Henry Ruhs
2025-11-03 14:05:15 +01:00
committed by GitHub
parent 189d750621
commit 8bf9170577
155 changed files with 3519 additions and 1753 deletions

BIN
.github/preview.png vendored

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

After

Width:  |  Height:  |  Size: 1.3 MiB

3
.gitignore vendored
View File

@@ -1,6 +1,7 @@
__pycache__
.assets
.claude
.caches
.jobs
.idea
.jobs
.vscode

View File

@@ -13,6 +13,7 @@ output_pattern =
[face_detector]
face_detector_model =
face_detector_size =
face_detector_margin =
face_detector_angles =
face_detector_score =
@@ -65,6 +66,8 @@ output_video_fps =
processors =
age_modifier_model =
age_modifier_direction =
background_remover_model =
background_remover_color =
deep_swapper_model =
deep_swapper_morph =
expression_restorer_model =

View File

@@ -1,7 +1,7 @@
from facefusion import state_manager
from facefusion.filesystem import get_file_name, is_video, resolve_file_paths
from facefusion.jobs import job_store
from facefusion.normalizer import normalize_fps, normalize_padding
from facefusion.normalizer import normalize_fps, normalize_space
from facefusion.processors.core import get_processors_modules
from facefusion.types import ApplyStateItem, Args
from facefusion.vision import detect_video_fps
@@ -55,6 +55,7 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
# face detector
apply_state_item('face_detector_model', args.get('face_detector_model'))
apply_state_item('face_detector_size', args.get('face_detector_size'))
apply_state_item('face_detector_margin', normalize_space(args.get('face_detector_margin')))
apply_state_item('face_detector_angles', args.get('face_detector_angles'))
apply_state_item('face_detector_score', args.get('face_detector_score'))
# face landmarker
@@ -77,7 +78,7 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
apply_state_item('face_mask_areas', args.get('face_mask_areas'))
apply_state_item('face_mask_regions', args.get('face_mask_regions'))
apply_state_item('face_mask_blur', args.get('face_mask_blur'))
apply_state_item('face_mask_padding', normalize_padding(args.get('face_mask_padding')))
apply_state_item('face_mask_padding', normalize_space(args.get('face_mask_padding')))
# voice extractor
apply_state_item('voice_extractor_model', args.get('voice_extractor_model'))
# frame extraction

View File

@@ -68,7 +68,9 @@ video_type_set : VideoTypeSet =\
'm4v': 'video/mp4',
'mkv': 'video/x-matroska',
'mp4': 'video/mp4',
'mpeg': 'video/mpeg',
'mov': 'video/quicktime',
'mxf': 'application/mxf',
'webm': 'video/webm',
'wmv': 'video/x-ms-wmv'
}
@@ -151,6 +153,7 @@ job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ]
benchmark_cycle_count_range : Sequence[int] = create_int_range(1, 10, 1)
execution_thread_count_range : Sequence[int] = create_int_range(1, 32, 1)
system_memory_limit_range : Sequence[int] = create_int_range(0, 128, 4)
face_detector_margin_range : Sequence[int] = create_int_range(0, 100, 1)
face_detector_angles : Sequence[Angle] = create_int_range(0, 270, 90)
face_detector_score_range : Sequence[Score] = create_float_range(0.0, 1.0, 0.05)
face_landmarker_score_range : Sequence[Score] = create_float_range(0.0, 1.0, 0.05)

View File

@@ -1,10 +1,10 @@
from typing import Tuple
from typing import List, Tuple
from facefusion.logger import get_package_logger
from facefusion.types import TableContents, TableHeaders
from facefusion.types import TableContent, TableHeader
def render_table(headers : TableHeaders, contents : TableContents) -> None:
def render_table(headers : List[TableHeader], contents : List[List[TableContent]]) -> None:
package_logger = get_package_logger()
table_column, table_separator = create_table_parts(headers, contents)
@@ -19,7 +19,7 @@ def render_table(headers : TableHeaders, contents : TableContents) -> None:
package_logger.critical(table_separator)
def create_table_parts(headers : TableHeaders, contents : TableContents) -> Tuple[str, str]:
def create_table_parts(headers : List[TableHeader], contents : List[List[TableContent]]) -> Tuple[str, str]:
column_parts = []
separator_parts = []
widths = [ len(header) for header in headers ]

View File

@@ -4,7 +4,7 @@ from typing import List, Tuple
import numpy
from tqdm import tqdm
from facefusion import inference_manager, state_manager, wording
from facefusion import inference_manager, state_manager, translator
from facefusion.common_helper import is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
@@ -22,6 +22,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'nsfw_1':
{
'__metadata__':
{
'vendor': 'EraX',
'license': 'Apache-2.0',
'year': 2024
},
'hashes':
{
'content_analyser':
@@ -44,6 +50,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'nsfw_2':
{
'__metadata__':
{
'vendor': 'Marqo',
'license': 'Apache-2.0',
'year': 2024
},
'hashes':
{
'content_analyser':
@@ -66,6 +78,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'nsfw_3':
{
'__metadata__':
{
'vendor': 'Freepik',
'license': 'MIT',
'year': 2025
},
'hashes':
{
'content_analyser':
@@ -152,16 +170,19 @@ def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int
total = 0
counter = 0
with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
with tqdm(total = len(frame_range), desc = translator.get('analysing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
for frame_number in frame_range:
if frame_number % int(video_fps) == 0:
vision_frame = read_video_frame(video_path, frame_number)
total += 1
if analyse_frame(vision_frame):
counter += 1
if counter > 0 and total > 0:
rate = counter / total * 100
progress.set_postfix(rate = rate)
progress.update()

View File

@@ -3,31 +3,21 @@ import itertools
import shutil
import signal
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import time
import numpy
from tqdm import tqdm
from facefusion import benchmarker, cli_helper, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, hash_helper, logger, process_manager, state_manager, video_manager, voice_extractor, wording
from facefusion import benchmarker, cli_helper, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, hash_helper, logger, state_manager, translator, voice_extractor
from facefusion.args import apply_args, collect_job_args, reduce_job_args, reduce_step_args
from facefusion.audio import create_empty_audio_frame, get_audio_frame, get_voice_frame
from facefusion.common_helper import get_first
from facefusion.content_analyser import analyse_image, analyse_video
from facefusion.download import conditional_download_hashes, conditional_download_sources
from facefusion.exit_helper import hard_exit, signal_exit
from facefusion.ffmpeg import copy_image, extract_frames, finalize_image, merge_video, replace_audio, restore_audio
from facefusion.filesystem import filter_audio_paths, get_file_name, is_image, is_video, resolve_file_paths, resolve_file_pattern
from facefusion.filesystem import get_file_extension, get_file_name, is_image, is_video, resolve_file_paths, resolve_file_pattern
from facefusion.jobs import job_helper, job_manager, job_runner
from facefusion.jobs.job_list import compose_job_list
from facefusion.memory import limit_system_memory
from facefusion.processors.core import get_processors_modules
from facefusion.program import create_program
from facefusion.program_helper import validate_args
from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, move_temp_file, resolve_temp_frame_paths
from facefusion.time_helper import calculate_end_time
from facefusion.types import Args, ErrorCode
from facefusion.vision import detect_image_resolution, detect_video_resolution, pack_resolution, read_static_image, read_static_images, read_static_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, scale_resolution, write_image
from facefusion.workflows import image_to_image, image_to_video
def cli() -> None:
@@ -85,14 +75,14 @@ def route(args : Args) -> None:
if state_manager.get_item('command') == 'headless-run':
if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1)
error_core = process_headless(args)
hard_exit(error_core)
error_code = process_headless(args)
hard_exit(error_code)
if state_manager.get_item('command') == 'batch-run':
if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
hard_exit(1)
error_core = process_batch(args)
hard_exit(error_core)
error_code = process_batch(args)
hard_exit(error_code)
if state_manager.get_item('command') in [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]:
if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
@@ -103,15 +93,15 @@ def route(args : Args) -> None:
def pre_check() -> bool:
if sys.version_info < (3, 10):
logger.error(wording.get('python_not_supported').format(version = '3.10'), __name__)
logger.error(translator.get('python_not_supported').format(version = '3.10'), __name__)
return False
if not shutil.which('curl'):
logger.error(wording.get('curl_not_installed'), __name__)
logger.error(translator.get('curl_not_installed'), __name__)
return False
if not shutil.which('ffmpeg'):
logger.error(wording.get('ffmpeg_not_installed'), __name__)
logger.error(translator.get('ffmpeg_not_installed'), __name__)
return False
return True
@@ -131,7 +121,7 @@ def common_pre_check() -> bool:
content_analyser_content = inspect.getsource(content_analyser).encode()
content_analyser_hash = hash_helper.create_hash(content_analyser_content)
return all(module.pre_check() for module in common_modules) and content_analyser_hash == '803b5ec7'
return all(module.pre_check() for module in common_modules) and content_analyser_hash == 'b14e7b92'
def processors_pre_check() -> bool:
@@ -179,106 +169,106 @@ def route_job_manager(args : Args) -> ErrorCode:
if state_manager.get_item('command') == 'job-create':
if job_manager.create_job(state_manager.get_item('job_id')):
logger.info(wording.get('job_created').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('job_created').format(job_id = state_manager.get_item('job_id')), __name__)
return 0
logger.error(wording.get('job_not_created').format(job_id = state_manager.get_item('job_id')), __name__)
logger.error(translator.get('job_not_created').format(job_id = state_manager.get_item('job_id')), __name__)
return 1
if state_manager.get_item('command') == 'job-submit':
if job_manager.submit_job(state_manager.get_item('job_id')):
logger.info(wording.get('job_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('job_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
return 0
logger.error(wording.get('job_not_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
logger.error(translator.get('job_not_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
return 1
if state_manager.get_item('command') == 'job-submit-all':
if job_manager.submit_jobs(state_manager.get_item('halt_on_error')):
logger.info(wording.get('job_all_submitted'), __name__)
logger.info(translator.get('job_all_submitted'), __name__)
return 0
logger.error(wording.get('job_all_not_submitted'), __name__)
logger.error(translator.get('job_all_not_submitted'), __name__)
return 1
if state_manager.get_item('command') == 'job-delete':
if job_manager.delete_job(state_manager.get_item('job_id')):
logger.info(wording.get('job_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('job_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
return 0
logger.error(wording.get('job_not_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
logger.error(translator.get('job_not_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
return 1
if state_manager.get_item('command') == 'job-delete-all':
if job_manager.delete_jobs(state_manager.get_item('halt_on_error')):
logger.info(wording.get('job_all_deleted'), __name__)
logger.info(translator.get('job_all_deleted'), __name__)
return 0
logger.error(wording.get('job_all_not_deleted'), __name__)
logger.error(translator.get('job_all_not_deleted'), __name__)
return 1
if state_manager.get_item('command') == 'job-add-step':
step_args = reduce_step_args(args)
if job_manager.add_step(state_manager.get_item('job_id'), step_args):
logger.info(wording.get('job_step_added').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('job_step_added').format(job_id = state_manager.get_item('job_id')), __name__)
return 0
logger.error(wording.get('job_step_not_added').format(job_id = state_manager.get_item('job_id')), __name__)
logger.error(translator.get('job_step_not_added').format(job_id = state_manager.get_item('job_id')), __name__)
return 1
if state_manager.get_item('command') == 'job-remix-step':
step_args = reduce_step_args(args)
if job_manager.remix_step(state_manager.get_item('job_id'), state_manager.get_item('step_index'), step_args):
logger.info(wording.get('job_remix_step_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
logger.info(translator.get('job_remix_step_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 0
logger.error(wording.get('job_remix_step_not_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
logger.error(translator.get('job_remix_step_not_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 1
if state_manager.get_item('command') == 'job-insert-step':
step_args = reduce_step_args(args)
if job_manager.insert_step(state_manager.get_item('job_id'), state_manager.get_item('step_index'), step_args):
logger.info(wording.get('job_step_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
logger.info(translator.get('job_step_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 0
logger.error(wording.get('job_step_not_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
logger.error(translator.get('job_step_not_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 1
if state_manager.get_item('command') == 'job-remove-step':
if job_manager.remove_step(state_manager.get_item('job_id'), state_manager.get_item('step_index')):
logger.info(wording.get('job_step_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
logger.info(translator.get('job_step_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 0
logger.error(wording.get('job_step_not_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
logger.error(translator.get('job_step_not_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
return 1
return 1
def route_job_runner() -> ErrorCode:
if state_manager.get_item('command') == 'job-run':
logger.info(wording.get('running_job').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('running_job').format(job_id = state_manager.get_item('job_id')), __name__)
if job_runner.run_job(state_manager.get_item('job_id'), process_step):
logger.info(wording.get('processing_job_succeeded').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('processing_job_succeeded').format(job_id = state_manager.get_item('job_id')), __name__)
return 0
logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
return 1
if state_manager.get_item('command') == 'job-run-all':
logger.info(wording.get('running_jobs'), __name__)
logger.info(translator.get('running_jobs'), __name__)
if job_runner.run_jobs(process_step, state_manager.get_item('halt_on_error')):
logger.info(wording.get('processing_jobs_succeeded'), __name__)
logger.info(translator.get('processing_jobs_succeeded'), __name__)
return 0
logger.info(wording.get('processing_jobs_failed'), __name__)
logger.info(translator.get('processing_jobs_failed'), __name__)
return 1
if state_manager.get_item('command') == 'job-retry':
logger.info(wording.get('retrying_job').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('retrying_job').format(job_id = state_manager.get_item('job_id')), __name__)
if job_runner.retry_job(state_manager.get_item('job_id'), process_step):
logger.info(wording.get('processing_job_succeeded').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('processing_job_succeeded').format(job_id = state_manager.get_item('job_id')), __name__)
return 0
logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
logger.info(translator.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
return 1
if state_manager.get_item('command') == 'job-retry-all':
logger.info(wording.get('retrying_jobs'), __name__)
logger.info(translator.get('retrying_jobs'), __name__)
if job_runner.retry_jobs(process_step, state_manager.get_item('halt_on_error')):
logger.info(wording.get('processing_jobs_succeeded'), __name__)
logger.info(translator.get('processing_jobs_succeeded'), __name__)
return 0
logger.info(wording.get('processing_jobs_failed'), __name__)
logger.info(translator.get('processing_jobs_failed'), __name__)
return 1
return 2
@@ -304,7 +294,12 @@ def process_batch(args : Args) -> ErrorCode:
for index, (source_path, target_path) in enumerate(itertools.product(source_paths, target_paths)):
step_args['source_paths'] = [ source_path ]
step_args['target_path'] = target_path
step_args['output_path'] = job_args.get('output_pattern').format(index = index)
try:
step_args['output_path'] = job_args.get('output_pattern').format(index = index, source_name = get_file_name(source_path), target_name = get_file_name(target_path), target_extension = get_file_extension(target_path))
except KeyError:
return 1
if not job_manager.add_step(job_id, step_args):
return 1
if job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step):
@@ -313,7 +308,12 @@ def process_batch(args : Args) -> ErrorCode:
if not source_paths and target_paths:
for index, target_path in enumerate(target_paths):
step_args['target_path'] = target_path
step_args['output_path'] = job_args.get('output_pattern').format(index = index)
try:
step_args['output_path'] = job_args.get('output_pattern').format(index = index, target_name = get_file_name(target_path), target_extension = get_file_extension(target_path))
except KeyError:
return 1
if not job_manager.add_step(job_id, step_args):
return 1
if job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step):
@@ -326,7 +326,7 @@ def process_step(job_id : str, step_index : int, step_args : Args) -> bool:
step_args.update(collect_job_args())
apply_args(step_args, state_manager.set_item)
logger.info(wording.get('processing_step').format(step_current = step_index + 1, step_total = step_total), __name__)
logger.info(translator.get('processing_step').format(step_current = step_index + 1, step_total = step_total), __name__)
if common_pre_check() and processors_pre_check():
error_code = conditional_process()
return error_code == 0
@@ -341,218 +341,10 @@ def conditional_process() -> ErrorCode:
return 2
if is_image(state_manager.get_item('target_path')):
return process_image(start_time)
return image_to_image.process(start_time)
if is_video(state_manager.get_item('target_path')):
return process_video(start_time)
return image_to_video.process(start_time)
return 0
def process_image(start_time : float) -> ErrorCode:
if analyse_image(state_manager.get_item('target_path')):
return 3
logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path'))
logger.debug(wording.get('creating_temp'), __name__)
create_temp_directory(state_manager.get_item('target_path'))
process_manager.start()
output_image_resolution = scale_resolution(detect_image_resolution(state_manager.get_item('target_path')), state_manager.get_item('output_image_scale'))
temp_image_resolution = restrict_image_resolution(state_manager.get_item('target_path'), output_image_resolution)
logger.info(wording.get('copying_image').format(resolution = pack_resolution(temp_image_resolution)), __name__)
if copy_image(state_manager.get_item('target_path'), temp_image_resolution):
logger.debug(wording.get('copying_image_succeeded'), __name__)
else:
logger.error(wording.get('copying_image_failed'), __name__)
process_manager.end()
return 1
temp_image_path = get_temp_file_path(state_manager.get_item('target_path'))
reference_vision_frame = read_static_image(temp_image_path)
source_vision_frames = read_static_images(state_manager.get_item('source_paths'))
source_audio_frame = create_empty_audio_frame()
source_voice_frame = create_empty_audio_frame()
target_vision_frame = read_static_image(temp_image_path)
temp_vision_frame = target_vision_frame.copy()
for processor_module in get_processors_modules(state_manager.get_item('processors')):
logger.info(wording.get('processing'), processor_module.__name__)
temp_vision_frame = processor_module.process_frame(
{
'reference_vision_frame': reference_vision_frame,
'source_vision_frames': source_vision_frames,
'source_audio_frame': source_audio_frame,
'source_voice_frame': source_voice_frame,
'target_vision_frame': target_vision_frame,
'temp_vision_frame': temp_vision_frame
})
processor_module.post_process()
write_image(temp_image_path, temp_vision_frame)
if is_process_stopping():
return 4
logger.info(wording.get('finalizing_image').format(resolution = pack_resolution(output_image_resolution)), __name__)
if finalize_image(state_manager.get_item('target_path'), state_manager.get_item('output_path'), output_image_resolution):
logger.debug(wording.get('finalizing_image_succeeded'), __name__)
else:
logger.warn(wording.get('finalizing_image_skipped'), __name__)
logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path'))
if is_image(state_manager.get_item('output_path')):
logger.info(wording.get('processing_image_succeeded').format(seconds = calculate_end_time(start_time)), __name__)
else:
logger.error(wording.get('processing_image_failed'), __name__)
process_manager.end()
return 1
process_manager.end()
return 0
def process_video(start_time : float) -> ErrorCode:
trim_frame_start, trim_frame_end = restrict_trim_frame(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end'))
if analyse_video(state_manager.get_item('target_path'), trim_frame_start, trim_frame_end):
return 3
logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path'))
logger.debug(wording.get('creating_temp'), __name__)
create_temp_directory(state_manager.get_item('target_path'))
process_manager.start()
output_video_resolution = scale_resolution(detect_video_resolution(state_manager.get_item('target_path')), state_manager.get_item('output_video_scale'))
temp_video_resolution = restrict_video_resolution(state_manager.get_item('target_path'), output_video_resolution)
temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps'))
logger.info(wording.get('extracting_frames').format(resolution = pack_resolution(temp_video_resolution), fps = temp_video_fps), __name__)
if extract_frames(state_manager.get_item('target_path'), temp_video_resolution, temp_video_fps, trim_frame_start, trim_frame_end):
logger.debug(wording.get('extracting_frames_succeeded'), __name__)
else:
if is_process_stopping():
return 4
logger.error(wording.get('extracting_frames_failed'), __name__)
process_manager.end()
return 1
temp_frame_paths = resolve_temp_frame_paths(state_manager.get_item('target_path'))
if temp_frame_paths:
with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
progress.set_postfix(execution_providers = state_manager.get_item('execution_providers'))
with ThreadPoolExecutor(max_workers = state_manager.get_item('execution_thread_count')) as executor:
futures = []
for frame_number, temp_frame_path in enumerate(temp_frame_paths):
future = executor.submit(process_temp_frame, temp_frame_path, frame_number)
futures.append(future)
for future in as_completed(futures):
if is_process_stopping():
for __future__ in futures:
__future__.cancel()
if not future.cancelled():
future.result()
progress.update()
for processor_module in get_processors_modules(state_manager.get_item('processors')):
processor_module.post_process()
if is_process_stopping():
return 4
else:
logger.error(wording.get('temp_frames_not_found'), __name__)
process_manager.end()
return 1
logger.info(wording.get('merging_video').format(resolution = pack_resolution(output_video_resolution), fps = state_manager.get_item('output_video_fps')), __name__)
if merge_video(state_manager.get_item('target_path'), temp_video_fps, output_video_resolution, state_manager.get_item('output_video_fps'), trim_frame_start, trim_frame_end):
logger.debug(wording.get('merging_video_succeeded'), __name__)
else:
if is_process_stopping():
return 4
logger.error(wording.get('merging_video_failed'), __name__)
process_manager.end()
return 1
if state_manager.get_item('output_audio_volume') == 0:
logger.info(wording.get('skipping_audio'), __name__)
move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
else:
source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
if source_audio_path:
if replace_audio(state_manager.get_item('target_path'), source_audio_path, state_manager.get_item('output_path')):
video_manager.clear_video_pool()
logger.debug(wording.get('replacing_audio_succeeded'), __name__)
else:
video_manager.clear_video_pool()
if is_process_stopping():
return 4
logger.warn(wording.get('replacing_audio_skipped'), __name__)
move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
else:
if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), trim_frame_start, trim_frame_end):
video_manager.clear_video_pool()
logger.debug(wording.get('restoring_audio_succeeded'), __name__)
else:
video_manager.clear_video_pool()
if is_process_stopping():
return 4
logger.warn(wording.get('restoring_audio_skipped'), __name__)
move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
logger.debug(wording.get('clearing_temp'), __name__)
clear_temp_directory(state_manager.get_item('target_path'))
if is_video(state_manager.get_item('output_path')):
logger.info(wording.get('processing_video_succeeded').format(seconds = calculate_end_time(start_time)), __name__)
else:
logger.error(wording.get('processing_video_failed'), __name__)
process_manager.end()
return 1
process_manager.end()
return 0
def process_temp_frame(temp_frame_path : str, frame_number : int) -> bool:
	# Run every configured processor over a single extracted frame and write the
	# result back to the same temp frame path. Returns the write_image() outcome.
	target_path = state_manager.get_item('target_path')
	source_paths = state_manager.get_item('source_paths')
	reference_vision_frame = read_static_video_frame(target_path, state_manager.get_item('reference_frame_number'))
	source_vision_frames = read_static_images(source_paths)
	source_audio_path = get_first(filter_audio_paths(source_paths))
	temp_video_fps = restrict_video_fps(target_path, state_manager.get_item('output_video_fps'))
	target_vision_frame = read_static_image(temp_frame_path)
	# Audio and voice frames fall back to silence when the extracted frame is
	# empty or no audio source is available (numpy.any(None) is falsy).
	source_audio_frame = get_audio_frame(source_audio_path, temp_video_fps, frame_number)
	source_voice_frame = get_voice_frame(source_audio_path, temp_video_fps, frame_number)
	source_audio_frame = source_audio_frame if numpy.any(source_audio_frame) else create_empty_audio_frame()
	source_voice_frame = source_voice_frame if numpy.any(source_voice_frame) else create_empty_audio_frame()
	# Processors mutate a working copy while keeping the untouched original
	# frame available as 'target_vision_frame' in the payload.
	temp_vision_frame = target_vision_frame.copy()
	for processor_module in get_processors_modules(state_manager.get_item('processors')):
		temp_vision_frame = processor_module.process_frame(
		{
			'reference_vision_frame': reference_vision_frame,
			'source_vision_frames': source_vision_frames,
			'source_audio_frame': source_audio_frame,
			'source_voice_frame': source_voice_frame,
			'target_vision_frame': target_vision_frame,
			'temp_vision_frame': temp_vision_frame
		})
	return write_image(temp_frame_path, temp_vision_frame)
def is_process_stopping() -> bool:
	# Finalize the process manager when a stop has been requested, log the
	# interruption, and report whether the manager is still pending.
	stop_requested = process_manager.is_stopping()
	if stop_requested:
		process_manager.end()
		logger.info(wording.get('processing_stopped'), __name__)
	return process_manager.is_pending()

View File

@@ -1,27 +1,28 @@
import itertools
import shutil
from typing import List
from facefusion import metadata
from facefusion.types import Commands
from facefusion.types import Command
def run(commands : Commands) -> Commands:
def run(commands : List[Command]) -> List[Command]:
user_agent = metadata.get('name') + '/' + metadata.get('version')
return [ shutil.which('curl'), '--user-agent', user_agent, '--insecure', '--location', '--silent' ] + commands
def chain(*commands : Commands) -> Commands:
def chain(*commands : List[Command]) -> List[Command]:
return list(itertools.chain(*commands))
def head(url : str) -> Commands:
def head(url : str) -> List[Command]:
return [ '-I', url ]
def download(url : str, download_file_path : str) -> Commands:
def download(url : str, download_file_path : str) -> List[Command]:
return [ '--create-dirs', '--continue-at', '-', '--output', download_file_path, url ]
def set_timeout(timeout : int) -> Commands:
def set_timeout(timeout : int) -> List[Command]:
return [ '--connect-timeout', str(timeout) ]

View File

@@ -7,13 +7,13 @@ from urllib.parse import urlparse
from tqdm import tqdm
import facefusion.choices
from facefusion import curl_builder, logger, process_manager, state_manager, wording
from facefusion import curl_builder, logger, process_manager, state_manager, translator
from facefusion.filesystem import get_file_name, get_file_size, is_file, remove_file
from facefusion.hash_helper import validate_hash
from facefusion.types import Commands, DownloadProvider, DownloadSet
from facefusion.types import Command, DownloadProvider, DownloadSet
def open_curl(commands : Commands) -> subprocess.Popen[bytes]:
def open_curl(commands : List[Command]) -> subprocess.Popen[bytes]:
commands = curl_builder.run(commands)
return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
@@ -26,7 +26,7 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> Non
download_size = get_static_download_size(url)
if initial_size < download_size:
with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
with tqdm(total = download_size, initial = initial_size, desc = translator.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
commands = curl_builder.chain(
curl_builder.download(url, download_file_path),
curl_builder.set_timeout(5)
@@ -87,10 +87,10 @@ def conditional_download_hashes(hash_set : DownloadSet) -> bool:
for valid_hash_path in valid_hash_paths:
valid_hash_file_name = get_file_name(valid_hash_path)
logger.debug(wording.get('validating_hash_succeeded').format(hash_file_name = valid_hash_file_name), __name__)
logger.debug(translator.get('validating_hash_succeeded').format(hash_file_name = valid_hash_file_name), __name__)
for invalid_hash_path in invalid_hash_paths:
invalid_hash_file_name = get_file_name(invalid_hash_path)
logger.error(wording.get('validating_hash_failed').format(hash_file_name = invalid_hash_file_name), __name__)
logger.error(translator.get('validating_hash_failed').format(hash_file_name = invalid_hash_file_name), __name__)
if not invalid_hash_paths:
process_manager.end()
@@ -114,13 +114,13 @@ def conditional_download_sources(source_set : DownloadSet) -> bool:
for valid_source_path in valid_source_paths:
valid_source_file_name = get_file_name(valid_source_path)
logger.debug(wording.get('validating_source_succeeded').format(source_file_name = valid_source_file_name), __name__)
logger.debug(translator.get('validating_source_succeeded').format(source_file_name = valid_source_file_name), __name__)
for invalid_source_path in invalid_source_paths:
invalid_source_file_name = get_file_name(invalid_source_path)
logger.error(wording.get('validating_source_failed').format(source_file_name = invalid_source_file_name), __name__)
logger.error(translator.get('validating_source_failed').format(source_file_name = invalid_source_file_name), __name__)
if remove_file(invalid_source_path):
logger.error(wording.get('deleting_corrupt_source').format(source_file_name = invalid_source_file_name), __name__)
logger.error(translator.get('deleting_corrupt_source').format(source_file_name = invalid_source_file_name), __name__)
if not invalid_source_paths:
process_manager.end()

View File

@@ -17,6 +17,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'fairface':
{
'__metadata__':
{
'vendor': 'dchen236',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'face_classifier':

View File

@@ -9,7 +9,7 @@ from facefusion.download import conditional_download_hashes, conditional_downloa
from facefusion.face_helper import create_rotation_matrix_and_size, create_static_anchors, distance_to_bounding_box, distance_to_face_landmark_5, normalize_bounding_box, transform_bounding_box, transform_points
from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import thread_semaphore
from facefusion.types import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame
from facefusion.types import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, Margin, ModelSet, Score, VisionFrame
from facefusion.vision import restrict_frame, unpack_resolution
@@ -19,6 +19,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'retinaface':
{
'__metadata__':
{
'vendor': 'InsightFace',
'license': 'Non-Commercial',
'year': 2020
},
'hashes':
{
'retinaface':
@@ -38,6 +44,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'scrfd':
{
'__metadata__':
{
'vendor': 'InsightFace',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'scrfd':
@@ -57,6 +69,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'yolo_face':
{
'__metadata__':
{
'vendor': 'derronqi',
'license': 'GPL-3.0',
'year': 2022
},
'hashes':
{
'yolo_face':
@@ -76,6 +94,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'yunet':
{
'__metadata__':
{
'vendor': 'OpenCV',
'license': 'MIT',
'year': 2023
},
'hashes':
{
'yunet':
@@ -128,38 +152,49 @@ def pre_check() -> bool:
def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
margin_top, margin_right, margin_bottom, margin_left = prepare_margin(vision_frame)
margin_vision_frame = numpy.pad(vision_frame, ((margin_top, margin_bottom), (margin_left, margin_right), (0, 0)))
all_bounding_boxes : List[BoundingBox] = []
all_face_scores : List[Score] = []
all_face_landmarks_5 : List[FaceLandmark5] = []
if state_manager.get_item('face_detector_model') in [ 'many', 'retinaface' ]:
bounding_boxes, face_scores, face_landmarks_5 = detect_with_retinaface(vision_frame, state_manager.get_item('face_detector_size'))
bounding_boxes, face_scores, face_landmarks_5 = detect_with_retinaface(margin_vision_frame, state_manager.get_item('face_detector_size'))
all_bounding_boxes.extend(bounding_boxes)
all_face_scores.extend(face_scores)
all_face_landmarks_5.extend(face_landmarks_5)
if state_manager.get_item('face_detector_model') in [ 'many', 'scrfd' ]:
bounding_boxes, face_scores, face_landmarks_5 = detect_with_scrfd(vision_frame, state_manager.get_item('face_detector_size'))
bounding_boxes, face_scores, face_landmarks_5 = detect_with_scrfd(margin_vision_frame, state_manager.get_item('face_detector_size'))
all_bounding_boxes.extend(bounding_boxes)
all_face_scores.extend(face_scores)
all_face_landmarks_5.extend(face_landmarks_5)
if state_manager.get_item('face_detector_model') in [ 'many', 'yolo_face' ]:
bounding_boxes, face_scores, face_landmarks_5 = detect_with_yolo_face(vision_frame, state_manager.get_item('face_detector_size'))
bounding_boxes, face_scores, face_landmarks_5 = detect_with_yolo_face(margin_vision_frame, state_manager.get_item('face_detector_size'))
all_bounding_boxes.extend(bounding_boxes)
all_face_scores.extend(face_scores)
all_face_landmarks_5.extend(face_landmarks_5)
if state_manager.get_item('face_detector_model') == 'yunet':
bounding_boxes, face_scores, face_landmarks_5 = detect_with_yunet(vision_frame, state_manager.get_item('face_detector_size'))
bounding_boxes, face_scores, face_landmarks_5 = detect_with_yunet(margin_vision_frame, state_manager.get_item('face_detector_size'))
all_bounding_boxes.extend(bounding_boxes)
all_face_scores.extend(face_scores)
all_face_landmarks_5.extend(face_landmarks_5)
all_bounding_boxes = [ normalize_bounding_box(all_bounding_box) for all_bounding_box in all_bounding_boxes ]
all_bounding_boxes = [ normalize_bounding_box(all_bounding_box) - numpy.array([ margin_left, margin_top, margin_left, margin_top ]) for all_bounding_box in all_bounding_boxes ]
all_face_landmarks_5 = [ all_face_landmark_5 - numpy.array([ margin_left, margin_top ]) for all_face_landmark_5 in all_face_landmarks_5 ]
return all_bounding_boxes, all_face_scores, all_face_landmarks_5
def prepare_margin(vision_frame : VisionFrame) -> Margin:
margin_top = int(vision_frame.shape[0] * numpy.interp(state_manager.get_item('face_detector_margin')[0], [ 0, 100 ], [ 0, 0.5 ]))
margin_right = int(vision_frame.shape[1] * numpy.interp(state_manager.get_item('face_detector_margin')[1], [ 0, 100 ], [ 0, 0.5 ]))
margin_bottom = int(vision_frame.shape[0] * numpy.interp(state_manager.get_item('face_detector_margin')[2], [ 0, 100 ], [ 0, 0.5 ]))
margin_left = int(vision_frame.shape[1] * numpy.interp(state_manager.get_item('face_detector_margin')[3], [ 0, 100 ], [ 0, 0.5 ]))
return margin_top, margin_right, margin_bottom, margin_left
def detect_faces_by_angle(vision_frame : VisionFrame, face_angle : Angle) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
rotation_matrix, rotation_size = create_rotation_matrix_and_size(face_angle, vision_frame.shape[:2][::-1])
rotation_vision_frame = cv2.warpAffine(vision_frame, rotation_matrix, rotation_size)

View File

@@ -98,17 +98,17 @@ def warp_face_by_translation(temp_vision_frame : VisionFrame, translation : Tran
return crop_vision_frame, affine_matrix
def paste_back(temp_vision_frame : VisionFrame, crop_vision_frame : VisionFrame, crop_mask : Mask, affine_matrix : Matrix) -> VisionFrame:
def paste_back(temp_vision_frame : VisionFrame, crop_vision_frame : VisionFrame, crop_vision_mask : Mask, affine_matrix : Matrix) -> VisionFrame:
paste_bounding_box, paste_matrix = calculate_paste_area(temp_vision_frame, crop_vision_frame, affine_matrix)
x1, y1, x2, y2 = paste_bounding_box
paste_width = x2 - x1
paste_height = y2 - y1
inverse_mask = cv2.warpAffine(crop_mask, paste_matrix, (paste_width, paste_height)).clip(0, 1)
inverse_mask = numpy.expand_dims(inverse_mask, axis = -1)
inverse_vision_mask = cv2.warpAffine(crop_vision_mask, paste_matrix, (paste_width, paste_height)).clip(0, 1)
inverse_vision_mask = numpy.expand_dims(inverse_vision_mask, axis = -1)
inverse_vision_frame = cv2.warpAffine(crop_vision_frame, paste_matrix, (paste_width, paste_height), borderMode = cv2.BORDER_REPLICATE)
temp_vision_frame = temp_vision_frame.copy()
paste_vision_frame = temp_vision_frame[y1:y2, x1:x2]
paste_vision_frame = paste_vision_frame * (1 - inverse_mask) + inverse_vision_frame * inverse_mask
paste_vision_frame = paste_vision_frame * (1 - inverse_vision_mask) + inverse_vision_frame * inverse_vision_mask
temp_vision_frame[y1:y2, x1:x2] = paste_vision_frame.astype(temp_vision_frame.dtype)
return temp_vision_frame

View File

@@ -18,6 +18,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'2dfan4':
{
'__metadata__':
{
'vendor': 'breadbread1984',
'license': 'MIT',
'year': 2018
},
'hashes':
{
'2dfan4':
@@ -38,6 +44,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'peppa_wutz':
{
'__metadata__':
{
'vendor': 'Unknown',
'license': 'Apache-2.0',
'year': 2023
},
'hashes':
{
'peppa_wutz':
@@ -58,6 +70,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'fan_68_5':
{
'__metadata__':
{
'vendor': 'FaceFusion',
'license': 'OpenRAIL-M',
'year': 2024
},
'hashes':
{
'fan_68_5':

View File

@@ -18,6 +18,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'xseg_1':
{
'__metadata__':
{
'vendor': 'DeepFaceLab',
'license': 'GPL-3.0',
'year': 2021
},
'hashes':
{
'face_occluder':
@@ -38,6 +44,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'xseg_2':
{
'__metadata__':
{
'vendor': 'DeepFaceLab',
'license': 'GPL-3.0',
'year': 2021
},
'hashes':
{
'face_occluder':
@@ -58,6 +70,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'xseg_3':
{
'__metadata__':
{
'vendor': 'DeepFaceLab',
'license': 'GPL-3.0',
'year': 2021
},
'hashes':
{
'face_occluder':
@@ -78,6 +96,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'bisenet_resnet_18':
{
'__metadata__':
{
'vendor': 'yakhyo',
'license': 'MIT',
'year': 2024
},
'hashes':
{
'face_parser':
@@ -98,6 +122,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'bisenet_resnet_34':
{
'__metadata__':
{
'vendor': 'yakhyo',
'license': 'MIT',
'year': 2024
},
'hashes':
{
'face_parser':

View File

@@ -17,6 +17,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'arcface':
{
'__metadata__':
{
'vendor': 'InsightFace',
'license': 'Non-Commercial',
'year': 2018
},
'hashes':
{
'face_recognizer':

View File

@@ -7,14 +7,14 @@ from typing import List, Optional, cast
from tqdm import tqdm
import facefusion.choices
from facefusion import ffmpeg_builder, logger, process_manager, state_manager, wording
from facefusion import ffmpeg_builder, logger, process_manager, state_manager, translator
from facefusion.filesystem import get_file_format, remove_file
from facefusion.temp_helper import get_temp_file_path, get_temp_frames_pattern
from facefusion.types import AudioBuffer, AudioEncoder, Commands, EncoderSet, Fps, Resolution, UpdateProgress, VideoEncoder, VideoFormat
from facefusion.types import AudioBuffer, AudioEncoder, Command, EncoderSet, Fps, Resolution, UpdateProgress, VideoEncoder, VideoFormat
from facefusion.vision import detect_video_duration, detect_video_fps, pack_resolution, predict_video_frame_total
def run_ffmpeg_with_progress(commands : Commands, update_progress : UpdateProgress) -> subprocess.Popen[bytes]:
def run_ffmpeg_with_progress(commands : List[Command], update_progress : UpdateProgress) -> subprocess.Popen[bytes]:
log_level = state_manager.get_item('log_level')
commands.extend(ffmpeg_builder.set_progress())
commands.extend(ffmpeg_builder.cast_stream())
@@ -45,7 +45,7 @@ def update_progress(progress : tqdm, frame_number : int) -> None:
progress.update(frame_number - progress.n)
def run_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
def run_ffmpeg(commands : List[Command]) -> subprocess.Popen[bytes]:
log_level = state_manager.get_item('log_level')
commands = ffmpeg_builder.run(commands)
process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
@@ -65,7 +65,7 @@ def run_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
return process
def open_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
def open_ffmpeg(commands : List[Command]) -> subprocess.Popen[bytes]:
commands = ffmpeg_builder.run(commands)
return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
@@ -119,7 +119,7 @@ def extract_frames(target_path : str, temp_video_resolution : Resolution, temp_v
ffmpeg_builder.set_output(temp_frames_pattern)
)
with tqdm(total = extract_frame_total, desc = wording.get('extracting'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
with tqdm(total = extract_frame_total, desc = translator.get('extracting'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
process = run_ffmpeg_with_progress(commands, partial(update_progress, progress))
return process.returncode == 0
@@ -229,12 +229,15 @@ def merge_video(target_path : str, temp_video_fps : Fps, output_video_resolution
ffmpeg_builder.set_video_encoder(output_video_encoder),
ffmpeg_builder.set_video_quality(output_video_encoder, output_video_quality),
ffmpeg_builder.set_video_preset(output_video_encoder, output_video_preset),
ffmpeg_builder.set_video_fps(output_video_fps),
ffmpeg_builder.concat(
ffmpeg_builder.set_video_fps(output_video_fps),
ffmpeg_builder.keep_video_alpha(output_video_encoder)
),
ffmpeg_builder.set_pixel_format(output_video_encoder),
ffmpeg_builder.force_output(temp_video_path)
)
with tqdm(total = merge_frame_total, desc = wording.get('merging'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
with tqdm(total = merge_frame_total, desc = translator.get('merging'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
process = run_ffmpeg_with_progress(commands, partial(update_progress, progress))
return process.returncode == 0
@@ -265,17 +268,19 @@ def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
def fix_audio_encoder(video_format : VideoFormat, audio_encoder : AudioEncoder) -> AudioEncoder:
if video_format == 'avi' and audio_encoder == 'libopus':
return 'aac'
if video_format in [ 'm4v', 'wmv' ]:
if video_format in [ 'm4v', 'mpeg', 'wmv' ]:
return 'aac'
if video_format == 'mov' and audio_encoder in [ 'flac', 'libopus' ]:
return 'aac'
if video_format == 'mxf':
return 'pcm_s16le'
if video_format == 'webm':
return 'libopus'
return audio_encoder
def fix_video_encoder(video_format : VideoFormat, video_encoder : VideoEncoder) -> VideoEncoder:
if video_format in [ 'm4v', 'wmv' ]:
if video_format in [ 'm4v', 'mpeg', 'mxf', 'wmv' ]:
return 'libx264'
if video_format in [ 'mkv', 'mp4' ] and video_encoder == 'rawvideo':
return 'libx264'

View File

@@ -1,54 +1,69 @@
import itertools
import shutil
from typing import Optional
from typing import List, Optional
import numpy
from facefusion.filesystem import get_file_format
from facefusion.types import AudioEncoder, Commands, Duration, Fps, StreamMode, VideoEncoder, VideoPreset
from facefusion.types import AudioEncoder, Command, CommandSet, Duration, Fps, StreamMode, VideoEncoder, VideoPreset
def run(commands : Commands) -> Commands:
def run(commands : List[Command]) -> List[Command]:
return [ shutil.which('ffmpeg'), '-loglevel', 'error' ] + commands
def chain(*commands : Commands) -> Commands:
def chain(*commands : List[Command]) -> List[Command]:
return list(itertools.chain(*commands))
def get_encoders() -> Commands:
def concat(*__commands__ : List[Command]) -> List[Command]:
commands = []
command_set : CommandSet = {}
for command in __commands__:
for argument, value in zip(command[::2], command[1::2]):
command_set.setdefault(argument, []).append(value)
for argument, values in command_set.items():
commands.append(argument)
commands.append(','.join(values))
return commands
def get_encoders() -> List[Command]:
return [ '-encoders' ]
def set_hardware_accelerator(value : str) -> Commands:
def set_hardware_accelerator(value : str) -> List[Command]:
return [ '-hwaccel', value ]
def set_progress() -> Commands:
def set_progress() -> List[Command]:
return [ '-progress' ]
def set_input(input_path : str) -> Commands:
def set_input(input_path : str) -> List[Command]:
return [ '-i', input_path ]
def set_input_fps(input_fps : Fps) -> Commands:
def set_input_fps(input_fps : Fps) -> List[Command]:
return [ '-r', str(input_fps)]
def set_output(output_path : str) -> Commands:
def set_output(output_path : str) -> List[Command]:
return [ output_path ]
def force_output(output_path : str) -> Commands:
def force_output(output_path : str) -> List[Command]:
return [ '-y', output_path ]
def cast_stream() -> Commands:
def cast_stream() -> List[Command]:
return [ '-' ]
def set_stream_mode(stream_mode : StreamMode) -> Commands:
def set_stream_mode(stream_mode : StreamMode) -> List[Command]:
if stream_mode == 'udp':
return [ '-f', 'mpegts' ]
if stream_mode == 'v4l2':
@@ -56,25 +71,27 @@ def set_stream_mode(stream_mode : StreamMode) -> Commands:
return []
def set_stream_quality(stream_quality : int) -> Commands:
def set_stream_quality(stream_quality : int) -> List[Command]:
return [ '-b:v', str(stream_quality) + 'k' ]
def unsafe_concat() -> Commands:
def unsafe_concat() -> List[Command]:
return [ '-f', 'concat', '-safe', '0' ]
def set_pixel_format(video_encoder : VideoEncoder) -> Commands:
def set_pixel_format(video_encoder : VideoEncoder) -> List[Command]:
if video_encoder == 'rawvideo':
return [ '-pix_fmt', 'rgb24' ]
if video_encoder == 'libvpx-vp9':
return [ '-pix_fmt', 'yuva420p' ]
return [ '-pix_fmt', 'yuv420p' ]
def set_frame_quality(frame_quality : int) -> Commands:
def set_frame_quality(frame_quality : int) -> List[Command]:
return [ '-q:v', str(frame_quality) ]
def select_frame_range(frame_start : int, frame_end : int, video_fps : Fps) -> Commands:
def select_frame_range(frame_start : int, frame_end : int, video_fps : Fps) -> List[Command]:
if isinstance(frame_start, int) and isinstance(frame_end, int):
return [ '-vf', 'trim=start_frame=' + str(frame_start) + ':end_frame=' + str(frame_end) + ',fps=' + str(video_fps) ]
if isinstance(frame_start, int):
@@ -84,11 +101,11 @@ def select_frame_range(frame_start : int, frame_end : int, video_fps : Fps) -> C
return [ '-vf', 'fps=' + str(video_fps) ]
def prevent_frame_drop() -> Commands:
def prevent_frame_drop() -> List[Command]:
return [ '-vsync', '0' ]
def select_media_range(frame_start : int, frame_end : int, media_fps : Fps) -> Commands:
def select_media_range(frame_start : int, frame_end : int, media_fps : Fps) -> List[Command]:
commands = []
if isinstance(frame_start, int):
@@ -98,15 +115,15 @@ def select_media_range(frame_start : int, frame_end : int, media_fps : Fps) -> C
return commands
def select_media_stream(media_stream : str) -> Commands:
def select_media_stream(media_stream : str) -> List[Command]:
return [ '-map', media_stream ]
def set_media_resolution(video_resolution : str) -> Commands:
def set_media_resolution(video_resolution : str) -> List[Command]:
return [ '-s', video_resolution ]
def set_image_quality(image_path : str, image_quality : int) -> Commands:
def set_image_quality(image_path : str, image_quality : int) -> List[Command]:
if get_file_format(image_path) == 'webp':
return [ '-q:v', str(image_quality) ]
@@ -114,19 +131,19 @@ def set_image_quality(image_path : str, image_quality : int) -> Commands:
return [ '-q:v', str(image_compression) ]
def set_audio_encoder(audio_codec : str) -> Commands:
def set_audio_encoder(audio_codec : str) -> List[Command]:
return [ '-c:a', audio_codec ]
def copy_audio_encoder() -> Commands:
def copy_audio_encoder() -> List[Command]:
return set_audio_encoder('copy')
def set_audio_sample_rate(audio_sample_rate : int) -> Commands:
def set_audio_sample_rate(audio_sample_rate : int) -> List[Command]:
return [ '-ar', str(audio_sample_rate) ]
def set_audio_sample_size(audio_sample_size : int) -> Commands:
def set_audio_sample_size(audio_sample_size : int) -> List[Command]:
if audio_sample_size == 16:
return [ '-f', 's16le' ]
if audio_sample_size == 32:
@@ -134,11 +151,11 @@ def set_audio_sample_size(audio_sample_size : int) -> Commands:
return []
def set_audio_channel_total(audio_channel_total : int) -> Commands:
def set_audio_channel_total(audio_channel_total : int) -> List[Command]:
return [ '-ac', str(audio_channel_total) ]
def set_audio_quality(audio_encoder : AudioEncoder, audio_quality : int) -> Commands:
def set_audio_quality(audio_encoder : AudioEncoder, audio_quality : int) -> List[Command]:
if audio_encoder == 'aac':
audio_compression = numpy.round(numpy.interp(audio_quality, [ 0, 100 ], [ 0.1, 2.0 ]), 1).astype(float).item()
return [ '-q:a', str(audio_compression) ]
@@ -154,19 +171,19 @@ def set_audio_quality(audio_encoder : AudioEncoder, audio_quality : int) -> Comm
return []
def set_audio_volume(audio_volume : int) -> Commands:
def set_audio_volume(audio_volume : int) -> List[Command]:
return [ '-filter:a', 'volume=' + str(audio_volume / 100) ]
def set_video_encoder(video_encoder : str) -> Commands:
def set_video_encoder(video_encoder : str) -> List[Command]:
return [ '-c:v', video_encoder ]
def copy_video_encoder() -> Commands:
def copy_video_encoder() -> List[Command]:
return set_video_encoder('copy')
def set_video_quality(video_encoder : VideoEncoder, video_quality : int) -> Commands:
def set_video_quality(video_encoder : VideoEncoder, video_quality : int) -> List[Command]:
if video_encoder in [ 'libx264', 'libx264rgb', 'libx265' ]:
video_compression = numpy.round(numpy.interp(video_quality, [ 0, 100 ], [ 51, 0 ])).astype(int).item()
return [ '-crf', str(video_compression) ]
@@ -188,7 +205,7 @@ def set_video_quality(video_encoder : VideoEncoder, video_quality : int) -> Comm
return []
def set_video_preset(video_encoder : VideoEncoder, video_preset : VideoPreset) -> Commands:
def set_video_preset(video_encoder : VideoEncoder, video_preset : VideoPreset) -> List[Command]:
if video_encoder in [ 'libx264', 'libx264rgb', 'libx265' ]:
return [ '-preset', video_preset ]
if video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
@@ -200,19 +217,25 @@ def set_video_preset(video_encoder : VideoEncoder, video_preset : VideoPreset) -
return []
def set_video_fps(video_fps : Fps) -> Commands:
return [ '-vf', 'framerate=fps=' + str(video_fps) ]
def set_video_fps(video_fps : Fps) -> List[Command]:
return [ '-vf', 'fps=' + str(video_fps) ]
def set_video_duration(video_duration : Duration) -> Commands:
def set_video_duration(video_duration : Duration) -> List[Command]:
return [ '-t', str(video_duration) ]
def capture_video() -> Commands:
def keep_video_alpha(video_encoder : VideoEncoder) -> List[Command]:
if video_encoder == 'libvpx-vp9':
return [ '-vf', 'format=yuva420p' ]
return []
def capture_video() -> List[Command]:
return [ '-f', 'rawvideo', '-pix_fmt', 'rgb24' ]
def ignore_video_stream() -> Commands:
def ignore_video_stream() -> List[Command]:
return [ '-vn' ]

View File

@@ -36,6 +36,8 @@ def get_file_format(file_path : str) -> Optional[str]:
return 'jpeg'
if file_extension == '.tif':
return 'tiff'
if file_extension == '.mpg':
return 'mpeg'
return file_extension.lstrip('.')
return None
@@ -99,7 +101,7 @@ def has_video(video_paths : List[str]) -> bool:
def are_videos(video_paths : List[str]) -> bool:
if video_paths:
return any(map(is_video, video_paths))
return all(map(is_video, video_paths))
return False

View File

@@ -5,9 +5,10 @@ from typing import List
from onnxruntime import InferenceSession
from facefusion import logger, process_manager, state_manager, wording
from facefusion import logger, process_manager, state_manager, translator
from facefusion.app_context import detect_app_context
from facefusion.execution import create_inference_session_providers
from facefusion.common_helper import is_windows
from facefusion.execution import create_inference_session_providers, has_execution_provider
from facefusion.exit_helper import fatal_exit
from facefusion.filesystem import get_file_name, is_file
from facefusion.time_helper import calculate_end_time
@@ -57,9 +58,11 @@ def clear_inference_pool(module_name : str, model_names : List[str]) -> None:
execution_providers = resolve_execution_providers(module_name)
app_context = detect_app_context()
if is_windows() and has_execution_provider('directml'):
INFERENCE_POOL_SET[app_context].clear()
for execution_device_id in execution_device_ids:
inference_context = get_inference_context(module_name, model_names, execution_device_id, execution_providers)
if INFERENCE_POOL_SET.get(app_context).get(inference_context):
del INFERENCE_POOL_SET[app_context][inference_context]
@@ -71,11 +74,11 @@ def create_inference_session(model_path : str, execution_device_id : str, execut
try:
inference_session_providers = create_inference_session_providers(execution_device_id, execution_providers)
inference_session = InferenceSession(model_path, providers = inference_session_providers)
logger.debug(wording.get('loading_model_succeeded').format(model_name = model_file_name, seconds = calculate_end_time(start_time)), __name__)
logger.debug(translator.get('loading_model_succeeded').format(model_name = model_file_name, seconds = calculate_end_time(start_time)), __name__)
return inference_session
except Exception:
logger.error(wording.get('loading_model_failed').format(model_name = model_file_name), __name__)
logger.error(translator.get('loading_model_failed').format(model_name = model_file_name), __name__)
fatal_exit(1)

View File

@@ -7,18 +7,24 @@ from argparse import ArgumentParser, HelpFormatter
from functools import partial
from types import FrameType
from facefusion import metadata, wording
from facefusion import metadata
from facefusion.common_helper import is_linux, is_windows
LOCALS =\
{
'install_dependency': 'install the {dependency} package',
'skip_conda': 'skip the conda environment check',
'conda_not_activated': 'conda is not activated'
}
ONNXRUNTIME_SET =\
{
'default': ('onnxruntime', '1.22.1')
'default': ('onnxruntime', '1.23.2')
}
if is_windows() or is_linux():
ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.22.0')
ONNXRUNTIME_SET['openvino'] = ('onnxruntime-openvino', '1.22.0')
ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.23.2')
ONNXRUNTIME_SET['openvino'] = ('onnxruntime-openvino', '1.23.0')
if is_windows():
ONNXRUNTIME_SET['directml'] = ('onnxruntime-directml', '1.17.3')
ONNXRUNTIME_SET['directml'] = ('onnxruntime-directml', '1.23.0')
if is_linux():
ONNXRUNTIME_SET['rocm'] = ('onnxruntime-rocm', '1.21.0')
@@ -26,8 +32,8 @@ if is_linux():
def cli() -> None:
signal.signal(signal.SIGINT, signal_exit)
program = ArgumentParser(formatter_class = partial(HelpFormatter, max_help_position = 50))
program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIME_SET.keys(), required = True)
program.add_argument('--skip-conda', help = wording.get('help.skip_conda'), action = 'store_true')
program.add_argument('--onnxruntime', help = LOCALS.get('install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIME_SET.keys(), required = True)
program.add_argument('--skip-conda', help = LOCALS.get('skip_conda'), action = 'store_true')
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
run(program)
@@ -42,7 +48,7 @@ def run(program : ArgumentParser) -> None:
onnxruntime_name, onnxruntime_version = ONNXRUNTIME_SET.get(args.onnxruntime)
if not args.skip_conda and not has_conda:
sys.stdout.write(wording.get('conda_not_activated') + os.linesep)
sys.stdout.write(LOCALS.get('conda_not_activated') + os.linesep)
sys.exit(1)
with open('requirements.txt') as file:
@@ -92,5 +98,3 @@ def run(program : ArgumentParser) -> None:
subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'PATH=' + os.pathsep.join(library_paths) ])
if args.onnxruntime == 'directml':
subprocess.call([ shutil.which('pip'), 'install', 'numpy==1.26.4', '--force-reinstall' ])

View File

@@ -1,15 +1,15 @@
from datetime import datetime
from typing import Optional, Tuple
from typing import List, Optional, Tuple
from facefusion.jobs import job_manager
from facefusion.time_helper import describe_time_ago
from facefusion.types import JobStatus, TableContents, TableHeaders
from facefusion.types import JobStatus, TableContent, TableHeader
def compose_job_list(job_status : JobStatus) -> Tuple[TableHeaders, TableContents]:
def compose_job_list(job_status : JobStatus) -> Tuple[List[TableHeader], List[List[TableContent]]]:
jobs = job_manager.find_jobs(job_status)
job_headers : TableHeaders = [ 'job id', 'steps', 'date created', 'date updated', 'job status' ]
job_contents : TableContents = []
job_headers : List[TableHeader] = [ 'job id', 'steps', 'date created', 'date updated', 'job status' ]
job_contents : List[List[TableContent]] = []
for index, job_id in enumerate(jobs):
if job_manager.validate_job(job_id):

View File

@@ -17,11 +17,11 @@ def get_step_keys() -> List[str]:
return JOB_STORE.get('step_keys')
def register_job_keys(step_keys : List[str]) -> None:
for step_key in step_keys:
JOB_STORE['job_keys'].append(step_key)
def register_step_keys(job_keys : List[str]) -> None:
def register_job_keys(job_keys : List[str]) -> None:
for job_key in job_keys:
JOB_STORE['step_keys'].append(job_key)
JOB_STORE['job_keys'].append(job_key)
def register_step_keys(step_keys : List[str]) -> None:
for step_key in step_keys:
JOB_STORE['step_keys'].append(step_key)

274
facefusion/locals.py Normal file
View File

@@ -0,0 +1,274 @@
from facefusion.types import Locals
LOCALS : Locals =\
{
'en':
{
'conda_not_activated': 'conda is not activated',
'python_not_supported': 'python version is not supported, upgrade to {version} or higher',
'curl_not_installed': 'curl is not installed',
'ffmpeg_not_installed': 'ffmpeg is not installed',
'creating_temp': 'creating temporary resources',
'extracting_frames': 'extracting frames with a resolution of {resolution} and {fps} frames per second',
'extracting_frames_succeeded': 'extracting frames succeeded',
'extracting_frames_failed': 'extracting frames failed',
'analysing': 'analysing',
'extracting': 'extracting',
'streaming': 'streaming',
'processing': 'processing',
'merging': 'merging',
'downloading': 'downloading',
'temp_frames_not_found': 'temporary frames not found',
'copying_image': 'copying image with a resolution of {resolution}',
'copying_image_succeeded': 'copying image succeeded',
'copying_image_failed': 'copying image failed',
'finalizing_image': 'finalizing image with a resolution of {resolution}',
'finalizing_image_succeeded': 'finalizing image succeeded',
'finalizing_image_skipped': 'finalizing image skipped',
'merging_video': 'merging video with a resolution of {resolution} and {fps} frames per second',
'merging_video_succeeded': 'merging video succeeded',
'merging_video_failed': 'merging video failed',
'skipping_audio': 'skipping audio',
'replacing_audio_succeeded': 'replacing audio succeeded',
'replacing_audio_skipped': 'replacing audio skipped',
'restoring_audio_succeeded': 'restoring audio succeeded',
'restoring_audio_skipped': 'restoring audio skipped',
'clearing_temp': 'clearing temporary resources',
'processing_stopped': 'processing stopped',
'processing_image_succeeded': 'processing to image succeeded in {seconds} seconds',
'processing_image_failed': 'processing to image failed',
'processing_video_succeeded': 'processing to video succeeded in {seconds} seconds',
'processing_video_failed': 'processing to video failed',
'choose_image_source': 'choose an image for the source',
'choose_audio_source': 'choose an audio for the source',
'choose_video_target': 'choose a video for the target',
'choose_image_or_video_target': 'choose an image or video for the target',
'specify_image_or_video_output': 'specify the output image or video within a directory',
'match_target_and_output_extension': 'match the target and output extension',
'no_source_face_detected': 'no source face detected',
'processor_not_loaded': 'processor {processor} could not be loaded',
'processor_not_implemented': 'processor {processor} not implemented correctly',
'ui_layout_not_loaded': 'ui layout {ui_layout} could not be loaded',
'ui_layout_not_implemented': 'ui layout {ui_layout} not implemented correctly',
'stream_not_loaded': 'stream {stream_mode} could not be loaded',
'stream_not_supported': 'stream not supported',
'job_created': 'job {job_id} created',
'job_not_created': 'job {job_id} not created',
'job_submitted': 'job {job_id} submitted',
'job_not_submitted': 'job {job_id} not submitted',
'job_all_submitted': 'jobs submitted',
'job_all_not_submitted': 'jobs not submitted',
'job_deleted': 'job {job_id} deleted',
'job_not_deleted': 'job {job_id} not deleted',
'job_all_deleted': 'jobs deleted',
'job_all_not_deleted': 'jobs not deleted',
'job_step_added': 'step added to job {job_id}',
'job_step_not_added': 'step not added to job {job_id}',
'job_remix_step_added': 'step {step_index} remixed from job {job_id}',
'job_remix_step_not_added': 'step {step_index} not remixed from job {job_id}',
'job_step_inserted': 'step {step_index} inserted to job {job_id}',
'job_step_not_inserted': 'step {step_index} not inserted to job {job_id}',
'job_step_removed': 'step {step_index} removed from job {job_id}',
'job_step_not_removed': 'step {step_index} not removed from job {job_id}',
'running_job': 'running queued job {job_id}',
'running_jobs': 'running all queued jobs',
'retrying_job': 'retrying failed job {job_id}',
'retrying_jobs': 'retrying all failed jobs',
'processing_job_succeeded': 'processing of job {job_id} succeeded',
'processing_jobs_succeeded': 'processing of all jobs succeeded',
'processing_job_failed': 'processing of job {job_id} failed',
'processing_jobs_failed': 'processing of all jobs failed',
'processing_step': 'processing step {step_current} of {step_total}',
'validating_hash_succeeded': 'validating hash for {hash_file_name} succeeded',
'validating_hash_failed': 'validating hash for {hash_file_name} failed',
'validating_source_succeeded': 'validating source for {source_file_name} succeeded',
'validating_source_failed': 'validating source for {source_file_name} failed',
'deleting_corrupt_source': 'deleting corrupt source for {source_file_name}',
'loading_model_succeeded': 'loading model {model_name} succeeded in {seconds} seconds',
'loading_model_failed': 'loading model {model_name} failed',
'time_ago_now': 'just now',
'time_ago_minutes': '{minutes} minutes ago',
'time_ago_hours': '{hours} hours and {minutes} minutes ago',
'time_ago_days': '{days} days, {hours} hours and {minutes} minutes ago',
'point': '.',
'comma': ',',
'colon': ':',
'question_mark': '?',
'exclamation_mark': '!',
'help':
{
'install_dependency': 'choose the variant of {dependency} to install',
'skip_conda': 'skip the conda environment check',
'config_path': 'choose the config file to override defaults',
'temp_path': 'specify the directory for the temporary resources',
'jobs_path': 'specify the directory to store jobs',
'source_paths': 'choose the image or audio paths',
'target_path': 'choose the image or video path',
'output_path': 'specify the image or video within a directory',
'source_pattern': 'choose the image or audio pattern',
'target_pattern': 'choose the image or video pattern',
'output_pattern': 'specify the image or video pattern',
'face_detector_model': 'choose the model responsible for detecting the faces',
'face_detector_size': 'specify the frame size provided to the face detector',
'face_detector_margin': 'apply top, right, bottom and left margin to the frame',
'face_detector_angles': 'specify the angles to rotate the frame before detecting faces',
'face_detector_score': 'filter the detected faces based on the confidence score',
'face_landmarker_model': 'choose the model responsible for detecting the face landmarks',
'face_landmarker_score': 'filter the detected face landmarks based on the confidence score',
'face_selector_mode': 'use reference based tracking or simple matching',
'face_selector_order': 'specify the order of the detected faces',
'face_selector_age_start': 'filter the detected faces based on the starting age',
'face_selector_age_end': 'filter the detected faces based on the ending age',
'face_selector_gender': 'filter the detected faces based on their gender',
'face_selector_race': 'filter the detected faces based on their race',
'reference_face_position': 'specify the position used to create the reference face',
'reference_face_distance': 'specify the similarity between the reference face and target face',
'reference_frame_number': 'specify the frame used to create the reference face',
'face_occluder_model': 'choose the model responsible for the occlusion mask',
'face_parser_model': 'choose the model responsible for the region mask',
'face_mask_types': 'mix and match different face mask types (choices: {choices})',
'face_mask_areas': 'choose the items used for the area mask (choices: {choices})',
'face_mask_regions': 'choose the items used for the region mask (choices: {choices})',
'face_mask_blur': 'specify the degree of blur applied to the box mask',
'face_mask_padding': 'apply top, right, bottom and left padding to the box mask',
'voice_extractor_model': 'choose the model responsible for extracting the voices',
'trim_frame_start': 'specify the starting frame of the target video',
'trim_frame_end': 'specify the ending frame of the target video',
'temp_frame_format': 'specify the temporary resources format',
'keep_temp': 'keep the temporary resources after processing',
'output_image_quality': 'specify the image quality which translates to the image compression',
'output_image_scale': 'specify the image scale based on the target image',
'output_audio_encoder': 'specify the encoder used for the audio',
'output_audio_quality': 'specify the audio quality which translates to the audio compression',
'output_audio_volume': 'specify the audio volume based on the target video',
'output_video_encoder': 'specify the encoder used for the video',
'output_video_preset': 'balance fast video processing and video file size',
'output_video_quality': 'specify the video quality which translates to the video compression',
'output_video_scale': 'specify the video scale based on the target video',
'output_video_fps': 'specify the video fps based on the target video',
'processors': 'load a single or multiple processors (choices: {choices}, ...)',
'background-remover-model': 'choose the model responsible for removing the background',
'background-remover-color': 'apply red, green blue and alpha values of the background',
'open_browser': 'open the browser once the program is ready',
'ui_layouts': 'launch a single or multiple UI layouts (choices: {choices}, ...)',
'ui_workflow': 'choose the ui workflow',
'download_providers': 'download using different providers (choices: {choices}, ...)',
'download_scope': 'specify the download scope',
'benchmark_mode': 'choose the benchmark mode',
'benchmark_resolutions': 'choose the resolutions for the benchmarks (choices: {choices}, ...)',
'benchmark_cycle_count': 'specify the amount of cycles per benchmark',
'execution_device_ids': 'specify the devices used for processing',
'execution_providers': 'inference using different providers (choices: {choices}, ...)',
'execution_thread_count': 'specify the amount of parallel threads while processing',
'video_memory_strategy': 'balance fast processing and low VRAM usage',
'system_memory_limit': 'limit the available RAM that can be used while processing',
'log_level': 'adjust the message severity displayed in the terminal',
'halt_on_error': 'halt the program once an error occurred',
'run': 'run the program',
'headless_run': 'run the program in headless mode',
'batch_run': 'run the program in batch mode',
'force_download': 'force automate downloads and exit',
'benchmark': 'benchmark the program',
'job_id': 'specify the job id',
'job_status': 'specify the job status',
'step_index': 'specify the step index',
'job_list': 'list jobs by status',
'job_create': 'create a drafted job',
'job_submit': 'submit a drafted job to become a queued job',
'job_submit_all': 'submit all drafted jobs to become a queued jobs',
'job_delete': 'delete a drafted, queued, failed or completed job',
'job_delete_all': 'delete all drafted, queued, failed and completed jobs',
'job_add_step': 'add a step to a drafted job',
'job_remix_step': 'remix a previous step from a drafted job',
'job_insert_step': 'insert a step to a drafted job',
'job_remove_step': 'remove a step from a drafted job',
'job_run': 'run a queued job',
'job_run_all': 'run all queued jobs',
'job_retry': 'retry a failed job',
'job_retry_all': 'retry all failed jobs'
},
'about':
{
'fund': 'fund training server',
'subscribe': 'become a member',
'join': 'join our community'
},
'uis':
{
'apply_button': 'APPLY',
'benchmark_mode_dropdown': 'BENCHMARK MODE',
'benchmark_cycle_count_slider': 'BENCHMARK CYCLE COUNT',
'benchmark_resolutions_checkbox_group': 'BENCHMARK RESOLUTIONS',
'clear_button': 'CLEAR',
'common_options_checkbox_group': 'OPTIONS',
'download_providers_checkbox_group': 'DOWNLOAD PROVIDERS',
'execution_providers_checkbox_group': 'EXECUTION PROVIDERS',
'execution_thread_count_slider': 'EXECUTION THREAD COUNT',
'face_detector_angles_checkbox_group': 'FACE DETECTOR ANGLES',
'face_detector_model_dropdown': 'FACE DETECTOR MODEL',
'face_detector_margin_slider': 'FACE DETECTOR MARGIN',
'face_detector_score_slider': 'FACE DETECTOR SCORE',
'face_detector_size_dropdown': 'FACE DETECTOR SIZE',
'face_landmarker_model_dropdown': 'FACE LANDMARKER MODEL',
'face_landmarker_score_slider': 'FACE LANDMARKER SCORE',
'face_mask_blur_slider': 'FACE MASK BLUR',
'face_mask_padding_bottom_slider': 'FACE MASK PADDING BOTTOM',
'face_mask_padding_left_slider': 'FACE MASK PADDING LEFT',
'face_mask_padding_right_slider': 'FACE MASK PADDING RIGHT',
'face_mask_padding_top_slider': 'FACE MASK PADDING TOP',
'face_mask_areas_checkbox_group': 'FACE MASK AREAS',
'face_mask_regions_checkbox_group': 'FACE MASK REGIONS',
'face_mask_types_checkbox_group': 'FACE MASK TYPES',
'face_selector_age_range_slider': 'FACE SELECTOR AGE',
'face_selector_gender_dropdown': 'FACE SELECTOR GENDER',
'face_selector_mode_dropdown': 'FACE SELECTOR MODE',
'face_selector_order_dropdown': 'FACE SELECTOR ORDER',
'face_selector_race_dropdown': 'FACE SELECTOR RACE',
'face_occluder_model_dropdown': 'FACE OCCLUDER MODEL',
'face_parser_model_dropdown': 'FACE PARSER MODEL',
'voice_extractor_model_dropdown': 'VOICE EXTRACTOR MODEL',
'job_list_status_checkbox_group': 'JOB STATUS',
'job_manager_job_action_dropdown': 'JOB_ACTION',
'job_manager_job_id_dropdown': 'JOB ID',
'job_manager_step_index_dropdown': 'STEP INDEX',
'job_runner_job_action_dropdown': 'JOB ACTION',
'job_runner_job_id_dropdown': 'JOB ID',
'log_level_dropdown': 'LOG LEVEL',
'output_audio_encoder_dropdown': 'OUTPUT AUDIO ENCODER',
'output_audio_quality_slider': 'OUTPUT AUDIO QUALITY',
'output_audio_volume_slider': 'OUTPUT AUDIO VOLUME',
'output_image_or_video': 'OUTPUT',
'output_image_quality_slider': 'OUTPUT IMAGE QUALITY',
'output_image_scale_slider': 'OUTPUT IMAGE SCALE',
'output_path_textbox': 'OUTPUT PATH',
'output_video_encoder_dropdown': 'OUTPUT VIDEO ENCODER',
'output_video_fps_slider': 'OUTPUT VIDEO FPS',
'output_video_preset_dropdown': 'OUTPUT VIDEO PRESET',
'output_video_quality_slider': 'OUTPUT VIDEO QUALITY',
'output_video_scale_slider': 'OUTPUT VIDEO SCALE',
'preview_frame_slider': 'PREVIEW FRAME',
'preview_image': 'PREVIEW',
'preview_mode_dropdown': 'PREVIEW MODE',
'preview_resolution_dropdown': 'PREVIEW RESOLUTION',
'processors_checkbox_group': 'PROCESSORS',
'reference_face_distance_slider': 'REFERENCE FACE DISTANCE',
'reference_face_gallery': 'REFERENCE FACE',
'refresh_button': 'REFRESH',
'source_file': 'SOURCE',
'start_button': 'START',
'stop_button': 'STOP',
'system_memory_limit_slider': 'SYSTEM MEMORY LIMIT',
'target_file': 'TARGET',
'temp_frame_format_dropdown': 'TEMP FRAME FORMAT',
'terminal_textbox': 'TERMINAL',
'trim_frame_slider': 'TRIM FRAME',
'ui_workflow': 'UI WORKFLOW',
'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY',
'webcam_fps_slider': 'WEBCAM FPS',
'webcam_image': 'WEBCAM',
'webcam_device_id_dropdown': 'WEBCAM DEVICE ID',
'webcam_mode_radio': 'WEBCAM MODE',
'webcam_resolution_dropdown': 'WEBCAM RESOLUTION'
}
}
}

View File

@@ -4,7 +4,7 @@ METADATA =\
{
'name': 'FaceFusion',
'description': 'Industry leading face manipulation platform',
'version': '3.4.2',
'version': '3.5.0',
'license': 'OpenRAIL-AS',
'author': 'Henry Ruhs',
'url': 'https://facefusion.io'
@@ -12,6 +12,4 @@ METADATA =\
def get(key : str) -> Optional[str]:
if key in METADATA:
return METADATA.get(key)
return None
return METADATA.get(key)

View File

@@ -1,17 +1,29 @@
from typing import List, Optional
from facefusion.types import Fps, Padding
from facefusion.types import Color, Fps, Padding
def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:
if padding and len(padding) == 1:
return tuple([ padding[0] ] * 4) #type:ignore[return-value]
if padding and len(padding) == 2:
return tuple([ padding[0], padding[1], padding[0], padding[1] ]) #type:ignore[return-value]
if padding and len(padding) == 3:
return tuple([ padding[0], padding[1], padding[2], padding[1] ]) #type:ignore[return-value]
if padding and len(padding) == 4:
return tuple(padding) #type:ignore[return-value]
def normalize_color(channels : Optional[List[int]]) -> Optional[Color]:
if channels and len(channels) == 1:
return tuple([ channels[0], channels[0], channels[0], 255 ]) #type:ignore[return-value]
if channels and len(channels) == 2:
return tuple([ channels[0], channels[1], channels[0], 255 ]) #type:ignore[return-value]
if channels and len(channels) == 3:
return tuple([ channels[0], channels[1], channels[2], 255 ]) #type:ignore[return-value]
if channels and len(channels) == 4:
return tuple(channels) #type:ignore[return-value]
return None
def normalize_space(spaces : Optional[List[int]]) -> Optional[Padding]:
if spaces and len(spaces) == 1:
return tuple([spaces[0]] * 4) #type:ignore[return-value]
if spaces and len(spaces) == 2:
return tuple([spaces[0], spaces[1], spaces[0], spaces[1]]) #type:ignore[return-value]
if spaces and len(spaces) == 3:
return tuple([spaces[0], spaces[1], spaces[2], spaces[1]]) #type:ignore[return-value]
if spaces and len(spaces) == 4:
return tuple(spaces) #type:ignore[return-value]
return None

View File

@@ -1,226 +1,27 @@
from typing import List, Sequence
from facefusion.common_helper import create_float_range, create_int_range
from facefusion.filesystem import get_file_name, resolve_file_paths, resolve_relative_path
from facefusion.processors.types import AgeModifierModel, DeepSwapperModel, ExpressionRestorerArea, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperModel, FaceSwapperSet, FaceSwapperWeight, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ]
deep_swapper_models : List[DeepSwapperModel] =\
[
'druuzil/adam_levine_320',
'druuzil/adrianne_palicki_384',
'druuzil/agnetha_falskog_224',
'druuzil/alan_ritchson_320',
'druuzil/alicia_vikander_320',
'druuzil/amber_midthunder_320',
'druuzil/andras_arato_384',
'druuzil/andrew_tate_320',
'druuzil/angelina_jolie_384',
'druuzil/anne_hathaway_320',
'druuzil/anya_chalotra_320',
'druuzil/arnold_schwarzenegger_320',
'druuzil/benjamin_affleck_320',
'druuzil/benjamin_stiller_384',
'druuzil/bradley_pitt_224',
'druuzil/brie_larson_384',
'druuzil/bruce_campbell_384',
'druuzil/bryan_cranston_320',
'druuzil/catherine_blanchett_352',
'druuzil/christian_bale_320',
'druuzil/christopher_hemsworth_320',
'druuzil/christoph_waltz_384',
'druuzil/cillian_murphy_320',
'druuzil/cobie_smulders_256',
'druuzil/dwayne_johnson_384',
'druuzil/edward_norton_320',
'druuzil/elisabeth_shue_320',
'druuzil/elizabeth_olsen_384',
'druuzil/elon_musk_320',
'druuzil/emily_blunt_320',
'druuzil/emma_stone_384',
'druuzil/emma_watson_320',
'druuzil/erin_moriarty_384',
'druuzil/eva_green_320',
'druuzil/ewan_mcgregor_320',
'druuzil/florence_pugh_320',
'druuzil/freya_allan_320',
'druuzil/gary_cole_224',
'druuzil/gigi_hadid_224',
'druuzil/harrison_ford_384',
'druuzil/hayden_christensen_320',
'druuzil/heath_ledger_320',
'druuzil/henry_cavill_448',
'druuzil/hugh_jackman_384',
'druuzil/idris_elba_320',
'druuzil/jack_nicholson_320',
'druuzil/james_carrey_384',
'druuzil/james_mcavoy_320',
'druuzil/james_varney_320',
'druuzil/jason_momoa_320',
'druuzil/jason_statham_320',
'druuzil/jennifer_connelly_384',
'druuzil/jimmy_donaldson_320',
'druuzil/jordan_peterson_384',
'druuzil/karl_urban_224',
'druuzil/kate_beckinsale_384',
'druuzil/laurence_fishburne_384',
'druuzil/lili_reinhart_320',
'druuzil/luke_evans_384',
'druuzil/mads_mikkelsen_384',
'druuzil/mary_winstead_320',
'druuzil/margaret_qualley_384',
'druuzil/melina_juergens_320',
'druuzil/michael_fassbender_320',
'druuzil/michael_fox_320',
'druuzil/millie_bobby_brown_320',
'druuzil/morgan_freeman_320',
'druuzil/patrick_stewart_224',
'druuzil/rachel_weisz_384',
'druuzil/rebecca_ferguson_320',
'druuzil/scarlett_johansson_320',
'druuzil/shannen_doherty_384',
'druuzil/seth_macfarlane_384',
'druuzil/thomas_cruise_320',
'druuzil/thomas_hanks_384',
'druuzil/william_murray_384',
'druuzil/zoe_saldana_384',
'edel/emma_roberts_224',
'edel/ivanka_trump_224',
'edel/lize_dzjabrailova_224',
'edel/sidney_sweeney_224',
'edel/winona_ryder_224',
'iperov/alexandra_daddario_224',
'iperov/alexei_navalny_224',
'iperov/amber_heard_224',
'iperov/dilraba_dilmurat_224',
'iperov/elon_musk_224',
'iperov/emilia_clarke_224',
'iperov/emma_watson_224',
'iperov/erin_moriarty_224',
'iperov/jackie_chan_224',
'iperov/james_carrey_224',
'iperov/jason_statham_320',
'iperov/keanu_reeves_320',
'iperov/margot_robbie_224',
'iperov/natalie_dormer_224',
'iperov/nicolas_coppola_224',
'iperov/robert_downey_224',
'iperov/rowan_atkinson_224',
'iperov/ryan_reynolds_224',
'iperov/scarlett_johansson_224',
'iperov/sylvester_stallone_224',
'iperov/thomas_cruise_224',
'iperov/thomas_holland_224',
'iperov/vin_diesel_224',
'iperov/vladimir_putin_224',
'jen/angelica_trae_288',
'jen/ella_freya_224',
'jen/emma_myers_320',
'jen/evie_pickerill_224',
'jen/kang_hyewon_320',
'jen/maddie_mead_224',
'jen/nicole_turnbull_288',
'mats/alica_schmidt_320',
'mats/ashley_alexiss_224',
'mats/billie_eilish_224',
'mats/brie_larson_224',
'mats/cara_delevingne_224',
'mats/carolin_kebekus_224',
'mats/chelsea_clinton_224',
'mats/claire_boucher_224',
'mats/corinna_kopf_224',
'mats/florence_pugh_224',
'mats/hillary_clinton_224',
'mats/jenna_fischer_224',
'mats/kim_jisoo_320',
'mats/mica_suarez_320',
'mats/shailene_woodley_224',
'mats/shraddha_kapoor_320',
'mats/yu_jimin_352',
'rumateus/alison_brie_224',
'rumateus/amber_heard_224',
'rumateus/angelina_jolie_224',
'rumateus/aubrey_plaza_224',
'rumateus/bridget_regan_224',
'rumateus/cobie_smulders_224',
'rumateus/deborah_woll_224',
'rumateus/dua_lipa_224',
'rumateus/emma_stone_224',
'rumateus/hailee_steinfeld_224',
'rumateus/hilary_duff_224',
'rumateus/jessica_alba_224',
'rumateus/jessica_biel_224',
'rumateus/john_cena_224',
'rumateus/kim_kardashian_224',
'rumateus/kristen_bell_224',
'rumateus/lucy_liu_224',
'rumateus/margot_robbie_224',
'rumateus/megan_fox_224',
'rumateus/meghan_markle_224',
'rumateus/millie_bobby_brown_224',
'rumateus/natalie_portman_224',
'rumateus/nicki_minaj_224',
'rumateus/olivia_wilde_224',
'rumateus/shay_mitchell_224',
'rumateus/sophie_turner_224',
'rumateus/taylor_swift_224'
]
custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom'))
if custom_model_file_paths:
for model_file_path in custom_model_file_paths:
model_id = '/'.join([ 'custom', get_file_name(model_file_path) ])
deep_swapper_models.append(model_id)
expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ]
expression_restorer_areas : List[ExpressionRestorerArea] = [ 'upper-face', 'lower-face' ]
face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask' ]
face_editor_models : List[FaceEditorModel] = [ 'live_portrait' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ]
face_swapper_set : FaceSwapperSet =\
{
'blendswap_256': [ '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
'ghost_1_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'ghost_2_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'ghost_3_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'hififace_unofficial_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'hyperswap_1a_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'hyperswap_1b_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'hyperswap_1c_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'inswapper_128': [ '128x128', '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
'inswapper_128_fp16': [ '128x128', '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
'simswap_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
'simswap_unofficial_512': [ '512x512', '768x768', '1024x1024' ],
'uniface_256': [ '256x256', '512x512', '768x768', '1024x1024' ]
}
face_swapper_models : List[FaceSwapperModel] = list(face_swapper_set.keys())
frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable' ]
frame_colorizer_sizes : List[str] = [ '192x192', '256x256', '384x384', '512x512' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4', 'ultra_sharp_2_x4' ]
lip_syncer_models : List[LipSyncerModel] = [ 'edtalk_256', 'wav2lip_96', 'wav2lip_gan_96' ]
age_modifier_direction_range : Sequence[int] = create_int_range(-100, 100, 1)
deep_swapper_morph_range : Sequence[int] = create_int_range(0, 100, 1)
expression_restorer_factor_range : Sequence[int] = create_int_range(0, 100, 1)
face_editor_eyebrow_direction_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_gaze_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_gaze_vertical_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_open_ratio_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_lip_open_ratio_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_grim_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_pout_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_purse_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_smile_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_position_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_position_vertical_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_pitch_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_yaw_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_roll_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
face_enhancer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
face_swapper_weight_range : Sequence[FaceSwapperWeight] = create_float_range(0.0, 1.0, 0.05)
frame_colorizer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
frame_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
lip_syncer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
from facefusion.processors.modules.age_modifier.choices import age_modifier_direction_range, age_modifier_models # noqa: F401
from facefusion.processors.modules.background_remover.choices import background_remover_color_range, background_remover_models # noqa: F401
from facefusion.processors.modules.deep_swapper.choices import deep_swapper_models, deep_swapper_morph_range # noqa: F401
from facefusion.processors.modules.expression_restorer.choices import expression_restorer_areas, expression_restorer_factor_range, expression_restorer_models # noqa: F401
from facefusion.processors.modules.face_debugger.choices import face_debugger_items # noqa: F401
from facefusion.processors.modules.face_editor.choices import ( # noqa: F401
face_editor_eye_gaze_horizontal_range,
face_editor_eye_gaze_vertical_range,
face_editor_eye_open_ratio_range,
face_editor_eyebrow_direction_range,
face_editor_head_pitch_range,
face_editor_head_roll_range,
face_editor_head_yaw_range,
face_editor_lip_open_ratio_range,
face_editor_models,
face_editor_mouth_grim_range,
face_editor_mouth_position_horizontal_range,
face_editor_mouth_position_vertical_range,
face_editor_mouth_pout_range,
face_editor_mouth_purse_range,
face_editor_mouth_smile_range,
)
from facefusion.processors.modules.face_enhancer.choices import face_enhancer_blend_range, face_enhancer_models, face_enhancer_weight_range # noqa: F401
from facefusion.processors.modules.face_swapper.choices import face_swapper_models, face_swapper_set, face_swapper_weight_range # noqa: F401
from facefusion.processors.modules.frame_colorizer.choices import frame_colorizer_blend_range, frame_colorizer_models, frame_colorizer_sizes # noqa: F401
from facefusion.processors.modules.frame_enhancer.choices import frame_enhancer_blend_range, frame_enhancer_models # noqa: F401
from facefusion.processors.modules.lip_syncer.choices import lip_syncer_models, lip_syncer_weight_range # noqa: F401

View File

@@ -2,9 +2,10 @@ import importlib
from types import ModuleType
from typing import Any, List
from facefusion import logger, wording
from facefusion import logger, translator
from facefusion.exit_helper import hard_exit
PROCESSORS_METHODS =\
[
'get_inference_pool',
@@ -20,16 +21,16 @@ PROCESSORS_METHODS =\
def load_processor_module(processor : str) -> Any:
try:
processor_module = importlib.import_module('facefusion.processors.modules.' + processor)
processor_module = importlib.import_module('facefusion.processors.modules.' + processor + '.core')
for method_name in PROCESSORS_METHODS:
if not hasattr(processor_module, method_name):
raise NotImplementedError
except ModuleNotFoundError as exception:
logger.error(wording.get('processor_not_loaded').format(processor = processor), __name__)
logger.error(translator.get('processor_not_loaded').format(processor = processor), __name__)
logger.debug(exception.msg, __name__)
hard_exit(1)
except NotImplementedError:
logger.error(wording.get('processor_not_implemented').format(processor = processor), __name__)
logger.error(translator.get('processor_not_implemented').format(processor = processor), __name__)
hard_exit(1)
return processor_module

View File

@@ -0,0 +1,8 @@
from typing import List, Sequence
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.age_modifier.types import AgeModifierModel

# Models available to the age modifier processor.
age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ]
# Direction range from -100 to 100 in steps of 1.
age_modifier_direction_range : Sequence[int] = create_int_range(-100, 100, 1)

View File

@@ -7,7 +7,7 @@ import numpy
import facefusion.choices
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_int_metavar, is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
@@ -16,8 +16,9 @@ from facefusion.face_helper import merge_matrix, paste_back, scale_face_landmark
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import AgeModifierDirection, AgeModifierInputs
from facefusion.processors.modules.age_modifier import choices as age_modifier_choices
from facefusion.processors.modules.age_modifier.types import AgeModifierDirection, AgeModifierInputs
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -30,6 +31,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'styleganex_age':
{
'__metadata__':
{
'vendor': 'williamyang1991',
'license': 'S-Lab-1.0',
'year': 2023
},
'hashes':
{
'age_modifier':
@@ -80,8 +87,8 @@ def get_model_options() -> ModelOptions:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--age-modifier-model', help = wording.get('help.age_modifier_model'), default = config.get_str_value('processors', 'age_modifier_model', 'styleganex_age'), choices = processors_choices.age_modifier_models)
group_processors.add_argument('--age-modifier-direction', help = wording.get('help.age_modifier_direction'), type = int, default = config.get_int_value('processors', 'age_modifier_direction', '0'), choices = processors_choices.age_modifier_direction_range, metavar = create_int_metavar(processors_choices.age_modifier_direction_range))
group_processors.add_argument('--age-modifier-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'age_modifier_model', 'styleganex_age'), choices = age_modifier_choices.age_modifier_models)
group_processors.add_argument('--age-modifier-direction', help = translator.get('help.direction', __package__), type = int, default = config.get_int_value('processors', 'age_modifier_direction', '0'), choices = age_modifier_choices.age_modifier_direction_range, metavar = create_int_metavar(age_modifier_choices.age_modifier_direction_range))
facefusion.jobs.job_store.register_step_keys([ 'age_modifier_model', 'age_modifier_direction' ])
@@ -99,13 +106,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -197,10 +204,11 @@ def normalize_extend_frame(extend_vision_frame : VisionFrame) -> VisionFrame:
return extend_vision_frame
def process_frame(inputs : AgeModifierInputs) -> VisionFrame:
def process_frame(inputs : AgeModifierInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -208,4 +216,4 @@ def process_frame(inputs : AgeModifierInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = modify_age(target_face, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,18 @@
from facefusion.types import Locals

# English locale strings for the age modifier processor, looked up via
# translator.get(..., __package__).
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for aging the face',
			'direction': 'specify the direction in which the age should be modified'
		},
		'uis':
		{
			'direction_slider': 'AGE MODIFIER DIRECTION',
			'model_dropdown': 'AGE MODIFIER MODEL'
		}
	}
}

View File

@@ -0,0 +1,17 @@
from typing import Any, Literal, TypeAlias, TypedDict
from numpy.typing import NDArray
from facefusion.types import Mask, VisionFrame

# Inputs consumed by age_modifier.core.process_frame.
AgeModifierInputs = TypedDict('AgeModifierInputs',
{
	'reference_vision_frame' : VisionFrame,
	'target_vision_frame' : VisionFrame,
	'temp_vision_frame' : VisionFrame,
	'temp_vision_mask' : Mask
})

# Names of the supported age modifier models.
AgeModifierModel : TypeAlias = Literal['styleganex_age']
# Age modifier direction represented as a numpy array.
AgeModifierDirection : TypeAlias = NDArray[Any]

View File

@@ -0,0 +1,8 @@
from typing import List, Sequence
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.background_remover.types import BackgroundRemoverModel

# Models available to the background remover processor.
background_remover_models : List[BackgroundRemoverModel] = [ 'ben_2', 'birefnet_general', 'birefnet_portrait', 'isnet_general', 'modnet', 'ormbg', 'rmbg_1.4', 'rmbg_2.0', 'silueta', 'u2net_cloth', 'u2net_general', 'u2net_human', 'u2netp' ]
# Valid range for each background color channel value.
background_remover_color_range : Sequence[int] = create_int_range(0, 255, 1)

View File

@@ -0,0 +1,524 @@
from argparse import ArgumentParser
from functools import lru_cache, partial
from typing import List, Tuple
import cv2
import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.normalizer import normalize_color
from facefusion.processors.modules.background_remover import choices as background_remover_choices
from facefusion.processors.modules.background_remover.types import BackgroundRemoverInputs
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.sanitizer import sanitize_int_range
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, ExecutionProvider, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, VisionFrame
from facefusion.vision import read_static_image, read_static_video_frame
@lru_cache()
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""
	Build the static set of background remover models.

	Every model follows the same layout: vendor metadata, a hash file and an
	ONNX source resolved from the 'models-3.5.0' bucket under the model name,
	plus the model input size and the normalization mean / standard deviation.
	The entries are generated from a compact spec table to avoid repeating the
	identical structure per model.

	:param download_scope: part of the shared processor signature, unused here
	:return: mapping of model name to its model options
	"""
	model_specs =\
	[
		# (model_name, vendor, license, year, size, mean, standard_deviation)
		('ben_2', 'PramaLLC', 'MIT', 2025, (1024, 1024), [ 0.0, 0.0, 0.0 ], [ 1.0, 1.0, 1.0 ]),
		('birefnet_general', 'ZhengPeng7', 'MIT', 2024, (1024, 1024), [ 0.0, 0.0, 0.0 ], [ 1.0, 1.0, 1.0 ]),
		('birefnet_portrait', 'ZhengPeng7', 'MIT', 2024, (1024, 1024), [ 0.0, 0.0, 0.0 ], [ 1.0, 1.0, 1.0 ]),
		('isnet_general', 'xuebinqin', 'Apache-2.0', 2022, (1024, 1024), [ 0.5, 0.5, 0.5 ], [ 1.0, 1.0, 1.0 ]),
		('modnet', 'ZHKKKe', 'Apache-2.0', 2020, (512, 512), [ 0.5, 0.5, 0.5 ], [ 0.5, 0.5, 0.5 ]),
		('ormbg', 'schirrmacher', 'Apache-2.0', 2024, (1024, 1024), [ 0.0, 0.0, 0.0 ], [ 1.0, 1.0, 1.0 ]),
		('rmbg_1.4', 'Bria', 'Non-Commercial', 2023, (1024, 1024), [ 0.5, 0.5, 0.5 ], [ 1.0, 1.0, 1.0 ]),
		('rmbg_2.0', 'Bria', 'Non-Commercial', 2024, (1024, 1024), [ 0.485, 0.456, 0.406 ], [ 0.229, 0.224, 0.225 ]),
		('silueta', 'Kikedao', 'Apache-2.0', 2022, (320, 320), [ 0.485, 0.456, 0.406 ], [ 0.229, 0.224, 0.225 ]),
		('u2net_cloth', 'levindabhi', 'MIT', 2021, (768, 768), [ 0.485, 0.456, 0.406 ], [ 0.229, 0.224, 0.225 ]),
		('u2net_general', 'xuebinqin', 'Apache-2.0', 2020, (320, 320), [ 0.485, 0.456, 0.406 ], [ 0.229, 0.224, 0.225 ]),
		('u2net_human', 'xuebinqin', 'Apache-2.0', 2021, (320, 320), [ 0.485, 0.456, 0.406 ], [ 0.229, 0.224, 0.225 ]),
		('u2netp', 'xuebinqin', 'Apache-2.0', 2021, (320, 320), [ 0.485, 0.456, 0.406 ], [ 0.229, 0.224, 0.225 ])
	]
	model_set : ModelSet = {}

	for model_name, model_vendor, model_license, model_year, model_size, model_mean, model_standard_deviation in model_specs:
		model_set[model_name] =\
		{
			'__metadata__':
			{
				'vendor': model_vendor,
				'license': model_license,
				'year': model_year
			},
			'hashes':
			{
				'background_remover':
				{
					'url': resolve_download_url('models-3.5.0', model_name + '.hash'),
					'path': resolve_relative_path('../.assets/models/' + model_name + '.hash')
				}
			},
			'sources':
			{
				'background_remover':
				{
					'url': resolve_download_url('models-3.5.0', model_name + '.onnx'),
					'path': resolve_relative_path('../.assets/models/' + model_name + '.onnx')
				}
			},
			'size': model_size,
			'mean': model_mean,
			'standard_deviation': model_standard_deviation
		}
	return model_set
def get_inference_pool() -> InferencePool:
	# Resolve the inference sessions for the currently selected background remover model.
	model_names = [ state_manager.get_item('background_remover_model') ]
	model_source_set = get_model_options().get('sources')
	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
def clear_inference_pool() -> None:
	# Release the inference sessions held for the currently selected model.
	model_names = [ state_manager.get_item('background_remover_model') ]
	inference_manager.clear_inference_pool(__name__, model_names)
def resolve_execution_providers() -> List[ExecutionProvider]:
	# Force CPU whenever the CoreML provider would be picked up on macOS;
	# otherwise honor the globally configured execution providers.
	force_cpu = is_macos() and has_execution_provider('coreml')

	if force_cpu:
		return [ 'cpu' ]
	return state_manager.get_item('execution_providers')
def get_model_options() -> ModelOptions:
	# Look up the options of the currently selected model from the static model set.
	model_name = state_manager.get_item('background_remover_model')
	return create_static_model_set('full').get(model_name)
def register_args(program : ArgumentParser) -> None:
	# Register this processor's CLI arguments and expose them as job step keys.
	group_processors = find_argument_group(program, 'processors')

	if group_processors:
		group_processors.add_argument('--background-remover-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'background_remover_model', 'rmbg_2.0'), choices = background_remover_choices.background_remover_models)
		# The color takes multiple values, each sanitized into the 0-255 range.
		group_processors.add_argument('--background-remover-color', help = translator.get('help.color', __package__), type = partial(sanitize_int_range, int_range = background_remover_choices.background_remover_color_range), default = config.get_int_list('processors', 'background_remover_color', '0 0 0 0'), nargs = '+')
	facefusion.jobs.job_store.register_step_keys([ 'background_remover_model', 'background_remover_color' ])
def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
	# Persist the parsed arguments into the state; the color is normalized first.
	apply_state_item('background_remover_model', args.get('background_remover_model'))
	apply_state_item('background_remover_color', normalize_color(args.get('background_remover_color')))
def pre_check() -> bool:
	# Ensure the model hash and source files are present, downloading on demand.
	model_hash_set = get_model_options().get('hashes')
	model_source_set = get_model_options().get('sources')
	return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
def pre_process(mode : ProcessMode) -> bool:
	# Validate the session state for the given mode, logging and bailing out
	# on the first unmet requirement.
	if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
		logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
		return False
	# Writing output requires an existing output directory.
	if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
		logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
		return False
	# Target and output must share the same file extension.
	if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
		logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
		return False
	return True
def post_process() -> None:
	# Drop cached frames and pooled video handles after a run.
	read_static_image.cache_clear()
	read_static_video_frame.cache_clear()
	video_manager.clear_video_pool()
	# Free inference sessions according to the configured memory strategy.
	if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
		clear_inference_pool()
	if state_manager.get_item('video_memory_strategy') == 'strict':
		content_analyser.clear_inference_pool()
def remove_background(temp_vision_frame : VisionFrame) -> Tuple[VisionFrame, Mask]:
	# Run the model, scale the mask back to the frame resolution and blend
	# the configured background color behind the subject.
	temp_vision_mask = forward(prepare_temp_frame(temp_vision_frame))
	temp_vision_mask = normalize_vision_mask(temp_vision_mask)
	# shape[:2][::-1] turns (height, width) into the (width, height) order cv2.resize expects.
	temp_vision_mask = cv2.resize(temp_vision_mask, temp_vision_frame.shape[:2][::-1])
	temp_vision_frame = apply_background_color(temp_vision_frame, temp_vision_mask)
	return temp_vision_frame, temp_vision_mask
def forward(temp_vision_frame : VisionFrame) -> VisionFrame:
	# Run the ONNX session for the selected model; the semaphore serializes
	# access to the shared session.
	background_remover = get_inference_pool().get('background_remover')
	model_name = state_manager.get_item('background_remover_model')

	with thread_semaphore():
		remove_vision_frame = background_remover.run(None,
		{
			'input': temp_vision_frame
		})[0]

	# u2net_cloth emits multiple maps along axis 1; argmax collapses them to
	# the strongest response per pixel.
	if model_name == 'u2net_cloth':
		remove_vision_frame = numpy.argmax(remove_vision_frame, axis = 1)
	return remove_vision_frame
def prepare_temp_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
	model_size = get_model_options().get('size')
	model_mean = get_model_options().get('mean')
	model_standard_deviation = get_model_options().get('standard_deviation')
	# Resize to the model input size, reverse the channel order and scale to [0, 1].
	temp_vision_frame = cv2.resize(temp_vision_frame, model_size)
	temp_vision_frame = temp_vision_frame[:, :, ::-1] / 255.0
	# Normalize with the model specific mean and standard deviation.
	temp_vision_frame = (temp_vision_frame - model_mean) / model_standard_deviation
	# HWC to CHW, then prepend the batch dimension.
	temp_vision_frame = temp_vision_frame.transpose(2, 0, 1)
	temp_vision_frame = numpy.expand_dims(temp_vision_frame, axis = 0).astype(numpy.float32)
	return temp_vision_frame
def normalize_vision_mask(temp_vision_mask : Mask) -> Mask:
	# Collapse singleton axes and map the [0, 1] model scores to [0, 255].
	temp_vision_mask = numpy.squeeze(temp_vision_mask).clip(0, 1) * 255
	# The values already lie within [0, 255] after the clip above, so a plain
	# cast suffices — the former second clip was redundant.
	return temp_vision_mask.astype(numpy.uint8)
def apply_background_color(temp_vision_frame : VisionFrame, temp_vision_mask : Mask) -> VisionFrame:
	# background_remover_color holds four 0-255 channel values; the last one
	# acts as the alpha of the background fill.
	background_remover_color = state_manager.get_item('background_remover_color')
	temp_vision_mask = temp_vision_mask.astype(numpy.float32) / 255
	temp_vision_mask = numpy.expand_dims(temp_vision_mask, axis = 2)
	# Invert the subject mask and weight it by the alpha so a low alpha keeps
	# the original background visible.
	temp_vision_mask = (1 - temp_vision_mask) * background_remover_color[-1] / 255
	# The frame channels are filled in reverse color order (BGR): index 2 of
	# the color goes to channel 0 and index 0 to channel 2.
	color_frame = numpy.zeros_like(temp_vision_frame)
	color_frame[:, :, 0] = background_remover_color[2]
	color_frame[:, :, 1] = background_remover_color[1]
	color_frame[:, :, 2] = background_remover_color[0]
	temp_vision_frame = temp_vision_frame * (1 - temp_vision_mask) + color_frame * temp_vision_mask
	temp_vision_frame = temp_vision_frame.astype(numpy.uint8)
	return temp_vision_frame
def process_frame(inputs : BackgroundRemoverInputs) -> ProcessorOutputs:
	# Remove the background and merge the resulting mask with the incoming
	# mask, keeping the smaller value per pixel.
	temp_vision_frame = inputs.get('temp_vision_frame')
	temp_vision_frame, temp_vision_mask = remove_background(temp_vision_frame)
	temp_vision_mask = numpy.minimum.reduce([ temp_vision_mask, inputs.get('temp_vision_mask') ])
	return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,21 @@
from facefusion.types import Locals

# English locale strings for the background remover processor, looked up via
# translator.get(..., __package__).
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for removing the background',
			# Fixed missing comma in the help text ("red, green blue" -> "red, green, blue").
			'color': 'apply red, green, blue and alpha values to the background'
		},
		'uis':
		{
			'model_dropdown': 'BACKGROUND REMOVER MODEL',
			'color_red_number': 'BACKGROUND COLOR RED',
			'color_green_number': 'BACKGROUND COLOR GREEN',
			'color_blue_number': 'BACKGROUND COLOR BLUE',
			'color_alpha_number': 'BACKGROUND COLOR ALPHA'
		}
	}
}

View File

@@ -0,0 +1,12 @@
from typing import Literal, TypedDict
from facefusion.types import Mask, VisionFrame

# Inputs consumed by background_remover.core.process_frame.
BackgroundRemoverInputs = TypedDict('BackgroundRemoverInputs',
{
	'target_vision_frame' : VisionFrame,
	'temp_vision_frame' : VisionFrame,
	'temp_vision_mask' : Mask
})

# Names of the supported background remover models.
BackgroundRemoverModel = Literal['ben_2', 'birefnet_general', 'birefnet_portrait', 'isnet_general', 'modnet', 'ormbg', 'rmbg_1.4', 'rmbg_2.0', 'silueta', 'u2net_cloth', 'u2net_general', 'u2net_human', 'u2netp']

View File

@@ -0,0 +1,176 @@
from typing import List, Sequence
from facefusion.common_helper import create_int_range
from facefusion.filesystem import get_file_name, resolve_file_paths, resolve_relative_path
from facefusion.processors.modules.deep_swapper.types import DeepSwapperModel

# Bundled deep swapper models, grouped by vendor and kept alphabetical within
# each group. Two entries ('margaret_qualley_384', 'seth_macfarlane_384') were
# reordered to restore the alphabetical order.
deep_swapper_models : List[DeepSwapperModel] =\
[
	'druuzil/adam_levine_320',
	'druuzil/adrianne_palicki_384',
	'druuzil/agnetha_falskog_224',
	'druuzil/alan_ritchson_320',
	'druuzil/alicia_vikander_320',
	'druuzil/amber_midthunder_320',
	'druuzil/andras_arato_384',
	'druuzil/andrew_tate_320',
	'druuzil/angelina_jolie_384',
	'druuzil/anne_hathaway_320',
	'druuzil/anya_chalotra_320',
	'druuzil/arnold_schwarzenegger_320',
	'druuzil/benjamin_affleck_320',
	'druuzil/benjamin_stiller_384',
	'druuzil/bradley_pitt_224',
	'druuzil/brie_larson_384',
	'druuzil/bruce_campbell_384',
	'druuzil/bryan_cranston_320',
	'druuzil/catherine_blanchett_352',
	'druuzil/christian_bale_320',
	'druuzil/christopher_hemsworth_320',
	'druuzil/christoph_waltz_384',
	'druuzil/cillian_murphy_320',
	'druuzil/cobie_smulders_256',
	'druuzil/dwayne_johnson_384',
	'druuzil/edward_norton_320',
	'druuzil/elisabeth_shue_320',
	'druuzil/elizabeth_olsen_384',
	'druuzil/elon_musk_320',
	'druuzil/emily_blunt_320',
	'druuzil/emma_stone_384',
	'druuzil/emma_watson_320',
	'druuzil/erin_moriarty_384',
	'druuzil/eva_green_320',
	'druuzil/ewan_mcgregor_320',
	'druuzil/florence_pugh_320',
	'druuzil/freya_allan_320',
	'druuzil/gary_cole_224',
	'druuzil/gigi_hadid_224',
	'druuzil/harrison_ford_384',
	'druuzil/hayden_christensen_320',
	'druuzil/heath_ledger_320',
	'druuzil/henry_cavill_448',
	'druuzil/hugh_jackman_384',
	'druuzil/idris_elba_320',
	'druuzil/jack_nicholson_320',
	'druuzil/james_carrey_384',
	'druuzil/james_mcavoy_320',
	'druuzil/james_varney_320',
	'druuzil/jason_momoa_320',
	'druuzil/jason_statham_320',
	'druuzil/jennifer_connelly_384',
	'druuzil/jimmy_donaldson_320',
	'druuzil/jordan_peterson_384',
	'druuzil/karl_urban_224',
	'druuzil/kate_beckinsale_384',
	'druuzil/laurence_fishburne_384',
	'druuzil/lili_reinhart_320',
	'druuzil/luke_evans_384',
	'druuzil/mads_mikkelsen_384',
	'druuzil/margaret_qualley_384',
	'druuzil/mary_winstead_320',
	'druuzil/melina_juergens_320',
	'druuzil/michael_fassbender_320',
	'druuzil/michael_fox_320',
	'druuzil/millie_bobby_brown_320',
	'druuzil/morgan_freeman_320',
	'druuzil/patrick_stewart_224',
	'druuzil/rachel_weisz_384',
	'druuzil/rebecca_ferguson_320',
	'druuzil/scarlett_johansson_320',
	'druuzil/seth_macfarlane_384',
	'druuzil/shannen_doherty_384',
	'druuzil/thomas_cruise_320',
	'druuzil/thomas_hanks_384',
	'druuzil/william_murray_384',
	'druuzil/zoe_saldana_384',
	'edel/emma_roberts_224',
	'edel/ivanka_trump_224',
	'edel/lize_dzjabrailova_224',
	'edel/sidney_sweeney_224',
	'edel/winona_ryder_224',
	'iperov/alexandra_daddario_224',
	'iperov/alexei_navalny_224',
	'iperov/amber_heard_224',
	'iperov/dilraba_dilmurat_224',
	'iperov/elon_musk_224',
	'iperov/emilia_clarke_224',
	'iperov/emma_watson_224',
	'iperov/erin_moriarty_224',
	'iperov/jackie_chan_224',
	'iperov/james_carrey_224',
	'iperov/jason_statham_320',
	'iperov/keanu_reeves_320',
	'iperov/margot_robbie_224',
	'iperov/natalie_dormer_224',
	'iperov/nicolas_coppola_224',
	'iperov/robert_downey_224',
	'iperov/rowan_atkinson_224',
	'iperov/ryan_reynolds_224',
	'iperov/scarlett_johansson_224',
	'iperov/sylvester_stallone_224',
	'iperov/thomas_cruise_224',
	'iperov/thomas_holland_224',
	'iperov/vin_diesel_224',
	'iperov/vladimir_putin_224',
	'jen/angelica_trae_288',
	'jen/ella_freya_224',
	'jen/emma_myers_320',
	'jen/evie_pickerill_224',
	'jen/kang_hyewon_320',
	'jen/maddie_mead_224',
	'jen/nicole_turnbull_288',
	'mats/alica_schmidt_320',
	'mats/ashley_alexiss_224',
	'mats/billie_eilish_224',
	'mats/brie_larson_224',
	'mats/cara_delevingne_224',
	'mats/carolin_kebekus_224',
	'mats/chelsea_clinton_224',
	'mats/claire_boucher_224',
	'mats/corinna_kopf_224',
	'mats/florence_pugh_224',
	'mats/hillary_clinton_224',
	'mats/jenna_fischer_224',
	'mats/kim_jisoo_320',
	'mats/mica_suarez_320',
	'mats/shailene_woodley_224',
	'mats/shraddha_kapoor_320',
	'mats/yu_jimin_352',
	'rumateus/alison_brie_224',
	'rumateus/amber_heard_224',
	'rumateus/angelina_jolie_224',
	'rumateus/aubrey_plaza_224',
	'rumateus/bridget_regan_224',
	'rumateus/cobie_smulders_224',
	'rumateus/deborah_woll_224',
	'rumateus/dua_lipa_224',
	'rumateus/emma_stone_224',
	'rumateus/hailee_steinfeld_224',
	'rumateus/hilary_duff_224',
	'rumateus/jessica_alba_224',
	'rumateus/jessica_biel_224',
	'rumateus/john_cena_224',
	'rumateus/kim_kardashian_224',
	'rumateus/kristen_bell_224',
	'rumateus/lucy_liu_224',
	'rumateus/margot_robbie_224',
	'rumateus/megan_fox_224',
	'rumateus/meghan_markle_224',
	'rumateus/millie_bobby_brown_224',
	'rumateus/natalie_portman_224',
	'rumateus/nicki_minaj_224',
	'rumateus/olivia_wilde_224',
	'rumateus/shay_mitchell_224',
	'rumateus/sophie_turner_224',
	'rumateus/taylor_swift_224'
]

# Model files dropped into .assets/models/custom are exposed under the
# 'custom' vendor prefix.
custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom'))

if custom_model_file_paths:
	for model_file_path in custom_model_file_paths:
		model_id = '/'.join([ 'custom', get_file_name(model_file_path) ])
		deep_swapper_models.append(model_id)

# Morph range from 0 to 100 in steps of 1.
deep_swapper_morph_range : Sequence[int] = create_int_range(0, 100, 1)

View File

@@ -8,7 +8,7 @@ from cv2.typing import Size
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url_by_provider
from facefusion.face_analyser import scale_face
@@ -16,8 +16,9 @@ from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import get_file_name, in_directory, is_image, is_video, resolve_file_paths, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import DeepSwapperInputs, DeepSwapperMorph
from facefusion.processors.modules.deep_swapper import choices as deep_swapper_choices
from facefusion.processors.modules.deep_swapper.types import DeepSwapperInputs, DeepSwapperMorph
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -275,8 +276,8 @@ def get_model_size() -> Size:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors', 'deep_swapper_model', 'iperov/elon_musk_224'), choices = processors_choices.deep_swapper_models)
group_processors.add_argument('--deep-swapper-morph', help = wording.get('help.deep_swapper_morph'), type = int, default = config.get_int_value('processors', 'deep_swapper_morph', '100'), choices = processors_choices.deep_swapper_morph_range, metavar = create_int_metavar(processors_choices.deep_swapper_morph_range))
group_processors.add_argument('--deep-swapper-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'deep_swapper_model', 'iperov/elon_musk_224'), choices = deep_swapper_choices.deep_swapper_models)
group_processors.add_argument('--deep-swapper-morph', help = translator.get('help.morph', __package__), type = int, default = config.get_int_value('processors', 'deep_swapper_morph', '100'), choices = deep_swapper_choices.deep_swapper_morph_range, metavar = create_int_metavar(deep_swapper_choices.deep_swapper_morph_range))
facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model', 'deep_swapper_morph' ])
@@ -296,13 +297,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -408,10 +409,11 @@ def prepare_crop_mask(crop_source_mask : Mask, crop_target_mask : Mask) -> Mask:
return crop_mask
def process_frame(inputs : DeepSwapperInputs) -> VisionFrame:
def process_frame(inputs : DeepSwapperInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -419,6 +421,4 @@ def process_frame(inputs : DeepSwapperInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = swap_face(target_face, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,18 @@
from facefusion.types import Locals

# Locale table for the deep swapper processor.
# Strings are resolved via translator.get('help.…', __package__) and
# translator.get('uis.…', __package__): 'help' entries back the CLI
# argument help texts, 'uis' entries label the UI components.
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for swapping the face',
			'morph': 'morph between source face and target faces'
		},
		'uis':
		{
			'model_dropdown': 'DEEP SWAPPER MODEL',
			'morph_slider': 'DEEP SWAPPER MORPH'
		}
	}
}

View File

@@ -0,0 +1,17 @@
from typing import Any, TypeAlias, TypedDict

from numpy.typing import NDArray

from facefusion.types import Mask, VisionFrame


class DeepSwapperInputs(TypedDict):
	"""Keyword inputs consumed by the deep swapper's process_frame()."""

	# Frame used together with target_vision_frame to select faces.
	reference_vision_frame : VisionFrame
	# Frame the faces are detected and scaled against.
	target_vision_frame : VisionFrame
	# Working frame that the processor progressively swaps faces into.
	temp_vision_frame : VisionFrame
	# Mask returned alongside the working frame in ProcessorOutputs.
	temp_vision_mask : Mask


# Model identifier, e.g. 'iperov/elon_musk_224'.
DeepSwapperModel : TypeAlias = str
# Morph blending weights fed to the model.
DeepSwapperMorph : TypeAlias = NDArray[Any]

View File

@@ -0,0 +1,10 @@
from typing import List, Sequence

from facefusion.common_helper import create_int_range
from facefusion.processors.modules.expression_restorer.types import ExpressionRestorerArea, ExpressionRestorerModel

# Models accepted by --expression-restorer-model.
expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ]
# Face regions accepted by --expression-restorer-areas.
expression_restorer_areas : List[ExpressionRestorerArea] = [ 'upper-face', 'lower-face' ]
# Values accepted by --expression-restorer-factor, generated as an
# integer range from 0 to 100 with step 1 (bounds per create_int_range).
expression_restorer_factor_range : Sequence[int] = create_int_range(0, 100, 1)

View File

@@ -7,7 +7,7 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
@@ -15,9 +15,10 @@ from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_expression
from facefusion.processors.types import ExpressionRestorerInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.processors.modules.expression_restorer import choices as expression_restorer_choices
from facefusion.processors.modules.expression_restorer.types import ExpressionRestorerInputs
from facefusion.processors.types import LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw, ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -30,6 +31,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'live_portrait':
{
'__metadata__':
{
'vendor': 'KwaiVGI',
'license': 'MIT',
'year': 2024
},
'hashes':
{
'feature_extractor':
@@ -92,9 +99,9 @@ def get_model_options() -> ModelOptions:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--expression-restorer-model', help = wording.get('help.expression_restorer_model'), default = config.get_str_value('processors', 'expression_restorer_model', 'live_portrait'), choices = processors_choices.expression_restorer_models)
group_processors.add_argument('--expression-restorer-factor', help = wording.get('help.expression_restorer_factor'), type = int, default = config.get_int_value('processors', 'expression_restorer_factor', '80'), choices = processors_choices.expression_restorer_factor_range, metavar = create_int_metavar(processors_choices.expression_restorer_factor_range))
group_processors.add_argument('--expression-restorer-areas', help = wording.get('help.expression_restorer_areas').format(choices = ', '.join(processors_choices.expression_restorer_areas)), default = config.get_str_list('processors', 'expression_restorer_areas', ' '.join(processors_choices.expression_restorer_areas)), choices = processors_choices.expression_restorer_areas, nargs = '+', metavar = 'EXPRESSION_RESTORER_AREAS')
group_processors.add_argument('--expression-restorer-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'expression_restorer_model', 'live_portrait'), choices = expression_restorer_choices.expression_restorer_models)
group_processors.add_argument('--expression-restorer-factor', help = translator.get('help.factor', __package__), type = int, default = config.get_int_value('processors', 'expression_restorer_factor', '80'), choices = expression_restorer_choices.expression_restorer_factor_range, metavar = create_int_metavar(expression_restorer_choices.expression_restorer_factor_range))
group_processors.add_argument('--expression-restorer-areas', help = translator.get('help.areas', __package__).format(choices = ', '.join(expression_restorer_choices.expression_restorer_areas)), default = config.get_str_list('processors', 'expression_restorer_areas', ' '.join(expression_restorer_choices.expression_restorer_areas)), choices = expression_restorer_choices.expression_restorer_areas, nargs ='+', metavar ='EXPRESSION_RESTORER_AREAS')
facefusion.jobs.job_store.register_step_keys([ 'expression_restorer_model', 'expression_restorer_factor', 'expression_restorer_areas' ])
@@ -113,16 +120,16 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode == 'stream':
logger.error(wording.get('stream_not_supported') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('stream_not_supported') + translator.get('exclamation_mark'), __name__)
return False
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -248,10 +255,11 @@ def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
return crop_vision_frame
def process_frame(inputs : ExpressionRestorerInputs) -> VisionFrame:
def process_frame(inputs : ExpressionRestorerInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -259,4 +267,4 @@ def process_frame(inputs : ExpressionRestorerInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = restore_expression(target_face, target_vision_frame, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,20 @@
from facefusion.types import Locals

# Locale table for the expression restorer processor.
# Strings are resolved via translator.get('help.…', __package__) and
# translator.get('uis.…', __package__): 'help' entries back the CLI
# argument help texts ({choices} is filled by str.format in
# register_args), 'uis' entries label the UI components.
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for restoring the expression',
			'factor': 'restore factor of expression from the target face',
			'areas': 'choose the items used for the expression areas (choices: {choices})'
		},
		'uis':
		{
			'model_dropdown': 'EXPRESSION RESTORER MODEL',
			'factor_slider': 'EXPRESSION RESTORER FACTOR',
			'areas_checkbox_group': 'EXPRESSION RESTORER AREAS'
		}
	}
}

View File

@@ -0,0 +1,16 @@
from typing import List, Literal, TypedDict

from facefusion.types import Mask, VisionFrame


class ExpressionRestorerInputs(TypedDict):
	"""Keyword inputs consumed by the expression restorer's process_frame()."""

	# Frame used together with target_vision_frame to select faces.
	reference_vision_frame : VisionFrame
	# Source frames supplying the expressions to restore from.
	source_vision_frames : List[VisionFrame]
	# Frame the faces are detected and scaled against.
	target_vision_frame : VisionFrame
	# Working frame that the processor progressively restores expressions into.
	temp_vision_frame : VisionFrame
	# Mask returned alongside the working frame in ProcessorOutputs.
	temp_vision_mask : Mask


ExpressionRestorerModel = Literal['live_portrait']
ExpressionRestorerArea = Literal['upper-face', 'lower-face']

View File

@@ -0,0 +1,5 @@
from typing import List
from facefusion.processors.modules.face_debugger.types import FaceDebuggerItem

# Overlay items accepted by --face-debugger-items; each maps to a
# draw_* routine in the face debugger module.
face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask' ]

View File

@@ -5,14 +5,15 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, state_manager, translator, video_manager
from facefusion.face_analyser import scale_face
from facefusion.face_helper import warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import in_directory, is_image, is_video, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import FaceDebuggerInputs
from facefusion.processors.modules.face_debugger import choices as face_debugger_choices
from facefusion.processors.modules.face_debugger.types import FaceDebuggerInputs
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.types import ApplyStateItem, Args, Face, InferencePool, ProcessMode, VisionFrame
from facefusion.vision import read_static_image, read_static_video_frame
@@ -29,7 +30,7 @@ def clear_inference_pool() -> None:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(processors_choices.face_debugger_items)), default = config.get_str_list('processors', 'face_debugger_items', 'face-landmark-5/68 face-mask'), choices = processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
group_processors.add_argument('--face-debugger-items', help = translator.get('help.items', __package__).format(choices = ', '.join(face_debugger_choices.face_debugger_items)), default = config.get_str_list('processors', 'face_debugger_items', 'face-landmark-5/68 face-mask'), choices = face_debugger_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
facefusion.jobs.job_store.register_step_keys([ 'face_debugger_items' ])
@@ -43,13 +44,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -92,6 +93,7 @@ def debug_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFra
def draw_bounding_box(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
temp_vision_frame = numpy.ascontiguousarray(temp_vision_frame)
box_color = 0, 0, 255
border_color = 100, 100, 255
bounding_box = target_face.bounding_box.astype(numpy.int32)
@@ -113,6 +115,7 @@ def draw_bounding_box(target_face : Face, temp_vision_frame : VisionFrame) -> Vi
def draw_face_mask(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
crop_masks = []
temp_vision_frame = numpy.ascontiguousarray(temp_vision_frame)
face_landmark_5 = target_face.landmark_set.get('5')
face_landmark_68 = target_face.landmark_set.get('68')
face_landmark_5_68 = target_face.landmark_set.get('5/68')
@@ -152,6 +155,7 @@ def draw_face_mask(target_face : Face, temp_vision_frame : VisionFrame) -> Visio
def draw_face_landmark_5(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
temp_vision_frame = numpy.ascontiguousarray(temp_vision_frame)
face_landmark_5 = target_face.landmark_set.get('5')
point_color = 0, 0, 255
@@ -165,6 +169,7 @@ def draw_face_landmark_5(target_face : Face, temp_vision_frame : VisionFrame) ->
def draw_face_landmark_5_68(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
temp_vision_frame = numpy.ascontiguousarray(temp_vision_frame)
face_landmark_5 = target_face.landmark_set.get('5')
face_landmark_5_68 = target_face.landmark_set.get('5/68')
point_color = 0, 255, 0
@@ -182,6 +187,7 @@ def draw_face_landmark_5_68(target_face : Face, temp_vision_frame : VisionFrame)
def draw_face_landmark_68(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
temp_vision_frame = numpy.ascontiguousarray(temp_vision_frame)
face_landmark_68 = target_face.landmark_set.get('68')
face_landmark_68_5 = target_face.landmark_set.get('68/5')
point_color = 0, 255, 0
@@ -199,6 +205,7 @@ def draw_face_landmark_68(target_face : Face, temp_vision_frame : VisionFrame) -
def draw_face_landmark_68_5(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
temp_vision_frame = numpy.ascontiguousarray(temp_vision_frame)
face_landmark_68_5 = target_face.landmark_set.get('68/5')
point_color = 255, 255, 0
@@ -211,10 +218,11 @@ def draw_face_landmark_68_5(target_face : Face, temp_vision_frame : VisionFrame)
return temp_vision_frame
def process_frame(inputs : FaceDebuggerInputs) -> VisionFrame:
def process_frame(inputs : FaceDebuggerInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -222,6 +230,6 @@ def process_frame(inputs : FaceDebuggerInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = debug_face(target_face, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,16 @@
from facefusion.types import Locals

# Locale table for the face debugger processor.
# Strings are resolved via translator.get('help.…', __package__) and
# translator.get('uis.…', __package__): the 'help.items' entry backs the
# --face-debugger-items argument ({choices} is filled by str.format in
# register_args), 'uis' entries label the UI components.
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			# Fixed copy-paste defect: the previous text ('load a single or
			# multiple processors …') described the global processors option,
			# not the face debugger items this key actually documents.
			'items': 'choose the items used for the face debugger (choices: {choices})'
		},
		'uis':
		{
			'items_checkbox_group': 'FACE DEBUGGER ITEMS'
		}
	}
}

View File

@@ -0,0 +1,13 @@
from typing import Literal, TypedDict

from facefusion.types import Mask, VisionFrame


class FaceDebuggerInputs(TypedDict):
	"""Keyword inputs consumed by the face debugger's process_frame()."""

	# Frame used together with target_vision_frame to select faces.
	reference_vision_frame : VisionFrame
	# Frame the faces are detected and scaled against.
	target_vision_frame : VisionFrame
	# Working frame that the debug overlays are drawn onto.
	temp_vision_frame : VisionFrame
	# Mask returned alongside the working frame in ProcessorOutputs.
	temp_vision_mask : Mask


FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask']

View File

@@ -0,0 +1,21 @@
from typing import List, Sequence

from facefusion.common_helper import create_float_range
from facefusion.processors.modules.face_editor.types import FaceEditorModel

# Models accepted by --face-editor-model.
face_editor_models : List[FaceEditorModel] = [ 'live_portrait' ]
# Values accepted by the corresponding --face-editor-* CLI arguments.
# All controls share the same normalized scale: floats from -1.0 to 1.0
# with step 0.05 (bounds per create_float_range), 0 meaning "unchanged".
face_editor_eyebrow_direction_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_gaze_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_gaze_vertical_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_open_ratio_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_lip_open_ratio_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_grim_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_pout_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_purse_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_smile_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_position_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_mouth_position_vertical_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_pitch_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_yaw_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_roll_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)

View File

@@ -7,7 +7,7 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
@@ -15,9 +15,10 @@ from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_
from facefusion.face_masker import create_box_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_angle, limit_expression
from facefusion.processors.types import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.processors.modules.face_editor import choices as face_editor_choices
from facefusion.processors.modules.face_editor.types import FaceEditorInputs
from facefusion.processors.types import LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw, ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -30,6 +31,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'live_portrait':
{
'__metadata__':
{
'vendor': 'KwaiVGI',
'license': 'MIT',
'year': 2024
},
'hashes':
{
'feature_extractor':
@@ -122,21 +129,21 @@ def get_model_options() -> ModelOptions:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--face-editor-model', help = wording.get('help.face_editor_model'), default = config.get_str_value('processors', 'face_editor_model', 'live_portrait'), choices = processors_choices.face_editor_models)
group_processors.add_argument('--face-editor-eyebrow-direction', help = wording.get('help.face_editor_eyebrow_direction'), type = float, default = config.get_float_value('processors', 'face_editor_eyebrow_direction', '0'), choices = processors_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(processors_choices.face_editor_eyebrow_direction_range))
group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = wording.get('help.face_editor_eye_gaze_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_horizontal', '0'), choices = processors_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_horizontal_range))
group_processors.add_argument('--face-editor-eye-gaze-vertical', help = wording.get('help.face_editor_eye_gaze_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_vertical', '0'), choices = processors_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_vertical_range))
group_processors.add_argument('--face-editor-eye-open-ratio', help = wording.get('help.face_editor_eye_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_eye_open_ratio', '0'), choices = processors_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_eye_open_ratio_range))
group_processors.add_argument('--face-editor-lip-open-ratio', help = wording.get('help.face_editor_lip_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_lip_open_ratio', '0'), choices = processors_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_lip_open_ratio_range))
group_processors.add_argument('--face-editor-mouth-grim', help = wording.get('help.face_editor_mouth_grim'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_grim', '0'), choices = processors_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_grim_range))
group_processors.add_argument('--face-editor-mouth-pout', help = wording.get('help.face_editor_mouth_pout'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_pout', '0'), choices = processors_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_pout_range))
group_processors.add_argument('--face-editor-mouth-purse', help = wording.get('help.face_editor_mouth_purse'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_purse', '0'), choices = processors_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_purse_range))
group_processors.add_argument('--face-editor-mouth-smile', help = wording.get('help.face_editor_mouth_smile'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_smile', '0'), choices = processors_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_smile_range))
group_processors.add_argument('--face-editor-mouth-position-horizontal', help = wording.get('help.face_editor_mouth_position_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_horizontal', '0'), choices = processors_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_horizontal_range))
group_processors.add_argument('--face-editor-mouth-position-vertical', help = wording.get('help.face_editor_mouth_position_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_vertical', '0'), choices = processors_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_vertical_range))
group_processors.add_argument('--face-editor-head-pitch', help = wording.get('help.face_editor_head_pitch'), type = float, default = config.get_float_value('processors', 'face_editor_head_pitch', '0'), choices = processors_choices.face_editor_head_pitch_range, metavar = create_float_metavar(processors_choices.face_editor_head_pitch_range))
group_processors.add_argument('--face-editor-head-yaw', help = wording.get('help.face_editor_head_yaw'), type = float, default = config.get_float_value('processors', 'face_editor_head_yaw', '0'), choices = processors_choices.face_editor_head_yaw_range, metavar = create_float_metavar(processors_choices.face_editor_head_yaw_range))
group_processors.add_argument('--face-editor-head-roll', help = wording.get('help.face_editor_head_roll'), type = float, default = config.get_float_value('processors', 'face_editor_head_roll', '0'), choices = processors_choices.face_editor_head_roll_range, metavar = create_float_metavar(processors_choices.face_editor_head_roll_range))
group_processors.add_argument('--face-editor-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'face_editor_model', 'live_portrait'), choices = face_editor_choices.face_editor_models)
group_processors.add_argument('--face-editor-eyebrow-direction', help = translator.get('help.eyebrow_direction', __package__), type = float, default = config.get_float_value('processors', 'face_editor_eyebrow_direction', '0'), choices = face_editor_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(face_editor_choices.face_editor_eyebrow_direction_range))
group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = translator.get('help.eye_gaze_horizontal', __package__), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_horizontal', '0'), choices = face_editor_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(face_editor_choices.face_editor_eye_gaze_horizontal_range))
group_processors.add_argument('--face-editor-eye-gaze-vertical', help = translator.get('help.eye_gaze_vertical', __package__), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_vertical', '0'), choices = face_editor_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(face_editor_choices.face_editor_eye_gaze_vertical_range))
group_processors.add_argument('--face-editor-eye-open-ratio', help = translator.get('help.eye_open_ratio', __package__), type = float, default = config.get_float_value('processors', 'face_editor_eye_open_ratio', '0'), choices = face_editor_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(face_editor_choices.face_editor_eye_open_ratio_range))
group_processors.add_argument('--face-editor-lip-open-ratio', help = translator.get('help.lip_open_ratio', __package__), type = float, default = config.get_float_value('processors', 'face_editor_lip_open_ratio', '0'), choices = face_editor_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(face_editor_choices.face_editor_lip_open_ratio_range))
group_processors.add_argument('--face-editor-mouth-grim', help = translator.get('help.mouth_grim', __package__), type = float, default = config.get_float_value('processors', 'face_editor_mouth_grim', '0'), choices = face_editor_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(face_editor_choices.face_editor_mouth_grim_range))
group_processors.add_argument('--face-editor-mouth-pout', help = translator.get('help.mouth_pout', __package__), type = float, default = config.get_float_value('processors', 'face_editor_mouth_pout', '0'), choices = face_editor_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(face_editor_choices.face_editor_mouth_pout_range))
group_processors.add_argument('--face-editor-mouth-purse', help = translator.get('help.mouth_purse', __package__), type = float, default = config.get_float_value('processors', 'face_editor_mouth_purse', '0'), choices = face_editor_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(face_editor_choices.face_editor_mouth_purse_range))
group_processors.add_argument('--face-editor-mouth-smile', help = translator.get('help.mouth_smile', __package__), type = float, default = config.get_float_value('processors', 'face_editor_mouth_smile', '0'), choices = face_editor_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(face_editor_choices.face_editor_mouth_smile_range))
group_processors.add_argument('--face-editor-mouth-position-horizontal', help = translator.get('help.mouth_position_horizontal', __package__), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_horizontal', '0'), choices = face_editor_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(face_editor_choices.face_editor_mouth_position_horizontal_range))
group_processors.add_argument('--face-editor-mouth-position-vertical', help = translator.get('help.mouth_position_vertical', __package__), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_vertical', '0'), choices = face_editor_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(face_editor_choices.face_editor_mouth_position_vertical_range))
group_processors.add_argument('--face-editor-head-pitch', help = translator.get('help.head_pitch', __package__), type = float, default = config.get_float_value('processors', 'face_editor_head_pitch', '0'), choices = face_editor_choices.face_editor_head_pitch_range, metavar = create_float_metavar(face_editor_choices.face_editor_head_pitch_range))
group_processors.add_argument('--face-editor-head-yaw', help = translator.get('help.head_yaw', __package__), type = float, default = config.get_float_value('processors', 'face_editor_head_yaw', '0'), choices = face_editor_choices.face_editor_head_yaw_range, metavar = create_float_metavar(face_editor_choices.face_editor_head_yaw_range))
group_processors.add_argument('--face-editor-head-roll', help = translator.get('help.head_roll', __package__), type = float, default = config.get_float_value('processors', 'face_editor_head_roll', '0'), choices = face_editor_choices.face_editor_head_roll_range, metavar = create_float_metavar(face_editor_choices.face_editor_head_roll_range))
facefusion.jobs.job_store.register_step_keys([ 'face_editor_model', 'face_editor_eyebrow_direction', 'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical', 'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio', 'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse', 'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal', 'face_editor_mouth_position_vertical', 'face_editor_head_pitch', 'face_editor_head_yaw', 'face_editor_head_roll' ])
@@ -167,13 +174,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -477,10 +484,11 @@ def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
return crop_vision_frame
def process_frame(inputs : FaceEditorInputs) -> VisionFrame:
def process_frame(inputs : FaceEditorInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -488,4 +496,4 @@ def process_frame(inputs : FaceEditorInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = edit_face(target_face, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,44 @@
from facefusion.types import Locals

# Translation catalog for the face editor processor.
# 'help' entries back the CLI argument descriptions resolved through
# translator.get('help.<key>', __package__); 'uis' entries label the
# corresponding UI components. Only an English ('en') locale is defined here.
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for editing the face',
			'eyebrow_direction': 'specify the eyebrow direction',
			'eye_gaze_horizontal': 'specify the horizontal eye gaze',
			'eye_gaze_vertical': 'specify the vertical eye gaze',
			'eye_open_ratio': 'specify the ratio of eye opening',
			'lip_open_ratio': 'specify the ratio of lip opening',
			'mouth_grim': 'specify the mouth grim',
			'mouth_pout': 'specify the mouth pout',
			'mouth_purse': 'specify the mouth purse',
			'mouth_smile': 'specify the mouth smile',
			'mouth_position_horizontal': 'specify the horizontal mouth position',
			'mouth_position_vertical': 'specify the vertical mouth position',
			'head_pitch': 'specify the head pitch',
			'head_yaw': 'specify the head yaw',
			'head_roll': 'specify the head roll'
		},
		'uis':
		{
			'eyebrow_direction_slider': 'FACE EDITOR EYEBROW DIRECTION',
			'eye_gaze_horizontal_slider': 'FACE EDITOR EYE GAZE HORIZONTAL',
			'eye_gaze_vertical_slider': 'FACE EDITOR EYE GAZE VERTICAL',
			'eye_open_ratio_slider': 'FACE EDITOR EYE OPEN RATIO',
			'head_pitch_slider': 'FACE EDITOR HEAD PITCH',
			'head_roll_slider': 'FACE EDITOR HEAD ROLL',
			'head_yaw_slider': 'FACE EDITOR HEAD YAW',
			'lip_open_ratio_slider': 'FACE EDITOR LIP OPEN RATIO',
			'model_dropdown': 'FACE EDITOR MODEL',
			'mouth_grim_slider': 'FACE EDITOR MOUTH GRIM',
			'mouth_position_horizontal_slider': 'FACE EDITOR MOUTH POSITION HORIZONTAL',
			'mouth_position_vertical_slider': 'FACE EDITOR MOUTH POSITION VERTICAL',
			'mouth_pout_slider': 'FACE EDITOR MOUTH POUT',
			'mouth_purse_slider': 'FACE EDITOR MOUTH PURSE',
			'mouth_smile_slider': 'FACE EDITOR MOUTH SMILE'
		}
	}
}

View File

@@ -0,0 +1,13 @@
from typing import Literal, TypedDict

from facefusion.types import Mask, VisionFrame


class FaceEditorInputs(TypedDict):
	# Payload handed to the face editor's process_frame:
	# reference frame for selecting matching target faces, the frame the
	# faces were detected on, plus the working frame/mask pair the
	# processor edits and returns.
	reference_vision_frame : VisionFrame
	target_vision_frame : VisionFrame
	temp_vision_frame : VisionFrame
	temp_vision_mask : Mask


# Models accepted for the face editor.
FaceEditorModel = Literal['live_portrait']

View File

@@ -0,0 +1,10 @@
from typing import List, Sequence

from facefusion.common_helper import create_float_range, create_int_range
from facefusion.processors.modules.face_enhancer.types import FaceEnhancerModel

# Models selectable via --face-enhancer-model.
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ]
# Valid --face-enhancer-blend percentages (0..100, step 1).
face_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
# Valid --face-enhancer-weight values (0.0..1.0, step 0.05).
face_enhancer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)

View File

@@ -5,7 +5,7 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_float_metavar, create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import scale_face
@@ -13,8 +13,9 @@ from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import FaceEnhancerInputs, FaceEnhancerWeight
from facefusion.processors.modules.face_enhancer import choices as face_enhancer_choices
from facefusion.processors.modules.face_enhancer.types import FaceEnhancerInputs, FaceEnhancerWeight
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -27,6 +28,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'codeformer':
{
'__metadata__':
{
'vendor': 'sczhou',
'license': 'S-Lab-1.0',
'year': 2022
},
'hashes':
{
'face_enhancer':
@@ -48,6 +55,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gfpgan_1.2':
{
'__metadata__':
{
'vendor': 'TencentARC',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_enhancer':
@@ -69,6 +82,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gfpgan_1.3':
{
'__metadata__':
{
'vendor': 'TencentARC',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_enhancer':
@@ -90,6 +109,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gfpgan_1.4':
{
'__metadata__':
{
'vendor': 'TencentARC',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_enhancer':
@@ -111,6 +136,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gpen_bfr_256':
{
'__metadata__':
{
'vendor': 'yangxy',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'face_enhancer':
@@ -132,6 +163,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gpen_bfr_512':
{
'__metadata__':
{
'vendor': 'yangxy',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'face_enhancer':
@@ -153,6 +190,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gpen_bfr_1024':
{
'__metadata__':
{
'vendor': 'yangxy',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'face_enhancer':
@@ -174,6 +217,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'gpen_bfr_2048':
{
'__metadata__':
{
'vendor': 'yangxy',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'face_enhancer':
@@ -195,6 +244,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'restoreformer_plus_plus':
{
'__metadata__':
{
'vendor': 'wzhouxiff',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_enhancer':
@@ -237,9 +292,9 @@ def get_model_options() -> ModelOptions:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('processors', 'face_enhancer_model', 'gfpgan_1.4'), choices = processors_choices.face_enhancer_models)
group_processors.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('processors', 'face_enhancer_blend', '80'), choices = processors_choices.face_enhancer_blend_range, metavar = create_int_metavar(processors_choices.face_enhancer_blend_range))
group_processors.add_argument('--face-enhancer-weight', help = wording.get('help.face_enhancer_weight'), type = float, default = config.get_float_value('processors', 'face_enhancer_weight', '0.5'), choices = processors_choices.face_enhancer_weight_range, metavar = create_float_metavar(processors_choices.face_enhancer_weight_range))
group_processors.add_argument('--face-enhancer-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'face_enhancer_model', 'gfpgan_1.4'), choices = face_enhancer_choices.face_enhancer_models)
group_processors.add_argument('--face-enhancer-blend', help = translator.get('help.blend', __package__), type = int, default = config.get_int_value('processors', 'face_enhancer_blend', '80'), choices = face_enhancer_choices.face_enhancer_blend_range, metavar = create_int_metavar(face_enhancer_choices.face_enhancer_blend_range))
group_processors.add_argument('--face-enhancer-weight', help = translator.get('help.weight', __package__), type = float, default = config.get_float_value('processors', 'face_enhancer_weight', '0.5'), choices = face_enhancer_choices.face_enhancer_weight_range, metavar = create_float_metavar(face_enhancer_choices.face_enhancer_weight_range))
facefusion.jobs.job_store.register_step_keys([ 'face_enhancer_model', 'face_enhancer_blend', 'face_enhancer_weight' ])
@@ -258,13 +313,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -356,10 +411,11 @@ def blend_paste_frame(temp_vision_frame : VisionFrame, paste_vision_frame : Visi
return temp_vision_frame
def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame:
def process_frame(inputs : FaceEnhancerInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -367,4 +423,4 @@ def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = enhance_face(target_face, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,20 @@
from facefusion.types import Locals

# Translation catalog for the face enhancer processor.
# 'help' entries back the CLI argument descriptions resolved through
# translator.get('help.<key>', __package__); 'uis' entries label the
# corresponding UI components. Only an English ('en') locale is defined here.
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for enhancing the face',
			'blend': 'blend the enhanced into the previous face',
			'weight': 'specify the degree of weight applied to the face'
		},
		'uis':
		{
			'blend_slider': 'FACE ENHANCER BLEND',
			'model_dropdown': 'FACE ENHANCER MODEL',
			'weight_slider': 'FACE ENHANCER WEIGHT'
		}
	}
}

View File

@@ -0,0 +1,17 @@
from typing import Any, Literal, TypeAlias, TypedDict

from numpy.typing import NDArray

from facefusion.types import Mask, VisionFrame

# Input payload consumed by the face enhancer's process_frame.
FaceEnhancerInputs = TypedDict('FaceEnhancerInputs',
{
	'reference_vision_frame' : VisionFrame, # frame used to select matching target faces
	'target_vision_frame' : VisionFrame, # frame the target faces were detected on
	'temp_vision_frame' : VisionFrame, # working frame that receives the enhanced faces
	'temp_vision_mask' : Mask # working mask returned unchanged alongside the frame
})

# Models accepted via --face-enhancer-model.
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus']
# NOTE(review): declared as an ndarray even though --face-enhancer-weight is parsed
# as a float and its range is Sequence[float] — presumably the scalar gets packed
# into a model input tensor; confirm against the enhancer's inference call.
FaceEnhancerWeight : TypeAlias = NDArray[Any]

View File

@@ -0,0 +1,25 @@
from typing import List, Sequence

from facefusion.common_helper import create_float_range
from facefusion.processors.modules.face_swapper.types import FaceSwapperModel, FaceSwapperSet, FaceSwapperWeight

# Pixel boost resolution ladders shared by several models; each entry in
# face_swapper_set receives its own copy to avoid aliasing a mutable list.
_PIXEL_BOOSTS_256 = [ '256x256', '512x512', '768x768', '1024x1024' ]
_PIXEL_BOOSTS_128 = [ '128x128', '256x256', '384x384', '512x512', '768x768', '1024x1024' ]

# Maps each swapper model to its supported --face-swapper-pixel-boost resolutions.
face_swapper_set : FaceSwapperSet =\
{
	'blendswap_256': [ '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
	'ghost_1_256': list(_PIXEL_BOOSTS_256),
	'ghost_2_256': list(_PIXEL_BOOSTS_256),
	'ghost_3_256': list(_PIXEL_BOOSTS_256),
	'hififace_unofficial_256': list(_PIXEL_BOOSTS_256),
	'hyperswap_1a_256': list(_PIXEL_BOOSTS_256),
	'hyperswap_1b_256': list(_PIXEL_BOOSTS_256),
	'hyperswap_1c_256': list(_PIXEL_BOOSTS_256),
	'inswapper_128': list(_PIXEL_BOOSTS_128),
	'inswapper_128_fp16': list(_PIXEL_BOOSTS_128),
	'simswap_256': list(_PIXEL_BOOSTS_256),
	'simswap_unofficial_512': [ '512x512', '768x768', '1024x1024' ],
	'uniface_256': list(_PIXEL_BOOSTS_256)
}

# Model choices derive from the mapping keys so the two stay in sync.
face_swapper_models : List[FaceSwapperModel] = list(face_swapper_set)
# Valid --face-swapper-weight values (0.0..1.0, step 0.05).
face_swapper_weight_range : Sequence[FaceSwapperWeight] = create_float_range(0.0, 1.0, 0.05)

View File

@@ -8,7 +8,7 @@ import numpy
import facefusion.choices
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import get_first, is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
@@ -18,9 +18,10 @@ from facefusion.face_masker import create_area_mask, create_box_mask, create_occ
from facefusion.face_selector import select_faces, sort_faces_by_order
from facefusion.filesystem import filter_image_paths, has_image, in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.model_helper import get_static_model_initializer
from facefusion.processors import choices as processors_choices
from facefusion.processors.modules.face_swapper import choices as face_swapper_choices
from facefusion.processors.modules.face_swapper.types import FaceSwapperInputs
from facefusion.processors.pixel_boost import explode_pixel_boost, implode_pixel_boost
from facefusion.processors.types import FaceSwapperInputs
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Embedding, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -33,6 +34,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'blendswap_256':
{
'__metadata__':
{
'vendor': 'mapooon',
'license': 'Non-Commercial',
'year': 2023
},
'hashes':
{
'face_swapper':
@@ -57,6 +64,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'ghost_1_256':
{
'__metadata__':
{
'vendor': 'ai-forever',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_swapper':
@@ -91,6 +104,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'ghost_2_256':
{
'__metadata__':
{
'vendor': 'ai-forever',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_swapper':
@@ -125,6 +144,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'ghost_3_256':
{
'__metadata__':
{
'vendor': 'ai-forever',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'face_swapper':
@@ -159,6 +184,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'hififace_unofficial_256':
{
'__metadata__':
{
'vendor': 'GuijiAI',
'license': 'Unknown',
'year': 2021
},
'hashes':
{
'face_swapper':
@@ -193,6 +224,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'hyperswap_1a_256':
{
'__metadata__':
{
'vendor': 'FaceFusion',
'license': 'ResearchRAIL',
'year': 2025
},
'hashes':
{
'face_swapper':
@@ -217,6 +254,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'hyperswap_1b_256':
{
'__metadata__':
{
'vendor': 'FaceFusion',
'license': 'ResearchRAIL',
'year': 2025
},
'hashes':
{
'face_swapper':
@@ -241,6 +284,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'hyperswap_1c_256':
{
'__metadata__':
{
'vendor': 'FaceFusion',
'license': 'ResearchRAIL',
'year': 2025
},
'hashes':
{
'face_swapper':
@@ -265,6 +314,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'inswapper_128':
{
'__metadata__':
{
'vendor': 'InsightFace',
'license': 'Non-Commercial',
'year': 2023
},
'hashes':
{
'face_swapper':
@@ -289,6 +344,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'inswapper_128_fp16':
{
'__metadata__':
{
'vendor': 'InsightFace',
'license': 'Non-Commercial',
'year': 2023
},
'hashes':
{
'face_swapper':
@@ -313,6 +374,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'simswap_256':
{
'__metadata__':
{
'vendor': 'neuralchen',
'license': 'Non-Commercial',
'year': 2020
},
'hashes':
{
'face_swapper':
@@ -347,6 +414,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'simswap_unofficial_512':
{
'__metadata__':
{
'vendor': 'neuralchen',
'license': 'Non-Commercial',
'year': 2020
},
'hashes':
{
'face_swapper':
@@ -381,6 +454,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'uniface_256':
{
'__metadata__':
{
'vendor': 'xc-csc101',
'license': 'Unknown',
'year': 2022
},
'hashes':
{
'face_swapper':
@@ -434,11 +513,11 @@ def get_model_name() -> str:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('processors', 'face_swapper_model', 'hyperswap_1a_256'), choices = processors_choices.face_swapper_models)
group_processors.add_argument('--face-swapper-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'face_swapper_model', 'hyperswap_1a_256'), choices = face_swapper_choices.face_swapper_models)
known_args, _ = program.parse_known_args()
face_swapper_pixel_boost_choices = processors_choices.face_swapper_set.get(known_args.face_swapper_model)
group_processors.add_argument('--face-swapper-pixel-boost', help = wording.get('help.face_swapper_pixel_boost'), default = config.get_str_value('processors', 'face_swapper_pixel_boost', get_first(face_swapper_pixel_boost_choices)), choices = face_swapper_pixel_boost_choices)
group_processors.add_argument('--face-swapper-weight', help = wording.get('help.face_swapper_weight'), type = float, default = config.get_float_value('processors', 'face_swapper_weight', '0.5'), choices = processors_choices.face_swapper_weight_range)
face_swapper_pixel_boost_choices = face_swapper_choices.face_swapper_set.get(known_args.face_swapper_model)
group_processors.add_argument('--face-swapper-pixel-boost', help = translator.get('help.pixel_boost', __package__), default = config.get_str_value('processors', 'face_swapper_pixel_boost', get_first(face_swapper_pixel_boost_choices)), choices = face_swapper_pixel_boost_choices)
group_processors.add_argument('--face-swapper-weight', help = translator.get('help.weight', __package__), type = float, default = config.get_float_value('processors', 'face_swapper_weight', '0.5'), choices = face_swapper_choices.face_swapper_weight_range)
facefusion.jobs.job_store.register_step_keys([ 'face_swapper_model', 'face_swapper_pixel_boost', 'face_swapper_weight' ])
@@ -457,7 +536,7 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if not has_image(state_manager.get_item('source_paths')):
logger.error(wording.get('choose_image_source') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_source') + translator.get('exclamation_mark'), __name__)
return False
source_image_paths = filter_image_paths(state_manager.get_item('source_paths'))
@@ -465,19 +544,19 @@ def pre_process(mode : ProcessMode) -> bool:
source_faces = get_many_faces(source_frames)
if not get_one_face(source_faces):
logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('no_source_face_detected') + translator.get('exclamation_mark'), __name__)
return False
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -678,11 +757,12 @@ def extract_source_face(source_vision_frames : List[VisionFrame]) -> Optional[Fa
return get_average_face(source_faces)
def process_frame(inputs : FaceSwapperInputs) -> VisionFrame:
def process_frame(inputs : FaceSwapperInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
source_vision_frames = inputs.get('source_vision_frames')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
source_face = extract_source_face(source_vision_frames)
target_faces = select_faces(reference_vision_frame, target_vision_frame)
@@ -691,4 +771,4 @@ def process_frame(inputs : FaceSwapperInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = swap_face(source_face, target_face, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,20 @@
from facefusion.types import Locals

# Translation catalog for the face swapper processor.
# 'help' entries back the CLI argument descriptions resolved through
# translator.get('help.<key>', __package__); 'uis' entries label the
# corresponding UI components. Only an English ('en') locale is defined here.
LOCALS : Locals =\
{
	'en':
	{
		'help':
		{
			'model': 'choose the model responsible for swapping the face',
			'pixel_boost': 'choose the pixel boost resolution for the face swapper',
			'weight': 'specify the degree of weight applied to the face'
		},
		'uis':
		{
			'model_dropdown': 'FACE SWAPPER MODEL',
			'pixel_boost_dropdown': 'FACE SWAPPER PIXEL BOOST',
			'weight_slider': 'FACE SWAPPER WEIGHT'
		}
	}
}

View File

@@ -0,0 +1,18 @@
from typing import Dict, List, Literal, TypeAlias, TypedDict

from facefusion.types import Mask, VisionFrame

# Input payload consumed by the face swapper's process_frame.
FaceSwapperInputs = TypedDict('FaceSwapperInputs',
{
	'reference_vision_frame' : VisionFrame, # frame used to select matching target faces
	'source_vision_frames' : List[VisionFrame], # source images an average source face is extracted from
	'target_vision_frame' : VisionFrame, # frame the target faces were detected on
	'temp_vision_frame' : VisionFrame, # working frame that receives the swapped faces
	'temp_vision_mask' : Mask # working mask returned unchanged alongside the frame
})

# Models accepted via --face-swapper-model.
FaceSwapperModel = Literal['blendswap_256', 'ghost_1_256', 'ghost_2_256', 'ghost_3_256', 'hififace_unofficial_256', 'hyperswap_1a_256', 'hyperswap_1b_256', 'hyperswap_1c_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_unofficial_512', 'uniface_256']
# Scalar weight applied during swapping (parsed from --face-swapper-weight).
FaceSwapperWeight : TypeAlias = float
# Maps a swapper model to its supported pixel boost resolution strings.
FaceSwapperSet : TypeAlias = Dict[FaceSwapperModel, List[str]]

View File

@@ -0,0 +1,10 @@
from typing import List, Sequence

from facefusion.common_helper import create_int_range
from facefusion.processors.modules.frame_colorizer.types import FrameColorizerModel

# Models selectable via --frame-colorizer-model.
frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable' ]
# Frame sizes accepted via --frame-colorizer-size.
frame_colorizer_sizes : List[str] = [ '192x192', '256x256', '384x384', '512x512' ]
# Valid --frame-colorizer-blend percentages (0..100, step 1).
frame_colorizer_blend_range : Sequence[int] = create_int_range(0, 100, 1)

View File

@@ -7,13 +7,14 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_int_metavar, is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import FrameColorizerInputs
from facefusion.processors.modules.frame_colorizer import choices as frame_colorizer_choices
from facefusion.processors.modules.frame_colorizer.types import FrameColorizerInputs
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, ExecutionProvider, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -26,6 +27,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'ddcolor':
{
'__metadata__':
{
'vendor': 'piddnad',
'license': 'Apache-2.0',
'year': 2023
},
'hashes':
{
'frame_colorizer':
@@ -46,6 +53,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'ddcolor_artistic':
{
'__metadata__':
{
'vendor': 'piddnad',
'license': 'Apache-2.0',
'year': 2023
},
'hashes':
{
'frame_colorizer':
@@ -66,6 +79,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'deoldify':
{
'__metadata__':
{
'vendor': 'jantic',
'license': 'MIT',
'year': 2022
},
'hashes':
{
'frame_colorizer':
@@ -86,6 +105,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'deoldify_artistic':
{
'__metadata__':
{
'vendor': 'jantic',
'license': 'MIT',
'year': 2022
},
'hashes':
{
'frame_colorizer':
@@ -106,6 +131,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'deoldify_stable':
{
'__metadata__':
{
'vendor': 'jantic',
'license': 'MIT',
'year': 2022
},
'hashes':
{
'frame_colorizer':
@@ -153,9 +184,9 @@ def get_model_options() -> ModelOptions:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--frame-colorizer-model', help = wording.get('help.frame_colorizer_model'), default = config.get_str_value('processors', 'frame_colorizer_model', 'ddcolor'), choices = processors_choices.frame_colorizer_models)
group_processors.add_argument('--frame-colorizer-size', help = wording.get('help.frame_colorizer_size'), type = str, default = config.get_str_value('processors', 'frame_colorizer_size', '256x256'), choices = processors_choices.frame_colorizer_sizes)
group_processors.add_argument('--frame-colorizer-blend', help = wording.get('help.frame_colorizer_blend'), type = int, default = config.get_int_value('processors', 'frame_colorizer_blend', '100'), choices = processors_choices.frame_colorizer_blend_range, metavar = create_int_metavar(processors_choices.frame_colorizer_blend_range))
group_processors.add_argument('--frame-colorizer-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'frame_colorizer_model', 'ddcolor'), choices = frame_colorizer_choices.frame_colorizer_models)
group_processors.add_argument('--frame-colorizer-size', help = translator.get('help.size', __package__), type = str, default = config.get_str_value('processors', 'frame_colorizer_size', '256x256'), choices = frame_colorizer_choices.frame_colorizer_sizes)
group_processors.add_argument('--frame-colorizer-blend', help = translator.get('help.blend', __package__), type = int, default = config.get_int_value('processors', 'frame_colorizer_blend', '100'), choices = frame_colorizer_choices.frame_colorizer_blend_range, metavar = create_int_metavar(frame_colorizer_choices.frame_colorizer_blend_range))
facefusion.jobs.job_store.register_step_keys([ 'frame_colorizer_model', 'frame_colorizer_blend', 'frame_colorizer_size' ])
@@ -174,13 +205,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -261,6 +292,8 @@ def blend_color_frame(temp_vision_frame : VisionFrame, color_vision_frame : Visi
return temp_vision_frame
def process_frame(inputs : FrameColorizerInputs) -> VisionFrame:
def process_frame(inputs : FrameColorizerInputs) -> ProcessorOutputs:
temp_vision_frame = inputs.get('temp_vision_frame')
return colorize_frame(temp_vision_frame)
temp_vision_mask = inputs.get('temp_vision_mask')
temp_vision_frame = colorize_frame(temp_vision_frame)
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,20 @@
# Translation strings for the frame colorizer processor module.
from facefusion.types import Locals

# English locals consumed by facefusion.translator via the module __package__;
# 'help' entries back the CLI argument descriptions and 'uis' entries label
# the Gradio components. Layout mirrors the other processor modules.
LOCALS : Locals =\
{
'en':
{
'help':
{
'model': 'choose the model responsible for colorizing the frame',
'size': 'specify the frame size provided to the frame colorizer',
'blend': 'blend the colorized into the previous frame'
},
'uis':
{
'blend_slider': 'FRAME COLORIZER BLEND',
'model_dropdown': 'FRAME COLORIZER MODEL',
'size_dropdown': 'FRAME COLORIZER SIZE'
}
}
}

View File

@@ -0,0 +1,12 @@
# Type declarations for the frame colorizer processor module.
from typing import Literal, TypedDict
from facefusion.types import Mask, VisionFrame

# Inputs handed to frame_colorizer.process_frame: the untouched target frame,
# the frame accumulated by previous processors, and its alpha/visibility mask.
FrameColorizerInputs = TypedDict('FrameColorizerInputs',
{
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame,
'temp_vision_mask' : Mask
})

# Identifiers of the supported frame colorizer models.
FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable']

View File

@@ -0,0 +1,8 @@
# Choice constants for the frame enhancer processor module.
from typing import List, Sequence
from facefusion.common_helper import create_int_range
from facefusion.processors.modules.frame_enhancer.types import FrameEnhancerModel

# Models selectable via --frame-enhancer-model; order defines CLI display order.
# NOTE(review): 'lsdir_x4' is still listed although its download entries appear
# replaced by 'face_dat_x4' in the model set — confirm it remains available.
frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'face_dat_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'tghq_face_x8', 'ultra_sharp_x4', 'ultra_sharp_2_x4' ]
# Valid blend percentages (0..100 inclusive, step 1) for --frame-enhancer-blend.
frame_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)

View File

@@ -6,13 +6,14 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, inference_manager, logger, state_manager, video_manager, wording
from facefusion import config, content_analyser, inference_manager, logger, state_manager, translator, video_manager
from facefusion.common_helper import create_int_metavar, is_macos
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import FrameEnhancerInputs
from facefusion.processors.modules.frame_enhancer import choices as frame_enhancer_choices
from facefusion.processors.modules.frame_enhancer.types import FrameEnhancerInputs
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -25,6 +26,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'clear_reality_x4':
{
'__metadata__':
{
'vendor': 'Kim2091',
'license': 'Non-Commercial',
'year': 2023
},
'hashes':
{
'frame_enhancer':
@@ -44,22 +51,28 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'size': (128, 8, 4),
'scale': 4
},
'lsdir_x4':
'face_dat_x4':
{
'__metadata__':
{
'vendor': 'Helaman',
'license': 'Non-Commercial',
'year': 2023
},
'hashes':
{
'frame_enhancer':
{
'url': resolve_download_url('models-3.0.0', 'lsdir_x4.hash'),
'path': resolve_relative_path('../.assets/models/lsdir_x4.hash')
'url': resolve_download_url('models-3.5.0', 'face_dat_x4.hash'),
'path': resolve_relative_path('../.assets/models/face_dat_x4.hash')
}
},
'sources':
{
'frame_enhancer':
{
'url': resolve_download_url('models-3.0.0', 'lsdir_x4.onnx'),
'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx')
'url': resolve_download_url('models-3.5.0', 'face_dat_x4.onnx'),
'path': resolve_relative_path('../.assets/models/face_dat_x4.onnx')
}
},
'size': (128, 8, 4),
@@ -67,6 +80,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'nomos8k_sc_x4':
{
'__metadata__':
{
'vendor': 'Phhofm',
'license': 'Non-Commercial',
'year': 2023
},
'hashes':
{
'frame_enhancer':
@@ -88,6 +107,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_esrgan_x2':
{
'__metadata__':
{
'vendor': 'xinntao',
'license': 'BSD-3-Clause',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -109,6 +134,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_esrgan_x2_fp16':
{
'__metadata__':
{
'vendor': 'xinntao',
'license': 'BSD-3-Clause',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -130,6 +161,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_esrgan_x4':
{
'__metadata__':
{
'vendor': 'xinntao',
'license': 'BSD-3-Clause',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -151,6 +188,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_esrgan_x4_fp16':
{
'__metadata__':
{
'vendor': 'xinntao',
'license': 'BSD-3-Clause',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -172,6 +215,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_esrgan_x8':
{
'__metadata__':
{
'vendor': 'xinntao',
'license': 'BSD-3-Clause',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -193,6 +242,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_esrgan_x8_fp16':
{
'__metadata__':
{
'vendor': 'xinntao',
'license': 'BSD-3-Clause',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -214,6 +269,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_hatgan_x4':
{
'__metadata__':
{
'vendor': 'XPixelGroup',
'license': 'Apache-2.0',
'year': 2023
},
'hashes':
{
'frame_enhancer':
@@ -235,6 +296,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'real_web_photo_x4':
{
'__metadata__':
{
'vendor': 'Helaman',
'license': 'Non-Commercial',
'year': 2024
},
'hashes':
{
'frame_enhancer':
@@ -256,6 +323,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'realistic_rescaler_x4':
{
'__metadata__':
{
'vendor': 'Mutin Choler',
'license': 'WTFPL',
'year': 2023
},
'hashes':
{
'frame_enhancer':
@@ -277,6 +350,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'remacri_x4':
{
'__metadata__':
{
'vendor': 'FoolhardyVEVO',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -298,6 +377,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'siax_x4':
{
'__metadata__':
{
'vendor': 'NMKD',
'license': 'WTFPL',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -319,6 +404,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'span_kendata_x4':
{
'__metadata__':
{
'vendor': 'terrainer',
'license': 'Non-Commercial',
'year': 2024
},
'hashes':
{
'frame_enhancer':
@@ -340,6 +431,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'swin2_sr_x4':
{
'__metadata__':
{
'vendor': 'mv-lab',
'license': 'Apache-2.0',
'year': 2022
},
'hashes':
{
'frame_enhancer':
@@ -359,8 +456,41 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
'size': (128, 8, 4),
'scale': 4
},
'tghq_face_x8':
{
'__metadata__':
{
'vendor': 'TorrentGuy',
'license': 'GPL-3.0',
'year': 2019
},
'hashes':
{
'frame_enhancer':
{
'url': resolve_download_url('models-3.5.0', 'tghq_face_x8.hash'),
'path': resolve_relative_path('../.assets/models/tghq_face_x8.hash')
}
},
'sources':
{
'frame_enhancer':
{
'url': resolve_download_url('models-3.5.0', 'tghq_face_x8.onnx'),
'path': resolve_relative_path('../.assets/models/tghq_face_x8.onnx')
}
},
'size': (128, 8, 4),
'scale': 8
},
'ultra_sharp_x4':
{
'__metadata__':
{
'vendor': 'Kim2091',
'license': 'Non-Commercial',
'year': 2021
},
'hashes':
{
'frame_enhancer':
@@ -382,6 +512,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'ultra_sharp_2_x4':
{
'__metadata__':
{
'vendor': 'Kim2091',
'license': 'Non-Commercial',
'year': 2025
},
'hashes':
{
'frame_enhancer':
@@ -437,8 +573,8 @@ def get_frame_enhancer_model() -> str:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--frame-enhancer-model', help = wording.get('help.frame_enhancer_model'), default = config.get_str_value('processors', 'frame_enhancer_model', 'span_kendata_x4'), choices = processors_choices.frame_enhancer_models)
group_processors.add_argument('--frame-enhancer-blend', help = wording.get('help.frame_enhancer_blend'), type = int, default = config.get_int_value('processors', 'frame_enhancer_blend', '80'), choices = processors_choices.frame_enhancer_blend_range, metavar = create_int_metavar(processors_choices.frame_enhancer_blend_range))
group_processors.add_argument('--frame-enhancer-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'frame_enhancer_model', 'span_kendata_x4'), choices = frame_enhancer_choices.frame_enhancer_models)
group_processors.add_argument('--frame-enhancer-blend', help = translator.get('help.blend', __package__), type = int, default = config.get_int_value('processors', 'frame_enhancer_blend', '80'), choices = frame_enhancer_choices.frame_enhancer_blend_range, metavar = create_int_metavar(frame_enhancer_choices.frame_enhancer_blend_range))
facefusion.jobs.job_store.register_step_keys([ 'frame_enhancer_model', 'frame_enhancer_blend' ])
@@ -456,13 +592,13 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_image_or_video_target') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('specify_image_or_video_output') + translator.get('exclamation_mark'), __name__)
return False
if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('match_target_and_output_extension') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -525,6 +661,9 @@ def blend_merge_frame(temp_vision_frame : VisionFrame, merge_vision_frame : Visi
return temp_vision_frame
def process_frame(inputs : FrameEnhancerInputs) -> VisionFrame:
def process_frame(inputs : FrameEnhancerInputs) -> ProcessorOutputs:
temp_vision_frame = inputs.get('temp_vision_frame')
return enhance_frame(temp_vision_frame)
temp_vision_mask = inputs.get('temp_vision_mask')
temp_vision_frame = enhance_frame(temp_vision_frame)
temp_vision_mask = cv2.resize(temp_vision_mask, temp_vision_frame.shape[:2][::-1])
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,18 @@
# Translation strings for the frame enhancer processor module.
from facefusion.types import Locals

# English locals consumed by facefusion.translator via the module __package__;
# 'help' entries back the CLI argument descriptions and 'uis' entries label
# the Gradio components. Layout mirrors the other processor modules.
LOCALS : Locals =\
{
'en':
{
'help':
{
'model': 'choose the model responsible for enhancing the frame',
'blend': 'blend the enhanced into the previous frame'
},
'uis':
{
'blend_slider': 'FRAME ENHANCER BLEND',
'model_dropdown': 'FRAME ENHANCER MODEL'
}
}
}

View File

@@ -0,0 +1,12 @@
# Type declarations for the frame enhancer processor module.
from typing import Literal, TypedDict
from facefusion.types import Mask, VisionFrame

# Inputs handed to frame_enhancer.process_frame: the untouched target frame,
# the frame accumulated by previous processors, and its alpha/visibility mask
# (the mask gets resized to match the upscaled frame inside process_frame).
FrameEnhancerInputs = TypedDict('FrameEnhancerInputs',
{
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame,
'temp_vision_mask' : Mask
})

# Identifiers of the supported frame enhancer models; must stay in sync with
# the choices module and the model set.
FrameEnhancerModel = Literal['clear_reality_x4', 'face_dat_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'tghq_face_x8', 'ultra_sharp_x4', 'ultra_sharp_2_x4']

View File

@@ -0,0 +1,8 @@
# Choice constants for the lip syncer processor module.
from typing import List, Sequence
from facefusion.common_helper import create_float_range
from facefusion.processors.modules.lip_syncer.types import LipSyncerModel

# Models selectable via --lip-syncer-model; order defines CLI display order.
lip_syncer_models : List[LipSyncerModel] = [ 'edtalk_256', 'wav2lip_96', 'wav2lip_gan_96' ]
# Valid weights (0.0..1.0 inclusive, step 0.05) for --lip-syncer-weight.
lip_syncer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)

View File

@@ -6,7 +6,7 @@ import numpy
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, video_manager, voice_extractor, wording
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, state_manager, translator, video_manager, voice_extractor
from facefusion.audio import read_static_voice
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
@@ -15,8 +15,9 @@ from facefusion.face_helper import create_bounding_box, paste_back, warp_face_by
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask
from facefusion.face_selector import select_faces
from facefusion.filesystem import has_audio, resolve_relative_path
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import LipSyncerInputs, LipSyncerWeight
from facefusion.processors.modules.lip_syncer import choices as lip_syncer_choices
from facefusion.processors.modules.lip_syncer.types import LipSyncerInputs, LipSyncerWeight
from facefusion.processors.types import ProcessorOutputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.types import ApplyStateItem, Args, AudioFrame, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, VisionFrame
@@ -29,6 +30,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
{
'edtalk_256':
{
'__metadata__':
{
'vendor': 'tanshuai0219',
'license': 'Apache-2.0',
'year': 2024
},
'hashes':
{
'lip_syncer':
@@ -50,6 +57,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'wav2lip_96':
{
'__metadata__':
{
'vendor': 'Rudrabha',
'license': 'Non-Commercial',
'year': 2020
},
'hashes':
{
'lip_syncer':
@@ -71,6 +84,12 @@ def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
},
'wav2lip_gan_96':
{
'__metadata__':
{
'vendor': 'Rudrabha',
'license': 'Non-Commercial',
'year': 2020
},
'hashes':
{
'lip_syncer':
@@ -113,8 +132,8 @@ def get_model_options() -> ModelOptions:
def register_args(program : ArgumentParser) -> None:
group_processors = find_argument_group(program, 'processors')
if group_processors:
group_processors.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('processors', 'lip_syncer_model', 'wav2lip_gan_96'), choices = processors_choices.lip_syncer_models)
group_processors.add_argument('--lip-syncer-weight', help = wording.get('help.lip_syncer_weight'), type = float, default = config.get_float_value('processors', 'lip_syncer_weight', '0.5'), choices = processors_choices.lip_syncer_weight_range, metavar = create_float_metavar(processors_choices.lip_syncer_weight_range))
group_processors.add_argument('--lip-syncer-model', help = translator.get('help.model', __package__), default = config.get_str_value('processors', 'lip_syncer_model', 'wav2lip_gan_96'), choices = lip_syncer_choices.lip_syncer_models)
group_processors.add_argument('--lip-syncer-weight', help = translator.get('help.weight', __package__), type = float, default = config.get_float_value('processors', 'lip_syncer_weight', '0.5'), choices = lip_syncer_choices.lip_syncer_weight_range, metavar = create_float_metavar(lip_syncer_choices.lip_syncer_weight_range))
facefusion.jobs.job_store.register_step_keys([ 'lip_syncer_model', 'lip_syncer_weight' ])
@@ -132,7 +151,7 @@ def pre_check() -> bool:
def pre_process(mode : ProcessMode) -> bool:
if not has_audio(state_manager.get_item('source_paths')):
logger.error(wording.get('choose_audio_source') + wording.get('exclamation_mark'), __name__)
logger.error(translator.get('choose_audio_source') + translator.get('exclamation_mark'), __name__)
return False
return True
@@ -261,11 +280,12 @@ def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
return crop_vision_frame
def process_frame(inputs : LipSyncerInputs) -> VisionFrame:
def process_frame(inputs : LipSyncerInputs) -> ProcessorOutputs:
reference_vision_frame = inputs.get('reference_vision_frame')
source_voice_frame = inputs.get('source_voice_frame')
target_vision_frame = inputs.get('target_vision_frame')
temp_vision_frame = inputs.get('temp_vision_frame')
temp_vision_mask = inputs.get('temp_vision_mask')
target_faces = select_faces(reference_vision_frame, target_vision_frame)
if target_faces:
@@ -273,5 +293,4 @@ def process_frame(inputs : LipSyncerInputs) -> VisionFrame:
target_face = scale_face(target_face, target_vision_frame, temp_vision_frame)
temp_vision_frame = sync_lip(target_face, source_voice_frame, temp_vision_frame)
return temp_vision_frame
return temp_vision_frame, temp_vision_mask

View File

@@ -0,0 +1,18 @@
# Translation strings for the lip syncer processor module.
from facefusion.types import Locals

# English locals consumed by facefusion.translator via the module __package__;
# 'help' entries back the CLI argument descriptions and 'uis' entries label
# the Gradio components. Layout mirrors the other processor modules.
LOCALS : Locals =\
{
'en':
{
'help':
{
'model': 'choose the model responsible for syncing the lips',
'weight': 'specify the degree of weight applied to the lips'
},
'uis':
{
'model_dropdown': 'LIP SYNCER MODEL',
'weight_slider': 'LIP SYNCER WEIGHT'
}
}
}

View File

@@ -0,0 +1,18 @@
# Type declarations for the lip syncer processor module.
from typing import Any, Literal, TypeAlias, TypedDict
from numpy.typing import NDArray
from facefusion.types import AudioFrame, Mask, VisionFrame

# Inputs handed to lip_syncer.process_frame: the reference frame used for face
# selection, the source audio frame, the untouched target frame, the frame
# accumulated by previous processors, and its alpha/visibility mask.
LipSyncerInputs = TypedDict('LipSyncerInputs',
{
'reference_vision_frame' : VisionFrame,
'source_voice_frame' : AudioFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame,
'temp_vision_mask' : Mask
})

# Identifiers of the supported lip syncer models.
LipSyncerModel = Literal['edtalk_256', 'wav2lip_96', 'wav2lip_gan_96']
# Weight tensor applied to the lip region — presumably a float array produced
# by the model pipeline; exact shape not visible here (TODO confirm).
LipSyncerWeight : TypeAlias = NDArray[Any]

View File

@@ -1,90 +1,9 @@
from typing import Any, Dict, List, Literal, TypeAlias, TypedDict
from typing import Any, Dict, Tuple, TypeAlias
from numpy.typing import NDArray
from facefusion.types import AppContext, AudioFrame, VisionFrame
from facefusion.types import AppContext, Mask, VisionFrame
AgeModifierModel = Literal['styleganex_age']
DeepSwapperModel : TypeAlias = str
ExpressionRestorerModel = Literal['live_portrait']
ExpressionRestorerArea = Literal['upper-face', 'lower-face']
FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask']
FaceEditorModel = Literal['live_portrait']
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus']
FaceSwapperModel = Literal['blendswap_256', 'ghost_1_256', 'ghost_2_256', 'ghost_3_256', 'hififace_unofficial_256', 'hyperswap_1a_256', 'hyperswap_1b_256', 'hyperswap_1c_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_unofficial_512', 'uniface_256']
FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable']
FrameEnhancerModel = Literal['clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4', 'ultra_sharp_2_x4']
LipSyncerModel = Literal['edtalk_256', 'wav2lip_96', 'wav2lip_gan_96']
FaceSwapperSet : TypeAlias = Dict[FaceSwapperModel, List[str]]
AgeModifierInputs = TypedDict('AgeModifierInputs',
{
'reference_vision_frame' : VisionFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
DeepSwapperInputs = TypedDict('DeepSwapperInputs',
{
'reference_vision_frame' : VisionFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
ExpressionRestorerInputs = TypedDict('ExpressionRestorerInputs',
{
'reference_vision_frame' : VisionFrame,
'source_vision_frames' : List[VisionFrame],
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
FaceDebuggerInputs = TypedDict('FaceDebuggerInputs',
{
'reference_vision_frame' : VisionFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
FaceEditorInputs = TypedDict('FaceEditorInputs',
{
'reference_vision_frame' : VisionFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
FaceEnhancerInputs = TypedDict('FaceEnhancerInputs',
{
'reference_vision_frame' : VisionFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
FaceSwapperInputs = TypedDict('FaceSwapperInputs',
{
'reference_vision_frame' : VisionFrame,
'source_vision_frames' : List[VisionFrame],
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
FrameColorizerInputs = TypedDict('FrameColorizerInputs',
{
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
FrameEnhancerInputs = TypedDict('FrameEnhancerInputs',
{
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
LipSyncerInputs = TypedDict('LipSyncerInputs',
{
'reference_vision_frame' : VisionFrame,
'source_voice_frame' : AudioFrame,
'target_vision_frame' : VisionFrame,
'temp_vision_frame' : VisionFrame
})
AgeModifierDirection : TypeAlias = NDArray[Any]
DeepSwapperMorph : TypeAlias = NDArray[Any]
FaceEnhancerWeight : TypeAlias = NDArray[Any]
FaceSwapperWeight : TypeAlias = float
LipSyncerWeight : TypeAlias = NDArray[Any]
LivePortraitPitch : TypeAlias = float
LivePortraitYaw : TypeAlias = float
LivePortraitRoll : TypeAlias = float
@@ -95,82 +14,7 @@ LivePortraitRotation : TypeAlias = NDArray[Any]
LivePortraitScale : TypeAlias = NDArray[Any]
LivePortraitTranslation : TypeAlias = NDArray[Any]
ProcessorStateKey = Literal\
[
'age_modifier_model',
'age_modifier_direction',
'deep_swapper_model',
'deep_swapper_morph',
'expression_restorer_model',
'expression_restorer_factor',
'expression_restorer_areas',
'face_debugger_items',
'face_editor_model',
'face_editor_eyebrow_direction',
'face_editor_eye_gaze_horizontal',
'face_editor_eye_gaze_vertical',
'face_editor_eye_open_ratio',
'face_editor_lip_open_ratio',
'face_editor_mouth_grim',
'face_editor_mouth_pout',
'face_editor_mouth_purse',
'face_editor_mouth_smile',
'face_editor_mouth_position_horizontal',
'face_editor_mouth_position_vertical',
'face_editor_head_pitch',
'face_editor_head_yaw',
'face_editor_head_roll',
'face_enhancer_model',
'face_enhancer_blend',
'face_enhancer_weight',
'face_swapper_model',
'face_swapper_pixel_boost',
'face_swapper_weight',
'frame_colorizer_model',
'frame_colorizer_size',
'frame_colorizer_blend',
'frame_enhancer_model',
'frame_enhancer_blend',
'lip_syncer_model',
'lip_syncer_weight'
]
ProcessorState = TypedDict('ProcessorState',
{
'age_modifier_model' : AgeModifierModel,
'age_modifier_direction' : int,
'deep_swapper_model' : DeepSwapperModel,
'deep_swapper_morph' : int,
'expression_restorer_model' : ExpressionRestorerModel,
'expression_restorer_factor' : int,
'expression_restorer_areas' : List[ExpressionRestorerArea],
'face_debugger_items' : List[FaceDebuggerItem],
'face_editor_model' : FaceEditorModel,
'face_editor_eyebrow_direction' : float,
'face_editor_eye_gaze_horizontal' : float,
'face_editor_eye_gaze_vertical' : float,
'face_editor_eye_open_ratio' : float,
'face_editor_lip_open_ratio' : float,
'face_editor_mouth_grim' : float,
'face_editor_mouth_pout' : float,
'face_editor_mouth_purse' : float,
'face_editor_mouth_smile' : float,
'face_editor_mouth_position_horizontal' : float,
'face_editor_mouth_position_vertical' : float,
'face_editor_head_pitch' : float,
'face_editor_head_yaw' : float,
'face_editor_head_roll' : float,
'face_enhancer_model' : FaceEnhancerModel,
'face_enhancer_blend' : int,
'face_enhancer_weight' : FaceEnhancerWeight,
'face_swapper_model' : FaceSwapperModel,
'face_swapper_pixel_boost' : str,
'face_swapper_weight' : FaceSwapperWeight,
'frame_colorizer_model' : FrameColorizerModel,
'frame_colorizer_size' : str,
'frame_colorizer_blend' : int,
'frame_enhancer_model' : FrameEnhancerModel,
'frame_enhancer_blend' : int,
'lip_syncer_model' : LipSyncerModel,
'lip_syncer_weight' : LipSyncerWeight
})
ProcessorStateKey = str
ProcessorState : TypeAlias = Dict[ProcessorStateKey, Any]
ProcessorStateSet : TypeAlias = Dict[AppContext, ProcessorState]
ProcessorOutputs : TypeAlias = Tuple[VisionFrame, Mask]

View File

@@ -1,14 +1,16 @@
import tempfile
from argparse import ArgumentParser, HelpFormatter
from functools import partial
import facefusion.choices
from facefusion import config, metadata, state_manager, wording
from facefusion import config, metadata, state_manager, translator
from facefusion.common_helper import create_float_metavar, create_int_metavar, get_first, get_last
from facefusion.execution import get_available_execution_providers
from facefusion.ffmpeg import get_available_encoder_set
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.jobs import job_store
from facefusion.processors.core import get_processors_modules
from facefusion.sanitizer import sanitize_int_range
def create_help_formatter_small(prog : str) -> HelpFormatter:
@@ -22,7 +24,7 @@ def create_help_formatter_large(prog : str) -> HelpFormatter:
def create_config_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths')
group_paths.add_argument('--config-path', help = wording.get('help.config_path'), default = 'facefusion.ini')
group_paths.add_argument('--config-path', help = translator.get('help.config_path'), default = 'facefusion.ini')
job_store.register_job_keys([ 'config_path' ])
apply_config_path(program)
return program
@@ -31,7 +33,7 @@ def create_config_path_program() -> ArgumentParser:
def create_temp_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths')
group_paths.add_argument('--temp-path', help = wording.get('help.temp_path'), default = config.get_str_value('paths', 'temp_path', tempfile.gettempdir()))
group_paths.add_argument('--temp-path', help = translator.get('help.temp_path'), default = config.get_str_value('paths', 'temp_path', tempfile.gettempdir()))
job_store.register_job_keys([ 'temp_path' ])
return program
@@ -39,7 +41,7 @@ def create_temp_path_program() -> ArgumentParser:
def create_jobs_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths')
group_paths.add_argument('--jobs-path', help = wording.get('help.jobs_path'), default = config.get_str_value('paths', 'jobs_path', '.jobs'))
group_paths.add_argument('--jobs-path', help = translator.get('help.jobs_path'), default = config.get_str_value('paths', 'jobs_path', '.jobs'))
job_store.register_job_keys([ 'jobs_path' ])
return program
@@ -47,7 +49,7 @@ def create_jobs_path_program() -> ArgumentParser:
def create_source_paths_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths')
group_paths.add_argument('-s', '--source-paths', help = wording.get('help.source_paths'), default = config.get_str_list('paths', 'source_paths'), nargs = '+')
group_paths.add_argument('-s', '--source-paths', help = translator.get('help.source_paths'), default = config.get_str_list('paths', 'source_paths'), nargs = '+')
job_store.register_step_keys([ 'source_paths' ])
return program
@@ -55,7 +57,7 @@ def create_source_paths_program() -> ArgumentParser:
def create_target_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths')
group_paths.add_argument('-t', '--target-path', help = wording.get('help.target_path'), default = config.get_str_value('paths', 'target_path'))
group_paths.add_argument('-t', '--target-path', help = translator.get('help.target_path'), default = config.get_str_value('paths', 'target_path'))
job_store.register_step_keys([ 'target_path' ])
return program
@@ -63,7 +65,7 @@ def create_target_path_program() -> ArgumentParser:
def create_output_path_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_paths = program.add_argument_group('paths')
group_paths.add_argument('-o', '--output-path', help = wording.get('help.output_path'), default = config.get_str_value('paths', 'output_path'))
group_paths.add_argument('-o', '--output-path', help = translator.get('help.output_path'), default = config.get_str_value('paths', 'output_path'))
job_store.register_step_keys([ 'output_path' ])
return program
@@ -71,7 +73,7 @@ def create_output_path_program() -> ArgumentParser:
def create_source_pattern_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_patterns = program.add_argument_group('patterns')
group_patterns.add_argument('-s', '--source-pattern', help = wording.get('help.source_pattern'), default = config.get_str_value('patterns', 'source_pattern'))
group_patterns.add_argument('-s', '--source-pattern', help = translator.get('help.source_pattern'), default = config.get_str_value('patterns', 'source_pattern'))
job_store.register_job_keys([ 'source_pattern' ])
return program
@@ -79,7 +81,7 @@ def create_source_pattern_program() -> ArgumentParser:
def create_target_pattern_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_patterns = program.add_argument_group('patterns')
group_patterns.add_argument('-t', '--target-pattern', help = wording.get('help.target_pattern'), default = config.get_str_value('patterns', 'target_pattern'))
group_patterns.add_argument('-t', '--target-pattern', help = translator.get('help.target_pattern'), default = config.get_str_value('patterns', 'target_pattern'))
job_store.register_job_keys([ 'target_pattern' ])
return program
@@ -87,7 +89,7 @@ def create_target_pattern_program() -> ArgumentParser:
def create_output_pattern_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_patterns = program.add_argument_group('patterns')
group_patterns.add_argument('-o', '--output-pattern', help = wording.get('help.output_pattern'), default = config.get_str_value('patterns', 'output_pattern'))
group_patterns.add_argument('-o', '--output-pattern', help = translator.get('help.output_pattern'), default = config.get_str_value('patterns', 'output_pattern'))
job_store.register_job_keys([ 'output_pattern' ])
return program
def create_face_detector_program() -> ArgumentParser:
	"""Create a parser fragment for face detector options (model, size, margin, angles, score)."""
	program = ArgumentParser(add_help = False)
	group_face_detector = program.add_argument_group('face detector')
	group_face_detector.add_argument('--face-detector-model', help = translator.get('help.face_detector_model'), default = config.get_str_value('face_detector', 'face_detector_model', 'yolo_face'), choices = facefusion.choices.face_detector_models)
	# pre-parse the model choice so the size choices can depend on it
	known_args, _ = program.parse_known_args()
	face_detector_size_choices = facefusion.choices.face_detector_set.get(known_args.face_detector_model)
	group_face_detector.add_argument('--face-detector-size', help = translator.get('help.face_detector_size'), default = config.get_str_value('face_detector', 'face_detector_size', get_last(face_detector_size_choices)), choices = face_detector_size_choices)
	# margin values are clamped into the allowed range via sanitize_int_range
	group_face_detector.add_argument('--face-detector-margin', help = translator.get('help.face_detector_margin'), type = partial(sanitize_int_range, int_range = facefusion.choices.face_detector_margin_range), default = config.get_int_list('face_detector', 'face_detector_margin', '0 0 0 0'), nargs = '+')
	group_face_detector.add_argument('--face-detector-angles', help = translator.get('help.face_detector_angles'), type = int, default = config.get_int_list('face_detector', 'face_detector_angles', '0'), choices = facefusion.choices.face_detector_angles, nargs = '+', metavar = 'FACE_DETECTOR_ANGLES')
	group_face_detector.add_argument('--face-detector-score', help = translator.get('help.face_detector_score'), type = float, default = config.get_float_value('face_detector', 'face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_float_metavar(facefusion.choices.face_detector_score_range))
	job_store.register_step_keys([ 'face_detector_model', 'face_detector_size', 'face_detector_margin', 'face_detector_angles', 'face_detector_score' ])
	return program
def create_face_landmarker_program() -> ArgumentParser:
	"""Create a parser fragment for face landmarker options (model, score)."""
	program = ArgumentParser(add_help = False)
	group_face_landmarker = program.add_argument_group('face landmarker')
	# single registration per option; stale wording-based duplicates would raise argparse.ArgumentError
	group_face_landmarker.add_argument('--face-landmarker-model', help = translator.get('help.face_landmarker_model'), default = config.get_str_value('face_landmarker', 'face_landmarker_model', '2dfan4'), choices = facefusion.choices.face_landmarker_models)
	group_face_landmarker.add_argument('--face-landmarker-score', help = translator.get('help.face_landmarker_score'), type = float, default = config.get_float_value('face_landmarker', 'face_landmarker_score', '0.5'), choices = facefusion.choices.face_landmarker_score_range, metavar = create_float_metavar(facefusion.choices.face_landmarker_score_range))
	job_store.register_step_keys([ 'face_landmarker_model', 'face_landmarker_score' ])
	return program
def create_face_selector_program() -> ArgumentParser:
	"""Create a parser fragment for face selector options (mode, order, age, gender, race, reference)."""
	program = ArgumentParser(add_help = False)
	group_face_selector = program.add_argument_group('face selector')
	group_face_selector.add_argument('--face-selector-mode', help = translator.get('help.face_selector_mode'), default = config.get_str_value('face_selector', 'face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes)
	group_face_selector.add_argument('--face-selector-order', help = translator.get('help.face_selector_order'), default = config.get_str_value('face_selector', 'face_selector_order', 'large-small'), choices = facefusion.choices.face_selector_orders)
	group_face_selector.add_argument('--face-selector-age-start', help = translator.get('help.face_selector_age_start'), type = int, default = config.get_int_value('face_selector', 'face_selector_age_start'), choices = facefusion.choices.face_selector_age_range, metavar = create_int_metavar(facefusion.choices.face_selector_age_range))
	group_face_selector.add_argument('--face-selector-age-end', help = translator.get('help.face_selector_age_end'), type = int, default = config.get_int_value('face_selector', 'face_selector_age_end'), choices = facefusion.choices.face_selector_age_range, metavar = create_int_metavar(facefusion.choices.face_selector_age_range))
	group_face_selector.add_argument('--face-selector-gender', help = translator.get('help.face_selector_gender'), default = config.get_str_value('face_selector', 'face_selector_gender'), choices = facefusion.choices.face_selector_genders)
	group_face_selector.add_argument('--face-selector-race', help = translator.get('help.face_selector_race'), default = config.get_str_value('face_selector', 'face_selector_race'), choices = facefusion.choices.face_selector_races)
	group_face_selector.add_argument('--reference-face-position', help = translator.get('help.reference_face_position'), type = int, default = config.get_int_value('face_selector', 'reference_face_position', '0'))
	group_face_selector.add_argument('--reference-face-distance', help = translator.get('help.reference_face_distance'), type = float, default = config.get_float_value('face_selector', 'reference_face_distance', '0.3'), choices = facefusion.choices.reference_face_distance_range, metavar = create_float_metavar(facefusion.choices.reference_face_distance_range))
	group_face_selector.add_argument('--reference-frame-number', help = translator.get('help.reference_frame_number'), type = int, default = config.get_int_value('face_selector', 'reference_frame_number', '0'))
	job_store.register_step_keys([ 'face_selector_mode', 'face_selector_order', 'face_selector_gender', 'face_selector_race', 'face_selector_age_start', 'face_selector_age_end', 'reference_face_position', 'reference_face_distance', 'reference_frame_number' ])
	return program
def create_face_masker_program() -> ArgumentParser:
	"""Create a parser fragment for face masker options (occluder, parser, mask types/areas/regions, blur, padding)."""
	program = ArgumentParser(add_help = False)
	group_face_masker = program.add_argument_group('face masker')
	group_face_masker.add_argument('--face-occluder-model', help = translator.get('help.face_occluder_model'), default = config.get_str_value('face_masker', 'face_occluder_model', 'xseg_1'), choices = facefusion.choices.face_occluder_models)
	group_face_masker.add_argument('--face-parser-model', help = translator.get('help.face_parser_model'), default = config.get_str_value('face_masker', 'face_parser_model', 'bisenet_resnet_34'), choices = facefusion.choices.face_parser_models)
	group_face_masker.add_argument('--face-mask-types', help = translator.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_masker', 'face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
	group_face_masker.add_argument('--face-mask-areas', help = translator.get('help.face_mask_areas').format(choices = ', '.join(facefusion.choices.face_mask_areas)), default = config.get_str_list('face_masker', 'face_mask_areas', ' '.join(facefusion.choices.face_mask_areas)), choices = facefusion.choices.face_mask_areas, nargs = '+', metavar = 'FACE_MASK_AREAS')
	group_face_masker.add_argument('--face-mask-regions', help = translator.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_masker', 'face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
	group_face_masker.add_argument('--face-mask-blur', help = translator.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_masker', 'face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_float_metavar(facefusion.choices.face_mask_blur_range))
	# padding values are clamped into the allowed range via sanitize_int_range
	group_face_masker.add_argument('--face-mask-padding', help = translator.get('help.face_mask_padding'), type = partial(sanitize_int_range, int_range = facefusion.choices.face_mask_padding_range), default = config.get_int_list('face_masker', 'face_mask_padding', '0 0 0 0'), nargs = '+')
	job_store.register_step_keys([ 'face_occluder_model', 'face_parser_model', 'face_mask_types', 'face_mask_areas', 'face_mask_regions', 'face_mask_blur', 'face_mask_padding' ])
	return program
def create_voice_extractor_program() -> ArgumentParser:
	"""Create a parser fragment exposing the --voice-extractor-model argument."""
	program = ArgumentParser(add_help = False)
	group_voice_extractor = program.add_argument_group('voice extractor')
	# single registration only; the stale wording-based duplicate would raise argparse.ArgumentError
	group_voice_extractor.add_argument('--voice-extractor-model', help = translator.get('help.voice_extractor_model'), default = config.get_str_value('voice_extractor', 'voice_extractor_model', 'kim_vocal_2'), choices = facefusion.choices.voice_extractor_models)
	job_store.register_step_keys([ 'voice_extractor_model' ])
	return program
def create_frame_extraction_program() -> ArgumentParser:
	"""Create a parser fragment for frame extraction options (trim range, temp format, keep temp)."""
	program = ArgumentParser(add_help = False)
	group_frame_extraction = program.add_argument_group('frame extraction')
	group_frame_extraction.add_argument('--trim-frame-start', help = translator.get('help.trim_frame_start'), type = int, default = facefusion.config.get_int_value('frame_extraction', 'trim_frame_start'))
	group_frame_extraction.add_argument('--trim-frame-end', help = translator.get('help.trim_frame_end'), type = int, default = facefusion.config.get_int_value('frame_extraction', 'trim_frame_end'))
	group_frame_extraction.add_argument('--temp-frame-format', help = translator.get('help.temp_frame_format'), default = config.get_str_value('frame_extraction', 'temp_frame_format', 'png'), choices = facefusion.choices.temp_frame_formats)
	group_frame_extraction.add_argument('--keep-temp', help = translator.get('help.keep_temp'), action = 'store_true', default = config.get_bool_value('frame_extraction', 'keep_temp'))
	job_store.register_step_keys([ 'trim_frame_start', 'trim_frame_end', 'temp_frame_format', 'keep_temp' ])
	return program
def create_output_creation_program() -> ArgumentParser:
	"""Create a parser fragment for output creation options (image/audio/video encoders, quality, scale, fps)."""
	program = ArgumentParser(add_help = False)
	# probe the available encoders once so defaults and choices stay in sync
	available_encoder_set = get_available_encoder_set()
	group_output_creation = program.add_argument_group('output creation')
	group_output_creation.add_argument('--output-image-quality', help = translator.get('help.output_image_quality'), type = int, default = config.get_int_value('output_creation', 'output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_int_metavar(facefusion.choices.output_image_quality_range))
	group_output_creation.add_argument('--output-image-scale', help = translator.get('help.output_image_scale'), type = float, default = config.get_float_value('output_creation', 'output_image_scale', '1.0'), choices = facefusion.choices.output_image_scale_range)
	group_output_creation.add_argument('--output-audio-encoder', help = translator.get('help.output_audio_encoder'), default = config.get_str_value('output_creation', 'output_audio_encoder', get_first(available_encoder_set.get('audio'))), choices = available_encoder_set.get('audio'))
	group_output_creation.add_argument('--output-audio-quality', help = translator.get('help.output_audio_quality'), type = int, default = config.get_int_value('output_creation', 'output_audio_quality', '80'), choices = facefusion.choices.output_audio_quality_range, metavar = create_int_metavar(facefusion.choices.output_audio_quality_range))
	group_output_creation.add_argument('--output-audio-volume', help = translator.get('help.output_audio_volume'), type = int, default = config.get_int_value('output_creation', 'output_audio_volume', '100'), choices = facefusion.choices.output_audio_volume_range, metavar = create_int_metavar(facefusion.choices.output_audio_volume_range))
	group_output_creation.add_argument('--output-video-encoder', help = translator.get('help.output_video_encoder'), default = config.get_str_value('output_creation', 'output_video_encoder', get_first(available_encoder_set.get('video'))), choices = available_encoder_set.get('video'))
	group_output_creation.add_argument('--output-video-preset', help = translator.get('help.output_video_preset'), default = config.get_str_value('output_creation', 'output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets)
	group_output_creation.add_argument('--output-video-quality', help = translator.get('help.output_video_quality'), type = int, default = config.get_int_value('output_creation', 'output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_int_metavar(facefusion.choices.output_video_quality_range))
	group_output_creation.add_argument('--output-video-scale', help = translator.get('help.output_video_scale'), type = float, default = config.get_float_value('output_creation', 'output_video_scale', '1.0'), choices = facefusion.choices.output_video_scale_range)
	group_output_creation.add_argument('--output-video-fps', help = translator.get('help.output_video_fps'), type = float, default = config.get_float_value('output_creation', 'output_video_fps'))
	job_store.register_step_keys([ 'output_image_quality', 'output_image_scale', 'output_audio_encoder', 'output_audio_quality', 'output_audio_volume', 'output_video_encoder', 'output_video_preset', 'output_video_quality', 'output_video_scale', 'output_video_fps' ])
	return program
def create_processors_program() -> ArgumentParser:
	"""Create a parser fragment for the --processors argument and delegate per-processor arguments."""
	program = ArgumentParser(add_help = False)
	# discover processor modules from the filesystem so new processors register automatically
	available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
	group_processors = program.add_argument_group('processors')
	group_processors.add_argument('--processors', help = translator.get('help.processors').format(choices = ', '.join(available_processors)), default = config.get_str_list('processors', 'processors', 'face_swapper'), nargs = '+')
	job_store.register_step_keys([ 'processors' ])
	# each processor module contributes its own arguments
	for processor_module in get_processors_modules(available_processors):
		processor_module.register_args(program)
	return program
def create_uis_program() -> ArgumentParser:
	"""Create a parser fragment for UI options (browser, layouts, workflow)."""
	program = ArgumentParser(add_help = False)
	# discover UI layouts from the filesystem so new layouts become selectable automatically
	available_ui_layouts = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/uis/layouts') ]
	group_uis = program.add_argument_group('uis')
	group_uis.add_argument('--open-browser', help = translator.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis', 'open_browser'))
	group_uis.add_argument('--ui-layouts', help = translator.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis', 'ui_layouts', 'default'), nargs = '+')
	group_uis.add_argument('--ui-workflow', help = translator.get('help.ui_workflow'), default = config.get_str_value('uis', 'ui_workflow', 'instant_runner'), choices = facefusion.choices.ui_workflows)
	return program
def create_download_providers_program() -> ArgumentParser:
	"""Create a parser fragment exposing the --download-providers argument."""
	program = ArgumentParser(add_help = False)
	group_download = program.add_argument_group('download')
	# single registration only; the stale wording-based duplicate would raise argparse.ArgumentError
	group_download.add_argument('--download-providers', help = translator.get('help.download_providers').format(choices = ', '.join(facefusion.choices.download_providers)), default = config.get_str_list('download', 'download_providers', ' '.join(facefusion.choices.download_providers)), choices = facefusion.choices.download_providers, nargs = '+', metavar = 'DOWNLOAD_PROVIDERS')
	job_store.register_job_keys([ 'download_providers' ])
	return program
def create_download_scope_program() -> ArgumentParser:
	"""Create a parser fragment exposing the --download-scope argument."""
	program = ArgumentParser(add_help = False)
	group_download = program.add_argument_group('download')
	# single registration only; the stale wording-based duplicate would raise argparse.ArgumentError
	group_download.add_argument('--download-scope', help = translator.get('help.download_scope'), default = config.get_str_value('download', 'download_scope', 'lite'), choices = facefusion.choices.download_scopes)
	job_store.register_job_keys([ 'download_scope' ])
	return program
def create_benchmark_program() -> ArgumentParser:
	"""Create a parser fragment for benchmark options (mode, resolutions, cycle count)."""
	program = ArgumentParser(add_help = False)
	group_benchmark = program.add_argument_group('benchmark')
	group_benchmark.add_argument('--benchmark-mode', help = translator.get('help.benchmark_mode'), default = config.get_str_value('benchmark', 'benchmark_mode', 'warm'), choices = facefusion.choices.benchmark_modes)
	group_benchmark.add_argument('--benchmark-resolutions', help = translator.get('help.benchmark_resolutions'), default = config.get_str_list('benchmark', 'benchmark_resolutions', get_first(facefusion.choices.benchmark_resolutions)), choices = facefusion.choices.benchmark_resolutions, nargs = '+')
	group_benchmark.add_argument('--benchmark-cycle-count', help = translator.get('help.benchmark_cycle_count'), type = int, default = config.get_int_value('benchmark', 'benchmark_cycle_count', '5'), choices = facefusion.choices.benchmark_cycle_count_range)
	return program
def create_execution_program() -> ArgumentParser:
	"""Create a parser fragment for execution options (device ids, providers, thread count)."""
	program = ArgumentParser(add_help = False)
	available_execution_providers = get_available_execution_providers()
	group_execution = program.add_argument_group('execution')
	# NOTE(review): type = int converts CLI-supplied ids while the config default stays a str list — confirm downstream handles both
	group_execution.add_argument('--execution-device-ids', help = translator.get('help.execution_device_ids'), type = int, default = config.get_str_list('execution', 'execution_device_ids', '0'), nargs = '+', metavar = 'EXECUTION_DEVICE_IDS')
	group_execution.add_argument('--execution-providers', help = translator.get('help.execution_providers').format(choices = ', '.join(available_execution_providers)), default = config.get_str_list('execution', 'execution_providers', get_first(available_execution_providers)), choices = available_execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
	group_execution.add_argument('--execution-thread-count', help = translator.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution', 'execution_thread_count', '8'), choices = facefusion.choices.execution_thread_count_range, metavar = create_int_metavar(facefusion.choices.execution_thread_count_range))
	job_store.register_job_keys([ 'execution_device_ids', 'execution_providers', 'execution_thread_count' ])
	return program
def create_memory_program() -> ArgumentParser:
	"""Create a parser fragment for memory options (video memory strategy, system memory limit)."""
	program = ArgumentParser(add_help = False)
	group_memory = program.add_argument_group('memory')
	group_memory.add_argument('--video-memory-strategy', help = translator.get('help.video_memory_strategy'), default = config.get_str_value('memory', 'video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies)
	group_memory.add_argument('--system-memory-limit', help = translator.get('help.system_memory_limit'), type = int, default = config.get_int_value('memory', 'system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_int_metavar(facefusion.choices.system_memory_limit_range))
	job_store.register_job_keys([ 'video_memory_strategy', 'system_memory_limit' ])
	return program
@@ -250,7 +253,7 @@ def create_memory_program() -> ArgumentParser:
def create_log_level_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_misc = program.add_argument_group('misc')
group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc', 'log_level', 'info'), choices = facefusion.choices.log_levels)
group_misc.add_argument('--log-level', help = translator.get('help.log_level'), default = config.get_str_value('misc', 'log_level', 'info'), choices = facefusion.choices.log_levels)
job_store.register_job_keys([ 'log_level' ])
return program
@@ -258,27 +261,27 @@ def create_log_level_program() -> ArgumentParser:
def create_halt_on_error_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_misc = program.add_argument_group('misc')
group_misc.add_argument('--halt-on-error', help = wording.get('help.halt_on_error'), action = 'store_true', default = config.get_bool_value('misc', 'halt_on_error'))
group_misc.add_argument('--halt-on-error', help = translator.get('help.halt_on_error'), action = 'store_true', default = config.get_bool_value('misc', 'halt_on_error'))
job_store.register_job_keys([ 'halt_on_error' ])
return program
def create_job_id_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
program.add_argument('job_id', help = wording.get('help.job_id'))
program.add_argument('job_id', help = translator.get('help.job_id'))
job_store.register_job_keys([ 'job_id' ])
return program
def create_job_status_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
program.add_argument('job_status', help = wording.get('help.job_status'), choices = facefusion.choices.job_statuses)
program.add_argument('job_status', help = translator.get('help.job_status'), choices = facefusion.choices.job_statuses)
return program
def create_step_index_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
program.add_argument('step_index', help = wording.get('help.step_index'), type = int)
program.add_argument('step_index', help = translator.get('help.step_index'), type = int)
return program
@@ -296,27 +299,29 @@ def create_program() -> ArgumentParser:
program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
sub_program = program.add_subparsers(dest = 'command')
# general
sub_program.add_parser('run', help = wording.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('batch-run', help = wording.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('force-download', help = wording.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('benchmark', help = wording.get('help.benchmark'), parents = [ create_temp_path_program(), collect_step_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('run', help = translator.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('headless-run', help = translator.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('batch-run', help = translator.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('force-download', help = translator.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('benchmark', help = translator.get('help.benchmark'), parents = [ create_temp_path_program(), collect_step_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
# info
sub_program.add_parser('licenses', help = 'List model licenses', parents = [ create_log_level_program() ], formatter_class = create_help_formatter_large)
# job manager
sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit', help = wording.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit-all', help = wording.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-delete', help = wording.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-delete-all', help = wording.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-add-step', help = wording.get('help.job_add_step'), parents = [ create_job_id_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remix-step', help = wording.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-insert-step', help = wording.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remove-step', help = wording.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-list', help = translator.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-create', help = translator.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit', help = translator.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-submit-all', help = translator.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-delete', help = translator.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-delete-all', help = translator.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-add-step', help = translator.get('help.job_add_step'), parents = [ create_job_id_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remix-step', help = translator.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-insert-step', help = translator.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-remove-step', help = translator.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
# job runner
sub_program.add_parser('job-run', help = wording.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-run-all', help = wording.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry', help = wording.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry-all', help = wording.get('help.job_retry_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-run', help = translator.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-run-all', help = translator.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry', help = translator.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-retry-all', help = translator.get('help.job_retry_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large)
return ArgumentParser(parents = [ program ], formatter_class = create_help_formatter_small)

7
facefusion/sanitizer.py Normal file
View File

@@ -0,0 +1,7 @@
from typing import Sequence
def sanitize_int_range(value : int, int_range : Sequence[int]) -> int:
	# Return *value* unchanged when it is a member of *int_range*,
	# otherwise fall back to the first entry of the range.
	return value if value in int_range else int_range[0]

View File

@@ -8,20 +8,20 @@ import cv2
import numpy
from tqdm import tqdm
from facefusion import ffmpeg_builder, logger, state_manager, wording
from facefusion import ffmpeg_builder, logger, state_manager, translator
from facefusion.audio import create_empty_audio_frame
from facefusion.content_analyser import analyse_stream
from facefusion.ffmpeg import open_ffmpeg
from facefusion.filesystem import is_directory
from facefusion.processors.core import get_processors_modules
from facefusion.types import Fps, StreamMode, VisionFrame
from facefusion.vision import read_static_images
from facefusion.vision import extract_vision_mask, read_static_images
def multi_process_capture(camera_capture : cv2.VideoCapture, camera_fps : Fps) -> Generator[VisionFrame, None, None]:
capture_deque : Deque[VisionFrame] = deque()
with tqdm(desc = wording.get('streaming'), unit = 'frame', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
with tqdm(desc = translator.get('streaming'), unit = 'frame', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
with ThreadPoolExecutor(max_workers = state_manager.get_item('execution_thread_count')) as executor:
futures = []
@@ -49,18 +49,20 @@ def process_stream_frame(target_vision_frame : VisionFrame) -> VisionFrame:
source_audio_frame = create_empty_audio_frame()
source_voice_frame = create_empty_audio_frame()
temp_vision_frame = target_vision_frame.copy()
temp_vision_mask = extract_vision_mask(temp_vision_frame)
for processor_module in get_processors_modules(state_manager.get_item('processors')):
logger.disable()
if processor_module.pre_process('stream'):
logger.enable()
temp_vision_frame = processor_module.process_frame(
temp_vision_frame, temp_vision_mask = processor_module.process_frame(
{
'source_vision_frames': source_vision_frames,
'source_audio_frame': source_audio_frame,
'source_voice_frame': source_voice_frame,
'target_vision_frame': target_vision_frame,
'temp_vision_frame': temp_vision_frame
'temp_vision_frame': temp_vision_frame,
'temp_vision_mask': temp_vision_mask
})
logger.enable()
@@ -93,6 +95,6 @@ def open_stream(stream_mode : StreamMode, stream_resolution : str, stream_fps :
commands.extend(ffmpeg_builder.set_output(device_path))
else:
logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__)
logger.error(translator.get('stream_not_loaded').format(stream_mode = stream_mode), __name__)
return open_ffmpeg(commands)

View File

@@ -2,7 +2,7 @@ from datetime import datetime, timedelta
from time import time
from typing import Optional, Tuple
from facefusion import wording
from facefusion import translator
def get_current_date_time() -> datetime:
@@ -25,9 +25,9 @@ def describe_time_ago(date_time : datetime) -> Optional[str]:
days, hours, minutes, _ = split_time_delta(time_ago)
if timedelta(days = 1) < time_ago:
return wording.get('time_ago_days').format(days = days, hours = hours, minutes = minutes)
return translator.get('time_ago_days').format(days = days, hours = hours, minutes = minutes)
if timedelta(hours = 1) < time_ago:
return wording.get('time_ago_hours').format(hours = hours, minutes = minutes)
return translator.get('time_ago_hours').format(hours = hours, minutes = minutes)
if timedelta(minutes = 1) < time_ago:
return wording.get('time_ago_minutes').format(minutes = minutes)
return wording.get('time_ago_now')
return translator.get('time_ago_minutes').format(minutes = minutes)
return translator.get('time_ago_now')

35
facefusion/translator.py Normal file
View File

@@ -0,0 +1,35 @@
import importlib
from typing import Optional

from facefusion.types import Language, LocalPoolSet, Locals

# module name -> per-language locals mapping, populated via load()/__autoload__()
LOCAL_POOL_SET : LocalPoolSet = {}
CURRENT_LANGUAGE : Language = 'en'


def __autoload__(module_name : str) -> None:
	# Best-effort: import the sibling `locals` module of the given package and
	# register its LOCALS mapping; missing locals modules are silently ignored.
	try:
		__locals__ = importlib.import_module(module_name + '.locals')
		load(__locals__.LOCALS, module_name)
	except ImportError:
		pass


def load(__locals__ : Locals, module_name : str) -> None:
	# Register (or replace) the locals mapping for the given module name.
	LOCAL_POOL_SET[module_name] = __locals__


def get(notation : str, module_name : str = 'facefusion') -> Optional[str]:
	"""
	Resolve a dot-notation key (e.g. 'help.run') against the locals of *module_name*
	for CURRENT_LANGUAGE. Returns the translated string, or None when the module,
	language or any fragment of the notation is missing.
	"""
	if module_name not in LOCAL_POOL_SET:
		__autoload__(module_name)
	locals_pool = LOCAL_POOL_SET.get(module_name)
	# guard: __autoload__ may have failed, leaving no entry for module_name
	if not locals_pool:
		return None
	current = locals_pool.get(CURRENT_LANGUAGE)
	for fragment in notation.split('.'):
		# only descend through dicts; a missing fragment means the key does not
		# exist (the previous code silently skipped it and could match a later
		# fragment against the stale dict, or substring-match against a str)
		if isinstance(current, dict) and fragment in current:
			current = current.get(fragment)
		else:
			return None
	if isinstance(current, str):
		return current
	return None

View File

@@ -50,6 +50,10 @@ FaceStore = TypedDict('FaceStore',
'static_faces' : FaceSet
})
Language = Literal['en']
Locals : TypeAlias = Dict[Language, Dict[str, Any]]
LocalPoolSet : TypeAlias = Dict[str, Locals]
VideoCaptureSet : TypeAlias = Dict[str, cv2.VideoCapture]
VideoWriterSet : TypeAlias = Dict[str, cv2.VideoWriter]
CameraCaptureSet : TypeAlias = Dict[str, cv2.VideoCapture]
@@ -63,6 +67,7 @@ CameraPoolSet = TypedDict('CameraPoolSet',
'capture': CameraCaptureSet
})
ColorMode = Literal['rgb', 'rgba']
VisionFrame : TypeAlias = NDArray[Any]
Mask : TypeAlias = NDArray[Any]
Points : TypeAlias = NDArray[Any]
@@ -83,7 +88,9 @@ VoiceChunk : TypeAlias = NDArray[Any]
Fps : TypeAlias = float
Duration : TypeAlias = float
Color : TypeAlias = Tuple[int, int, int, int]
Padding : TypeAlias = Tuple[int, int, int, int]
Margin : TypeAlias = Tuple[int, int, int, int]
Orientation = Literal['landscape', 'portrait']
Resolution : TypeAlias = Tuple[int, int]
@@ -94,7 +101,8 @@ ProcessStep : TypeAlias = Callable[[str, int, Args], bool]
Content : TypeAlias = Dict[str, Any]
Commands : TypeAlias = List[str]
Command : TypeAlias = str
CommandSet : TypeAlias = Dict[str, List[Command]]
WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128', 'dfl_whole_face', 'ffhq_512', 'mtcnn_512', 'styleganex_384']
WarpTemplateSet : TypeAlias = Dict[WarpTemplate, NDArray[Any]]
@@ -104,8 +112,8 @@ ErrorCode = Literal[0, 1, 2, 3, 4]
LogLevel = Literal['error', 'warn', 'info', 'debug']
LogLevelSet : TypeAlias = Dict[LogLevel, int]
TableHeaders = List[str]
TableContents = List[List[Any]]
TableHeader : TypeAlias = str
TableContent : TypeAlias = Any
FaceDetectorModel = Literal['many', 'retinaface', 'scrfd', 'yolo_face', 'yunet']
FaceLandmarkerModel = Literal['many', '2dfan4', 'peppa_wutz']
@@ -124,7 +132,7 @@ VoiceExtractorModel = Literal['kim_vocal_1', 'kim_vocal_2', 'uvr_mdxnet']
AudioFormat = Literal['flac', 'm4a', 'mp3', 'ogg', 'opus', 'wav']
ImageFormat = Literal['bmp', 'jpeg', 'png', 'tiff', 'webp']
VideoFormat = Literal['avi', 'm4v', 'mkv', 'mov', 'mp4', 'webm', 'wmv']
VideoFormat = Literal['avi', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mxf', 'webm', 'wmv']
TempFrameFormat = Literal['bmp', 'jpeg', 'png', 'tiff']
AudioTypeSet : TypeAlias = Dict[AudioFormat, str]
ImageTypeSet : TypeAlias = Dict[ImageFormat, str]
@@ -267,6 +275,7 @@ StateKey = Literal\
'benchmark_cycle_count',
'face_detector_model',
'face_detector_size',
'face_detector_margin',
'face_detector_angles',
'face_detector_score',
'face_landmarker_model',
@@ -336,6 +345,7 @@ State = TypedDict('State',
'benchmark_cycle_count' : int,
'face_detector_model' : FaceDetectorModel,
'face_detector_size' : str,
'face_detector_margin': Margin,
'face_detector_angles' : List[Angle],
'face_detector_score' : Score,
'face_landmarker_model' : FaceLandmarkerModel,

View File

@@ -8,15 +8,19 @@
max-width: 110em;
}
:root:root:root:root input[type="number"]
:root:root:root:root .tab-like-container input[type="number"]
{
appearance: textfield;
border-radius: unset;
text-align: center;
order: 1;
padding: unset
}
:root:root:root:root input[type="number"]
{
appearance: textfield;
}
:root:root:root:root input[type="number"]::-webkit-inner-spin-button
{
appearance: none;
@@ -132,6 +136,9 @@
:root:root:root:root .image-frame
{
background-image: conic-gradient(#fff 90deg, #999 90deg 180deg, #fff 180deg 270deg, #999 270deg);
background-size: 1.25rem 1.25rem;
background-repeat: repeat;
width: 100%;
}

View File

@@ -1,6 +1,6 @@
from typing import List
from typing import Dict, List
from facefusion.types import WebcamMode
from facefusion.types import Color, WebcamMode
from facefusion.uis.types import JobManagerAction, JobRunnerAction, PreviewMode
job_manager_actions : List[JobManagerAction] = [ 'job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]
@@ -13,3 +13,13 @@ preview_resolutions : List[str] = [ '512x512', '768x768', '1024x1024' ]
webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080' ]
background_remover_colors : Dict[str, Color] =\
{
'red' : (255, 0, 0, 255),
'green' : (0, 255, 0, 255),
'blue' : (0, 0, 255, 255),
'black' : (0, 0, 0, 255),
'white' : (255, 255, 255, 255),
'alpha' : (0, 0, 0, 0)
}

View File

@@ -3,7 +3,7 @@ from typing import Optional
import gradio
from facefusion import metadata, wording
from facefusion import metadata, translator
METADATA_BUTTON : Optional[gradio.Button] = None
ACTION_BUTTON : Optional[gradio.Button] = None
@@ -16,16 +16,16 @@ def render() -> None:
action = random.choice(
[
{
'wording': wording.get('about.become_a_member'),
'translator': translator.get('about.fund'),
'url': 'https://fund.facefusion.io'
},
{
'translator': translator.get('about.subscribe'),
'url': 'https://subscribe.facefusion.io'
},
{
'wording': wording.get('about.join_our_community'),
'translator': translator.get('about.join'),
'url': 'https://join.facefusion.io'
},
{
'wording': wording.get('about.read_the_documentation'),
'url': 'https://docs.facefusion.io'
}
])
@@ -35,7 +35,7 @@ def render() -> None:
link = metadata.get('url')
)
ACTION_BUTTON = gradio.Button(
value = action.get('wording'),
value = action.get('translator'),
link = action.get('url'),
size = 'sm'
)

View File

@@ -2,11 +2,11 @@ from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, wording
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_float_step
from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module
from facefusion.processors.types import AgeModifierModel
from facefusion.processors.modules.age_modifier import choices as age_modifier_choices
from facefusion.processors.modules.age_modifier.types import AgeModifierModel
from facefusion.uis.core import get_ui_component, register_ui_component
AGE_MODIFIER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -19,17 +19,17 @@ def render() -> None:
has_age_modifier = 'age_modifier' in state_manager.get_item('processors')
AGE_MODIFIER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.age_modifier_model_dropdown'),
choices = processors_choices.age_modifier_models,
label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.age_modifier'),
choices = age_modifier_choices.age_modifier_models,
value = state_manager.get_item('age_modifier_model'),
visible = has_age_modifier
)
AGE_MODIFIER_DIRECTION_SLIDER = gradio.Slider(
label = wording.get('uis.age_modifier_direction_slider'),
label = translator.get('uis.direction_slider', 'facefusion.processors.modules.age_modifier'),
value = state_manager.get_item('age_modifier_direction'),
step = calculate_float_step(processors_choices.age_modifier_direction_range),
minimum = processors_choices.age_modifier_direction_range[0],
maximum = processors_choices.age_modifier_direction_range[-1],
step = calculate_float_step(age_modifier_choices.age_modifier_direction_range),
minimum = age_modifier_choices.age_modifier_direction_range[0],
maximum = age_modifier_choices.age_modifier_direction_range[-1],
visible = has_age_modifier
)
register_ui_component('age_modifier_model_dropdown', AGE_MODIFIER_MODEL_DROPDOWN)

View File

@@ -0,0 +1,107 @@
from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.processors.core import load_processor_module
from facefusion.processors.modules.background_remover import choices as background_remover_choices
from facefusion.processors.modules.background_remover.types import BackgroundRemoverModel
from facefusion.sanitizer import sanitize_int_range
from facefusion.uis.core import get_ui_component, register_ui_component
# Module-level component handles, assigned by render() and wired up by listen().
BACKGROUND_REMOVER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
# wraps the four color channel inputs so they can be shown/hidden together
BACKGROUND_REMOVER_COLOR_WRAPPER : Optional[gradio.Group] = None
BACKGROUND_REMOVER_COLOR_RED_NUMBER : Optional[gradio.Number] = None
BACKGROUND_REMOVER_COLOR_GREEN_NUMBER : Optional[gradio.Number] = None
BACKGROUND_REMOVER_COLOR_BLUE_NUMBER : Optional[gradio.Number] = None
BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER : Optional[gradio.Number] = None
def render() -> None:
	"""Create the background remover UI: a model dropdown plus a group of four RGBA channel inputs."""
	global BACKGROUND_REMOVER_MODEL_DROPDOWN
	global BACKGROUND_REMOVER_COLOR_WRAPPER
	global BACKGROUND_REMOVER_COLOR_RED_NUMBER
	global BACKGROUND_REMOVER_COLOR_GREEN_NUMBER
	global BACKGROUND_REMOVER_COLOR_BLUE_NUMBER
	global BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER
	# components are only visible while the processor is currently selected
	has_background_remover = 'background_remover' in state_manager.get_item('processors')
	# indexed as (red, green, blue, alpha) below — assumed RGBA order, matching the channel labels
	background_remover_color = state_manager.get_item('background_remover_color')
	BACKGROUND_REMOVER_MODEL_DROPDOWN = gradio.Dropdown(
		label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.background_remover'),
		choices = background_remover_choices.background_remover_models,
		value = state_manager.get_item('background_remover_model'),
		visible = has_background_remover
	)
	# group wrapper lets remote_update() toggle all four channel inputs at once
	with gradio.Group(visible = has_background_remover) as BACKGROUND_REMOVER_COLOR_WRAPPER:
		with gradio.Row():
			BACKGROUND_REMOVER_COLOR_RED_NUMBER = gradio.Number(
				label = translator.get('uis.color_red_number', 'facefusion.processors.modules.background_remover'),
				value = background_remover_color[0],
				minimum = background_remover_choices.background_remover_color_range[0],
				maximum = background_remover_choices.background_remover_color_range[-1],
				step = calculate_int_step(background_remover_choices.background_remover_color_range)
			)
			BACKGROUND_REMOVER_COLOR_GREEN_NUMBER = gradio.Number(
				label = translator.get('uis.color_green_number', 'facefusion.processors.modules.background_remover'),
				value = background_remover_color[1],
				minimum = background_remover_choices.background_remover_color_range[0],
				maximum = background_remover_choices.background_remover_color_range[-1],
				step = calculate_int_step(background_remover_choices.background_remover_color_range)
			)
		with gradio.Row():
			BACKGROUND_REMOVER_COLOR_BLUE_NUMBER = gradio.Number(
				label = translator.get('uis.color_blue_number', 'facefusion.processors.modules.background_remover'),
				value = background_remover_color[2],
				minimum = background_remover_choices.background_remover_color_range[0],
				maximum = background_remover_choices.background_remover_color_range[-1],
				step = calculate_int_step(background_remover_choices.background_remover_color_range)
			)
			BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER = gradio.Number(
				label = translator.get('uis.color_alpha_number', 'facefusion.processors.modules.background_remover'),
				value = background_remover_color[3],
				minimum = background_remover_choices.background_remover_color_range[0],
				maximum = background_remover_choices.background_remover_color_range[-1],
				step = calculate_int_step(background_remover_choices.background_remover_color_range)
			)
	register_ui_component('background_remover_model_dropdown', BACKGROUND_REMOVER_MODEL_DROPDOWN)
	register_ui_component('background_remover_color_red_number', BACKGROUND_REMOVER_COLOR_RED_NUMBER)
	register_ui_component('background_remover_color_green_number', BACKGROUND_REMOVER_COLOR_GREEN_NUMBER)
	register_ui_component('background_remover_color_blue_number', BACKGROUND_REMOVER_COLOR_BLUE_NUMBER)
	register_ui_component('background_remover_color_alpha_number', BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER)
def listen() -> None:
	"""Wire the component events to their update handlers."""
	BACKGROUND_REMOVER_MODEL_DROPDOWN.change(update_background_remover_model, inputs = BACKGROUND_REMOVER_MODEL_DROPDOWN, outputs = BACKGROUND_REMOVER_MODEL_DROPDOWN)
	color_numbers = [ BACKGROUND_REMOVER_COLOR_RED_NUMBER, BACKGROUND_REMOVER_COLOR_GREEN_NUMBER, BACKGROUND_REMOVER_COLOR_BLUE_NUMBER, BACKGROUND_REMOVER_COLOR_ALPHA_NUMBER ]
	# any single channel change re-submits the full RGBA set to the handler
	for color_number in color_numbers:
		color_number.change(update_background_remover_color, inputs = color_numbers)
	processors_checkbox_group = get_ui_component('processors_checkbox_group')
	if processors_checkbox_group:
		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ BACKGROUND_REMOVER_MODEL_DROPDOWN, BACKGROUND_REMOVER_COLOR_WRAPPER ])
def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Group]:
	# Toggle visibility of the model dropdown and the color group
	# whenever the processor selection changes.
	is_visible = 'background_remover' in processors
	return gradio.Dropdown(visible = is_visible), gradio.Group(visible = is_visible)
def update_background_remover_model(background_remover_model : BackgroundRemoverModel) -> gradio.Dropdown:
	"""Switch the background remover model, clearing the inference pool first."""
	processor_module = load_processor_module('background_remover')
	processor_module.clear_inference_pool()
	state_manager.set_item('background_remover_model', background_remover_model)
	# only reflect the new value in the dropdown once the model assets check out
	if not processor_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = state_manager.get_item('background_remover_model'))
def update_background_remover_color(red : int, green : int, blue : int, alpha : int) -> None:
	"""Sanitize the four RGBA channels and store them as one color tuple."""
	color_range = background_remover_choices.background_remover_color_range
	sanitized_color = tuple(sanitize_int_range(channel, color_range) for channel in (red, green, blue, alpha))
	state_manager.set_item('background_remover_color', sanitized_color)

View File

@@ -2,7 +2,7 @@ from typing import Any, Generator, List, Optional
import gradio
from facefusion import benchmarker, state_manager, wording
from facefusion import benchmarker, state_manager, translator
BENCHMARK_BENCHMARKS_DATAFRAME : Optional[gradio.Dataframe] = None
BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
@@ -34,7 +34,7 @@ def render() -> None:
show_label = False
)
BENCHMARK_START_BUTTON = gradio.Button(
value = wording.get('uis.start_button'),
value = translator.get('uis.start_button'),
variant = 'primary',
size = 'sm'
)

View File

@@ -3,7 +3,7 @@ from typing import List, Optional
import gradio
import facefusion.choices
from facefusion import state_manager, wording
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.types import BenchmarkMode, BenchmarkResolution
@@ -18,17 +18,17 @@ def render() -> None:
global BENCHMARK_CYCLE_COUNT_SLIDER
BENCHMARK_MODE_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.benchmark_mode_dropdown'),
label = translator.get('uis.benchmark_mode_dropdown'),
choices = facefusion.choices.benchmark_modes,
value = state_manager.get_item('benchmark_mode')
)
BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('uis.benchmark_resolutions_checkbox_group'),
label = translator.get('uis.benchmark_resolutions_checkbox_group'),
choices = facefusion.choices.benchmark_resolutions,
value = state_manager.get_item('benchmark_resolutions')
)
BENCHMARK_CYCLE_COUNT_SLIDER = gradio.Slider(
label = wording.get('uis.benchmark_cycle_count_slider'),
label = translator.get('uis.benchmark_cycle_count_slider'),
value = state_manager.get_item('benchmark_cycle_count'),
step = calculate_int_step(facefusion.choices.benchmark_cycle_count_range),
minimum = facefusion.choices.benchmark_cycle_count_range[0],

View File

@@ -2,7 +2,7 @@ from typing import List, Optional
import gradio
from facefusion import state_manager, wording
from facefusion import state_manager, translator
from facefusion.uis import choices as uis_choices
COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.Checkboxgroup] = None
@@ -17,7 +17,7 @@ def render() -> None:
common_options.append('keep-temp')
COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup(
label = wording.get('uis.common_options_checkbox_group'),
label = translator.get('uis.common_options_checkbox_group'),
choices = uis_choices.common_options,
value = common_options
)

View File

@@ -2,11 +2,11 @@ from typing import List, Optional, Tuple
import gradio
from facefusion import state_manager, wording
from facefusion import state_manager, translator
from facefusion.common_helper import calculate_int_step
from facefusion.processors import choices as processors_choices
from facefusion.processors.core import load_processor_module
from facefusion.processors.types import DeepSwapperModel
from facefusion.processors.modules.deep_swapper import choices as deep_swapper_choices
from facefusion.processors.modules.deep_swapper.types import DeepSwapperModel
from facefusion.uis.core import get_ui_component, register_ui_component
DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -19,17 +19,17 @@ def render() -> None:
has_deep_swapper = 'deep_swapper' in state_manager.get_item('processors')
DEEP_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.deep_swapper_model_dropdown'),
choices = processors_choices.deep_swapper_models,
label = translator.get('uis.model_dropdown', 'facefusion.processors.modules.deep_swapper'),
choices = deep_swapper_choices.deep_swapper_models,
value = state_manager.get_item('deep_swapper_model'),
visible = has_deep_swapper
)
DEEP_SWAPPER_MORPH_SLIDER = gradio.Slider(
label = wording.get('uis.deep_swapper_morph_slider'),
label = translator.get('uis.morph_slider', 'facefusion.processors.modules.deep_swapper'),
value = state_manager.get_item('deep_swapper_morph'),
step = calculate_int_step(processors_choices.deep_swapper_morph_range),
minimum = processors_choices.deep_swapper_morph_range[0],
maximum = processors_choices.deep_swapper_morph_range[-1],
step = calculate_int_step(deep_swapper_choices.deep_swapper_morph_range),
minimum = deep_swapper_choices.deep_swapper_morph_range[0],
maximum = deep_swapper_choices.deep_swapper_morph_range[-1],
visible = has_deep_swapper and load_processor_module('deep_swapper').get_inference_pool() and load_processor_module('deep_swapper').has_morph_input()
)
register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN)

View File

@@ -3,7 +3,7 @@ from typing import List, Optional
import gradio
import facefusion.choices
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording
from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, translator, voice_extractor
from facefusion.filesystem import get_file_name, resolve_file_paths
from facefusion.processors.core import get_processors_modules
from facefusion.types import DownloadProvider
@@ -15,7 +15,7 @@ def render() -> None:
global DOWNLOAD_PROVIDERS_CHECKBOX_GROUP
DOWNLOAD_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('uis.download_providers_checkbox_group'),
label = translator.get('uis.download_providers_checkbox_group'),
choices = facefusion.choices.download_providers,
value = state_manager.get_item('download_providers')
)

Some files were not shown because too many files have changed in this diff Show More