diff --git a/facefusion/processors/choices.py b/facefusion/processors/choices.py
index 9f386bd..4bc4b92 100755
--- a/facefusion/processors/choices.py
+++ b/facefusion/processors/choices.py
@@ -218,3 +218,4 @@ face_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
 face_enhancer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
 frame_colorizer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
 frame_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
+lip_syncer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
diff --git a/facefusion/processors/modules/lip_syncer.py b/facefusion/processors/modules/lip_syncer.py
index ac38cbb..2aae3aa 100755
--- a/facefusion/processors/modules/lip_syncer.py
+++ b/facefusion/processors/modules/lip_syncer.py
@@ -9,6 +9,7 @@ import facefusion.jobs.job_manager
 import facefusion.jobs.job_store
 import facefusion.processors.core as processors
 from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, voice_extractor, wording
+from facefusion.common_helper import create_float_metavar
 from facefusion.audio import create_empty_audio_frame, get_voice_frame, read_static_voice
 from facefusion.common_helper import get_first
 from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
@@ -94,11 +95,13 @@ def register_args(program : ArgumentParser) -> None:
 	group_processors = find_argument_group(program, 'processors')
 	if group_processors:
 		group_processors.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('processors', 'lip_syncer_model', 'wav2lip_gan_96'), choices = processors_choices.lip_syncer_models)
-	facefusion.jobs.job_store.register_step_keys([ 'lip_syncer_model' ])
+		group_processors.add_argument('--lip-syncer-weight', help = wording.get('help.lip_syncer_weight'), type = float, default = config.get_float_value('processors', 'lip_syncer_weight', '0.5'), choices = processors_choices.lip_syncer_weight_range, metavar = create_float_metavar(processors_choices.lip_syncer_weight_range))
+	facefusion.jobs.job_store.register_step_keys([ 'lip_syncer_model', 'lip_syncer_weight' ])
 
 
 def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
 	apply_state_item('lip_syncer_model', args.get('lip_syncer_model'))
+	apply_state_item('lip_syncer_weight', args.get('lip_syncer_weight'))
 
 
 def pre_check() -> bool:
@@ -186,6 +189,7 @@ def prepare_audio_frame(temp_audio_frame : AudioFrame) -> AudioFrame:
 	temp_audio_frame = numpy.maximum(numpy.exp(-5 * numpy.log(10)), temp_audio_frame)
 	temp_audio_frame = numpy.log10(temp_audio_frame) * 1.6 + 3.2
 	temp_audio_frame = temp_audio_frame.clip(-4, 4).astype(numpy.float32)
+	temp_audio_frame = temp_audio_frame * (state_manager.get_item('lip_syncer_weight') * 2.0)
 	temp_audio_frame = numpy.expand_dims(temp_audio_frame, axis = (0, 1))
 	return temp_audio_frame
 
diff --git a/facefusion/processors/types.py b/facefusion/processors/types.py
index e95879d..a499c30 100644
--- a/facefusion/processors/types.py
+++ b/facefusion/processors/types.py
@@ -103,7 +103,8 @@ ProcessorStateKey = Literal\
 	'frame_colorizer_blend',
 	'frame_enhancer_model',
 	'frame_enhancer_blend',
-	'lip_syncer_model'
+	'lip_syncer_model',
+	'lip_syncer_weight'
 ]
 
 ProcessorState = TypedDict('ProcessorState',
{
diff --git a/facefusion/uis/components/lip_syncer_options.py b/facefusion/uis/components/lip_syncer_options.py
index ce2fa2f..e253ee9 100755
--- a/facefusion/uis/components/lip_syncer_options.py
+++ b/facefusion/uis/components/lip_syncer_options.py
@@ -1,18 +1,21 @@
-from typing import List, Optional
+from typing import List, Optional, Tuple
 
 import gradio
 
 from facefusion import state_manager, wording
+from facefusion.common_helper import calc_float_step
 from facefusion.processors import choices as processors_choices
 from facefusion.processors.core import load_processor_module
 from facefusion.processors.types import LipSyncerModel
 from facefusion.uis.core import get_ui_component, register_ui_component
 
 LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+LIP_SYNCER_WEIGHT_SLIDER : Optional[gradio.Slider] = None
 
 
 def render() -> None:
 	global LIP_SYNCER_MODEL_DROPDOWN
+	global LIP_SYNCER_WEIGHT_SLIDER
 
 	has_lip_syncer = 'lip_syncer' in state_manager.get_item('processors')
 	LIP_SYNCER_MODEL_DROPDOWN = gradio.Dropdown(
@@ -21,20 +24,30 @@ def render() -> None:
 		value = state_manager.get_item('lip_syncer_model'),
 		visible = has_lip_syncer
 	)
+	LIP_SYNCER_WEIGHT_SLIDER = gradio.Slider(
+		label = wording.get('uis.lip_syncer_weight_slider'),
+		value = state_manager.get_item('lip_syncer_weight'),
+		step = calc_float_step(processors_choices.lip_syncer_weight_range),
+		minimum = processors_choices.lip_syncer_weight_range[0],
+		maximum = processors_choices.lip_syncer_weight_range[-1],
+		visible = has_lip_syncer
+	)
 
 	register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN)
+	register_ui_component('lip_syncer_weight_slider', LIP_SYNCER_WEIGHT_SLIDER)
 
 
 def listen() -> None:
 	LIP_SYNCER_MODEL_DROPDOWN.change(update_lip_syncer_model, inputs = LIP_SYNCER_MODEL_DROPDOWN, outputs = LIP_SYNCER_MODEL_DROPDOWN)
+	LIP_SYNCER_WEIGHT_SLIDER.release(update_lip_syncer_weight, inputs = LIP_SYNCER_WEIGHT_SLIDER)
 	processors_checkbox_group = get_ui_component('processors_checkbox_group')
 	if processors_checkbox_group:
-		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = LIP_SYNCER_MODEL_DROPDOWN)
+		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ LIP_SYNCER_MODEL_DROPDOWN, LIP_SYNCER_WEIGHT_SLIDER ])
 
 
-def remote_update(processors : List[str]) -> gradio.Dropdown:
+def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
 	has_lip_syncer = 'lip_syncer' in processors
-	return gradio.Dropdown(visible = has_lip_syncer)
+	return gradio.Dropdown(visible = has_lip_syncer), gradio.Slider(visible = has_lip_syncer)
 
 
 def update_lip_syncer_model(lip_syncer_model : LipSyncerModel) -> gradio.Dropdown:
@@ -45,3 +58,7 @@ def update_lip_syncer_model(lip_syncer_model : LipSyncerModel) -> gradio.Dropdow
 	if lip_syncer_module.pre_check():
 		return gradio.Dropdown(value = state_manager.get_item('lip_syncer_model'))
 	return gradio.Dropdown()
+
+
+def update_lip_syncer_weight(lip_syncer_weight : float) -> None:
+	state_manager.set_item('lip_syncer_weight', lip_syncer_weight)
diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py
index f1ad185..9808467 100755
--- a/facefusion/uis/components/preview.py
+++ b/facefusion/uis/components/preview.py
@@ -131,6 +131,7 @@ def listen() -> None:
 		'face_enhancer_weight_slider',
 		'frame_colorizer_blend_slider',
 		'frame_enhancer_blend_slider',
+		'lip_syncer_weight_slider',
 		'reference_face_distance_slider',
 		'face_selector_age_range_slider',
 		'face_mask_blur_slider',
diff --git a/facefusion/uis/types.py b/facefusion/uis/types.py
index 910314c..c6e8fb3 100644
--- a/facefusion/uis/types.py
+++ b/facefusion/uis/types.py
@@ -61,6 +61,7 @@ ComponentName = Literal\
 	'frame_enhancer_model_dropdown',
 	'job_list_job_status_checkbox_group',
 	'lip_syncer_model_dropdown',
+	'lip_syncer_weight_slider',
 	'output_image',
 	'output_video',
 	'output_video_fps_slider',
diff --git a/facefusion/wording.py b/facefusion/wording.py
index 8b3bb99..de2f8ca 100755
--- a/facefusion/wording.py
+++ b/facefusion/wording.py
@@ -303,6 +303,7 @@ WORDING : Dict[str, Any] =\
 		'job_runner_job_action_dropdown': 'JOB ACTION',
 		'job_runner_job_id_dropdown': 'JOB ID',
 		'lip_syncer_model_dropdown': 'LIP SYNCER MODEL',
+		'lip_syncer_weight_slider': 'LIP SYNCER WEIGHT',
 		'log_level_dropdown': 'LOG LEVEL',
 		'output_audio_encoder_dropdown': 'OUTPUT AUDIO ENCODER',
 		'output_audio_quality_slider': 'OUTPUT AUDIO QUALITY',