* Add real_hatgan_x4 model

* Mark it as NEXT

* Force download to be executed and exit

* Fix frame per second interpolation

* 5 to 68 landmark (#456)

* changes

* changes

* Adjust model url

* Cleanup 5 to 68 landmark conversion

* Move everything to face analyser

* Introduce matrix only face helper

* Revert facefusion.ini

* Adjust limit due to false-positive analysis

* changes (#457)

* Use pixel format yuv422p to merge video

* Fix some code

* Minor cleanup

* Add gpen_bfr_1024 and gpen_bfr_2048

* Revert it back to yuv420p due to compatibility issues

* Add debug back to ffmpeg

* Add debug back to ffmpeg

* Migrate to conda (#461)

* Migrate from venv to conda

* Migrate from venv to conda

* Message when conda is not activated

* Use release for every slider (#463)

* Use release event handler for every slider

* Move more sliders to release handler

* Move more sliders to release handler

* Add get_ui_components() to simplify code

* Revert some changes on frame slider

* Add the first iteration of a frame colorizer

* Support for the DDColor model

* Improve model file handling

* Improve model file handling part2

* Remove deoldify

* Remove deoldify

* Voice separator (#468)

* changes

* changes

* changes

* changes

* changes

* changes

* Rename audio extractor to voice extractor

* Cosmetic changes

* Cosmetic changes

* Fix fps lowering and boosting

* Fix fps lowering and boosting

* Fix fps lowering and boosting

* Some refactoring for audio.py and some astype() here and there (#470)

* Some refactoring for audio.py and some astype() here and there

* Fix lint

* Spacing

* Add mp3 to benchmark suite for lip syncer testing

* Improve naming

* Adjust chunk size

* Use higher quality

* Revert "Use higher quality"

This reverts commit d32f28757251ecc0f48214073adf54f3631b1289.

* Improve naming in ffmpeg.py

* Simplify code

* Better fps calculation

* Fix naming here and there

* Add back real esrgan x2

* Remove trailing comma

* Update wording and README

* Use semaphore to prevent frame colorizer memory issues

* Revert "Remove deoldify"

This reverts commit bd8034cbc71fe701f78dddec3057dc98593b2162.

* Remove unused type from frame colorizer

* Adjust naming

* Add missing clear of model initializer

* Change nvenc preset mapping to support old FFMPEG 4

* Update onnxruntime to 1.17.1

* Fix lint

* Prepare 2.5.0

* Fix Gradio overrides

* Add Deoldify Artistic back

* Feat/audio refactoring (#476)

* Improve audio naming and variables

* Improve audio naming and variables

* Refactor voice extractor like crazy

* Refactor voice extractor like crazy

* Remove spaces

* Update the usage

---------

Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
This commit is contained in:
Henry Ruhs
2024-04-09 15:40:55 +02:00
committed by GitHub
parent 6e67d7bff6
commit 4ccf4c24c7
45 changed files with 1007 additions and 405 deletions

View File

@@ -76,7 +76,7 @@ def listen() -> None:
def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
facefusion.globals.source_paths = [ '.assets/examples/source.jpg' ]
facefusion.globals.source_paths = [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ]
facefusion.globals.output_path = tempfile.gettempdir()
facefusion.globals.face_landmarker_score = 0
facefusion.globals.temp_frame_format = 'bmp'
@@ -87,7 +87,8 @@ def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[
if target_paths:
pre_process()
for target_path in target_paths:
benchmark_results.append(benchmark(target_path, benchmark_cycles))
facefusion.globals.target_path = target_path
benchmark_results.append(benchmark(benchmark_cycles))
yield benchmark_results
post_process()
@@ -103,10 +104,8 @@ def post_process() -> None:
clear_static_faces()
def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
def benchmark(benchmark_cycles : int) -> List[Any]:
process_times = []
total_fps = 0.0
facefusion.globals.target_path = target_path
video_frame_total = count_video_frame_total(facefusion.globals.target_path)
output_video_resolution = detect_video_resolution(facefusion.globals.target_path)
facefusion.globals.output_video_resolution = pack_resolution(output_video_resolution)
@@ -116,13 +115,12 @@ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
start_time = perf_counter()
conditional_process()
end_time = perf_counter()
process_time = end_time - start_time
total_fps += video_frame_total / process_time
process_times.append(process_time)
process_times.append(end_time - start_time)
average_run = round(statistics.mean(process_times), 2)
fastest_run = round(min(process_times), 2)
slowest_run = round(max(process_times), 2)
relative_fps = round(total_fps / benchmark_cycles, 2)
relative_fps = round(video_frame_total * benchmark_cycles / sum(process_times), 2)
return\
[
facefusion.globals.target_path,

View File

@@ -21,7 +21,7 @@ def render() -> None:
def listen() -> None:
EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)
EXECUTION_QUEUE_COUNT_SLIDER.release(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)
def update_execution_queue_count(execution_queue_count : int = 1) -> None:

View File

@@ -21,7 +21,7 @@ def render() -> None:
def listen() -> None:
EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
EXECUTION_THREAD_COUNT_SLIDER.release(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
def update_execution_thread_count(execution_thread_count : int = 1) -> None:

View File

@@ -92,11 +92,11 @@ def render() -> None:
def listen() -> None:
FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ])
FACE_MASK_BLUR_SLIDER.change(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
FACE_MASK_BLUR_SLIDER.release(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP)
face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]
for face_mask_padding_slider in face_mask_padding_sliders:
face_mask_padding_slider.change(update_face_mask_padding, inputs = face_mask_padding_sliders)
face_mask_padding_slider.release(update_face_mask_padding, inputs = face_mask_padding_sliders)
def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]:

View File

@@ -10,8 +10,7 @@ from facefusion.vision import get_video_frame, read_static_image, normalize_fram
from facefusion.filesystem import is_image, is_video
from facefusion.face_analyser import get_many_faces
from facefusion.typing import VisionFrame, FaceSelectorMode
from facefusion.uis.core import get_ui_component, register_ui_component
from facefusion.uis.typing import ComponentName
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
@@ -59,39 +58,39 @@ def render() -> None:
def listen() -> None:
FACE_SELECTOR_MODE_DROPDOWN.change(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
multi_component_names : List[ComponentName] =\
REFERENCE_FACE_DISTANCE_SLIDER.release(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
for ui_component in get_ui_components(
[
'target_image',
'target_video'
]
for component_name in multi_component_names:
component = get_ui_component(component_name)
if component:
for method in [ 'upload', 'change', 'clear' ]:
getattr(component, method)(update_reference_face_position)
getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
change_one_component_names : List[ComponentName] =\
]):
for method in [ 'upload', 'change', 'clear' ]:
getattr(ui_component, method)(update_reference_face_position)
getattr(ui_component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
for ui_component in get_ui_components(
[
'face_analyser_order_dropdown',
'face_analyser_age_dropdown',
'face_analyser_gender_dropdown'
]
for component_name in change_one_component_names:
component = get_ui_component(component_name)
if component:
component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
change_two_component_names : List[ComponentName] =\
]):
ui_component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
for ui_component in get_ui_components(
[
'face_detector_model_dropdown',
'face_detector_size_dropdown',
'face_detector_size_dropdown'
]):
ui_component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
for ui_component in get_ui_components(
[
'face_detector_score_slider',
'face_landmarker_score_slider'
]
for component_name in change_two_component_names:
component = get_ui_component(component_name)
if component:
component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
]):
ui_component.release(clear_and_update_reference_position_gallery, outputs=REFERENCE_FACE_POSITION_GALLERY)
preview_frame_slider = get_ui_component('preview_frame_slider')
if preview_frame_slider:
preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)

View File

@@ -5,13 +5,15 @@ import facefusion.globals
from facefusion import face_analyser, wording
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel, LipSyncerModel
from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
from facefusion.uis.core import get_ui_component, register_ui_component
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_COLORIZER_BLEND_SLIDER : Optional[gradio.Slider] = None
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -22,6 +24,8 @@ def render() -> None:
global FACE_ENHANCER_MODEL_DROPDOWN
global FACE_ENHANCER_BLEND_SLIDER
global FACE_SWAPPER_MODEL_DROPDOWN
global FRAME_COLORIZER_MODEL_DROPDOWN
global FRAME_COLORIZER_BLEND_SLIDER
global FRAME_ENHANCER_MODEL_DROPDOWN
global FRAME_ENHANCER_BLEND_SLIDER
global LIP_SYNCER_MODEL_DROPDOWN
@@ -52,6 +56,20 @@ def render() -> None:
value = frame_processors_globals.face_swapper_model,
visible = 'face_swapper' in facefusion.globals.frame_processors
)
FRAME_COLORIZER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.frame_colorizer_model_dropdown'),
choices = frame_processors_choices.frame_colorizer_models,
value = frame_processors_globals.frame_colorizer_model,
visible = 'frame_colorizer' in facefusion.globals.frame_processors
)
FRAME_COLORIZER_BLEND_SLIDER = gradio.Slider(
label = wording.get('uis.frame_colorizer_blend_slider'),
value = frame_processors_globals.frame_colorizer_blend,
step = frame_processors_choices.frame_colorizer_blend_range[1] - frame_processors_choices.frame_colorizer_blend_range[0],
minimum = frame_processors_choices.frame_colorizer_blend_range[0],
maximum = frame_processors_choices.frame_colorizer_blend_range[-1],
visible = 'frame_colorizer' in facefusion.globals.frame_processors
)
FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.frame_enhancer_model_dropdown'),
choices = frame_processors_choices.frame_enhancer_models,
@@ -76,6 +94,8 @@ def render() -> None:
register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
register_ui_component('frame_colorizer_model_dropdown', FRAME_COLORIZER_MODEL_DROPDOWN)
register_ui_component('frame_colorizer_blend_slider', FRAME_COLORIZER_BLEND_SLIDER)
register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN)
@@ -84,23 +104,26 @@ def render() -> None:
def listen() -> None:
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
FACE_ENHANCER_BLEND_SLIDER.release(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
FRAME_COLORIZER_MODEL_DROPDOWN.change(update_frame_colorizer_model, inputs = FRAME_COLORIZER_MODEL_DROPDOWN, outputs = FRAME_COLORIZER_MODEL_DROPDOWN)
FRAME_COLORIZER_BLEND_SLIDER.release(update_frame_colorizer_blend, inputs = FRAME_COLORIZER_BLEND_SLIDER)
FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
FRAME_ENHANCER_BLEND_SLIDER.release(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
LIP_SYNCER_MODEL_DROPDOWN.change(update_lip_syncer_model, inputs = LIP_SYNCER_MODEL_DROPDOWN, outputs = LIP_SYNCER_MODEL_DROPDOWN)
frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
if frame_processors_checkbox_group:
frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = [ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_SWAPPER_MODEL_DROPDOWN, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, LIP_SYNCER_MODEL_DROPDOWN ])
frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = [ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_SWAPPER_MODEL_DROPDOWN, FRAME_COLORIZER_MODEL_DROPDOWN, FRAME_COLORIZER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, LIP_SYNCER_MODEL_DROPDOWN ])
def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown]:
def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.Dropdown]:
has_face_debugger = 'face_debugger' in frame_processors
has_face_enhancer = 'face_enhancer' in frame_processors
has_face_swapper = 'face_swapper' in frame_processors
has_frame_colorizer = 'frame_colorizer' in frame_processors
has_frame_enhancer = 'frame_enhancer' in frame_processors
has_lip_syncer = 'lip_syncer' in frame_processors
return gradio.CheckboxGroup(visible = has_face_debugger), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.Dropdown(visible = has_lip_syncer)
return gradio.CheckboxGroup(visible = has_face_debugger), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_frame_colorizer), gradio.Slider(visible = has_frame_colorizer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.Dropdown(visible = has_lip_syncer)
def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
@@ -132,6 +155,7 @@ def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.D
if face_swapper_model == 'uniface_256':
facefusion.globals.face_recognizer_model = 'arcface_uniface'
face_swapper_module = load_frame_processor_module('face_swapper')
face_swapper_module.clear_model_initializer()
face_swapper_module.clear_frame_processor()
face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
if face_analyser.pre_check() and face_swapper_module.pre_check():
@@ -139,6 +163,20 @@ def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.D
return gradio.Dropdown()
def update_frame_colorizer_model(frame_colorizer_model : FrameColorizerModel) -> gradio.Dropdown:
frame_processors_globals.frame_colorizer_model = frame_colorizer_model
frame_colorizer_module = load_frame_processor_module('frame_colorizer')
frame_colorizer_module.clear_frame_processor()
frame_colorizer_module.set_options('model', frame_colorizer_module.MODELS[frame_colorizer_model])
if frame_colorizer_module.pre_check():
return gradio.Dropdown(value = frame_processors_globals.frame_colorizer_model)
return gradio.Dropdown()
def update_frame_colorizer_blend(frame_colorizer_blend : int) -> None:
frame_processors_globals.frame_colorizer_blend = frame_colorizer_blend
def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
frame_processors_globals.frame_enhancer_model = frame_enhancer_model
frame_enhancer_module = load_frame_processor_module('frame_enhancer')

View File

@@ -6,15 +6,15 @@ import facefusion.choices
from facefusion.typing import VideoMemoryStrategy
from facefusion import wording
VIDEO_MEMORY_STRATEGY : Optional[gradio.Dropdown] = None
VIDEO_MEMORY_STRATEGY_DROPDOWN : Optional[gradio.Dropdown] = None
SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
global VIDEO_MEMORY_STRATEGY
global VIDEO_MEMORY_STRATEGY_DROPDOWN
global SYSTEM_MEMORY_LIMIT_SLIDER
VIDEO_MEMORY_STRATEGY = gradio.Dropdown(
VIDEO_MEMORY_STRATEGY_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.video_memory_strategy_dropdown'),
choices = facefusion.choices.video_memory_strategies,
value = facefusion.globals.video_memory_strategy
@@ -29,8 +29,8 @@ def render() -> None:
def listen() -> None:
VIDEO_MEMORY_STRATEGY.change(update_video_memory_strategy, inputs = VIDEO_MEMORY_STRATEGY)
SYSTEM_MEMORY_LIMIT_SLIDER.change(update_system_memory_limit, inputs = SYSTEM_MEMORY_LIMIT_SLIDER)
VIDEO_MEMORY_STRATEGY_DROPDOWN.change(update_video_memory_strategy, inputs = VIDEO_MEMORY_STRATEGY_DROPDOWN)
SYSTEM_MEMORY_LIMIT_SLIDER.release(update_system_memory_limit, inputs = SYSTEM_MEMORY_LIMIT_SLIDER)
def update_video_memory_strategy(video_memory_strategy : VideoMemoryStrategy) -> None:

View File

@@ -1,4 +1,4 @@
from typing import Optional, Tuple, List
from typing import Optional, Tuple
import gradio
import facefusion.globals
@@ -6,8 +6,7 @@ import facefusion.choices
from facefusion import wording
from facefusion.typing import OutputVideoEncoder, OutputVideoPreset, Fps
from facefusion.filesystem import is_image, is_video
from facefusion.uis.typing import ComponentName
from facefusion.uis.core import get_ui_component, register_ui_component
from facefusion.uis.core import get_ui_components, register_ui_component
from facefusion.vision import detect_image_resolution, create_image_resolutions, detect_video_fps, detect_video_resolution, create_video_resolutions, pack_resolution
OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None
@@ -98,23 +97,21 @@ def render() -> None:
def listen() -> None:
OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX)
OUTPUT_IMAGE_QUALITY_SLIDER.change(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
OUTPUT_IMAGE_QUALITY_SLIDER.release(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
OUTPUT_IMAGE_RESOLUTION_DROPDOWN.change(update_output_image_resolution, inputs = OUTPUT_IMAGE_RESOLUTION_DROPDOWN)
OUTPUT_VIDEO_ENCODER_DROPDOWN.change(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
OUTPUT_VIDEO_PRESET_DROPDOWN.change(update_output_video_preset, inputs = OUTPUT_VIDEO_PRESET_DROPDOWN)
OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
OUTPUT_VIDEO_QUALITY_SLIDER.release(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
OUTPUT_VIDEO_RESOLUTION_DROPDOWN.change(update_output_video_resolution, inputs = OUTPUT_VIDEO_RESOLUTION_DROPDOWN)
OUTPUT_VIDEO_FPS_SLIDER.change(update_output_video_fps, inputs = OUTPUT_VIDEO_FPS_SLIDER)
multi_component_names : List[ComponentName] =\
OUTPUT_VIDEO_FPS_SLIDER.release(update_output_video_fps, inputs = OUTPUT_VIDEO_FPS_SLIDER)
for ui_component in get_ui_components(
[
'target_image',
'target_video'
]
for component_name in multi_component_names:
component = get_ui_component(component_name)
if component:
for method in [ 'upload', 'change', 'clear' ]:
getattr(component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_FPS_SLIDER ])
]):
for method in [ 'upload', 'change', 'clear' ]:
getattr(ui_component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_FPS_SLIDER ])
def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider]:

View File

@@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional
from typing import Any, Dict, Optional
from time import sleep
import cv2
import gradio
@@ -16,8 +16,7 @@ from facefusion.vision import get_video_frame, count_video_frame_total, normaliz
from facefusion.filesystem import is_image, is_video, filter_audio_paths
from facefusion.content_analyser import analyse_frame
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.uis.typing import ComponentName
from facefusion.uis.core import get_ui_component, register_ui_component
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
PREVIEW_IMAGE : Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
@@ -72,69 +71,73 @@ def listen() -> None:
reference_face_position_gallery = get_ui_component('reference_face_position_gallery')
if reference_face_position_gallery:
reference_face_position_gallery.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
multi_one_component_names : List[ComponentName] =\
for ui_component in get_ui_components(
[
'source_audio',
'source_image',
'target_image',
'target_video'
]
for component_name in multi_one_component_names:
component = get_ui_component(component_name)
if component:
for method in [ 'upload', 'change', 'clear' ]:
getattr(component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
multi_two_component_names : List[ComponentName] =\
]):
for method in [ 'upload', 'change', 'clear' ]:
getattr(ui_component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
for ui_component in get_ui_components(
[
'target_image',
'target_video'
]
for component_name in multi_two_component_names:
component = get_ui_component(component_name)
if component:
for method in [ 'upload', 'change', 'clear' ]:
getattr(component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
change_one_component_names : List[ComponentName] =\
]):
for method in [ 'upload', 'change', 'clear' ]:
getattr(ui_component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
for ui_component in get_ui_components(
[
'face_debugger_items_checkbox_group',
'face_selector_mode_dropdown',
'face_mask_types_checkbox_group',
'face_mask_region_checkbox_group',
'face_analyser_order_dropdown',
'face_analyser_age_dropdown',
'face_analyser_gender_dropdown'
]):
ui_component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
for ui_component in get_ui_components(
[
'face_enhancer_blend_slider',
'frame_colorizer_blend_slider',
'frame_enhancer_blend_slider',
'trim_frame_start_slider',
'trim_frame_end_slider',
'face_selector_mode_dropdown',
'reference_face_distance_slider',
'face_mask_types_checkbox_group',
'face_mask_blur_slider',
'face_mask_padding_top_slider',
'face_mask_padding_bottom_slider',
'face_mask_padding_left_slider',
'face_mask_padding_right_slider',
'face_mask_region_checkbox_group',
'face_analyser_order_dropdown',
'face_analyser_age_dropdown',
'face_analyser_gender_dropdown',
'output_video_fps_slider'
]
for component_name in change_one_component_names:
component = get_ui_component(component_name)
if component:
component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
change_two_component_names : List[ComponentName] =\
]):
ui_component.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
for ui_component in get_ui_components(
[
'frame_processors_checkbox_group',
'face_enhancer_model_dropdown',
'face_swapper_model_dropdown',
'frame_colorizer_model_dropdown',
'frame_enhancer_model_dropdown',
'lip_syncer_model_dropdown',
'face_detector_model_dropdown',
'face_detector_size_dropdown',
'face_detector_size_dropdown'
]):
ui_component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
for ui_component in get_ui_components(
[
'face_detector_score_slider',
'face_landmarker_score_slider'
]
for component_name in change_two_component_names:
component = get_ui_component(component_name)
if component:
component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
]):
ui_component.release(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:

View File

@@ -47,8 +47,8 @@ def render() -> None:
def listen() -> None:
TRIM_FRAME_START_SLIDER.change(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER)
TRIM_FRAME_END_SLIDER.change(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER)
TRIM_FRAME_START_SLIDER.release(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER)
TRIM_FRAME_END_SLIDER.release(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER)
target_video = get_ui_component('target_video')
if target_video:
for method in [ 'upload', 'change', 'clear' ]:

View File

@@ -1,4 +1,4 @@
from typing import Optional, Generator, Deque, List
from typing import Optional, Generator, Deque
import os
import platform
import subprocess
@@ -19,8 +19,8 @@ from facefusion.face_analyser import get_average_face
from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
from facefusion.ffmpeg import open_ffmpeg
from facefusion.vision import normalize_frame_color, read_static_images, unpack_resolution
from facefusion.uis.typing import StreamMode, WebcamMode, ComponentName
from facefusion.uis.core import get_ui_component
from facefusion.uis.typing import StreamMode, WebcamMode
from facefusion.uis.core import get_ui_component, get_ui_components
WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None
WEBCAM_IMAGE : Optional[gradio.Image] = None
@@ -76,7 +76,8 @@ def listen() -> None:
if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider:
start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE)
WEBCAM_STOP_BUTTON.click(stop, cancels = start_event)
change_two_component_names : List[ComponentName] =\
for ui_component in get_ui_components(
[
'frame_processors_checkbox_group',
'face_swapper_model_dropdown',
@@ -84,11 +85,8 @@ def listen() -> None:
'frame_enhancer_model_dropdown',
'lip_syncer_model_dropdown',
'source_image'
]
for component_name in change_two_component_names:
component = get_ui_component(component_name)
if component:
component.change(update, cancels = start_event)
]):
ui_component.change(update, cancels = start_event)
def start(webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) -> Generator[VisionFrame, None, None]: