diff --git a/facefusion.ini b/facefusion.ini index 6306b51..172ecaf 100644 --- a/facefusion.ini +++ b/facefusion.ini @@ -35,9 +35,10 @@ reference_frame_number = face_occluder_model = face_parser_model = face_mask_types = +face_mask_areas = +face_mask_regions = face_mask_blur = face_mask_padding = -face_mask_regions = [frame_extraction] trim_frame_start = diff --git a/facefusion/args.py b/facefusion/args.py index 71ca179..929713d 100644 --- a/facefusion/args.py +++ b/facefusion/args.py @@ -74,9 +74,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: apply_state_item('face_occluder_model', args.get('face_occluder_model')) apply_state_item('face_parser_model', args.get('face_parser_model')) apply_state_item('face_mask_types', args.get('face_mask_types')) + apply_state_item('face_mask_areas', args.get('face_mask_areas')) + apply_state_item('face_mask_regions', args.get('face_mask_regions')) apply_state_item('face_mask_blur', args.get('face_mask_blur')) apply_state_item('face_mask_padding', normalize_padding(args.get('face_mask_padding'))) - apply_state_item('face_mask_regions', args.get('face_mask_regions')) # frame extraction apply_state_item('trim_frame_start', args.get('trim_frame_start')) apply_state_item('trim_frame_end', args.get('trim_frame_end')) diff --git a/facefusion/choices.py b/facefusion/choices.py index f925e92..eaab50b 100755 --- a/facefusion/choices.py +++ b/facefusion/choices.py @@ -2,7 +2,7 @@ import logging from typing import List, Sequence from facefusion.common_helper import create_float_range, create_int_range -from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, 
LogLevel, LogLevelSet, Race, Score, TempFrameFormat, UiWorkflow, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, WebcamMode +from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskRegionSet, FaceMaskArea, FaceMaskAreaSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, LogLevel, LogLevelSet, Race, Score, TempFrameFormat, UiWorkflow, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, WebcamMode face_detector_set : FaceDetectorSet =\ { @@ -19,7 +19,13 @@ face_selector_genders : List[Gender] = [ 'female', 'male' ] face_selector_races : List[Race] = [ 'white', 'black', 'latino', 'asian', 'indian', 'arabic' ] face_occluder_models : List[FaceOccluderModel] = [ 'xseg_1', 'xseg_2', 'xseg_3' ] face_parser_models : List[FaceParserModel] = [ 'bisenet_resnet_18', 'bisenet_resnet_34' ] -face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ] +face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'area', 'region' ] +face_mask_area_set : FaceMaskAreaSet =\ +{ + 'upper-head': [ 2, 14, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47 ], + 'lower-head': [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67 ], + 'mouth': [ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67 ] +} face_mask_region_set : FaceMaskRegionSet =\ { 'skin': 1, @@ -33,6 +39,7 @@ face_mask_region_set : FaceMaskRegionSet =\ 'upper-lip': 12, 'lower-lip': 13 } +face_mask_areas : List[FaceMaskArea] = list(face_mask_area_set.keys()) face_mask_regions : List[FaceMaskRegion] = 
list(face_mask_region_set.keys()) audio_type_set : AudioTypeSet =\ diff --git a/facefusion/face_masker.py b/facefusion/face_masker.py index 46c8402..1e64c50 100755 --- a/facefusion/face_masker.py +++ b/facefusion/face_masker.py @@ -10,7 +10,7 @@ from facefusion import inference_manager, state_manager from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import resolve_relative_path from facefusion.thread_helper import conditional_thread_semaphore -from facefusion.types import DownloadScope, DownloadSet, FaceLandmark68, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame +from facefusion.types import DownloadScope, DownloadSet, FaceLandmark68, FaceMaskArea, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame @lru_cache(maxsize = None) @@ -183,6 +183,20 @@ def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask: return occlusion_mask +def create_area_mask(face_landmark_68 : FaceLandmark68, face_mask_areas : List[FaceMaskArea]) -> Mask: + landmark_points = [] + + for face_mask_area in face_mask_areas: + if face_mask_area in facefusion.choices.face_mask_area_set: + landmark_points.extend(facefusion.choices.face_mask_area_set[face_mask_area]) + + convex_hull = cv2.convexHull(face_landmark_68[landmark_points].astype(numpy.int32)) + area_mask = numpy.zeros((512, 512)).astype(numpy.float32) + cv2.fillConvexPoly(area_mask, convex_hull, 1.0) # type: ignore[call-overload] + area_mask = (cv2.GaussianBlur(area_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2 + return area_mask + + def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List[FaceMaskRegion]) -> Mask: model_name = state_manager.get_item('face_parser_model') model_size = create_static_model_set('full').get(model_name).get('size') @@ -199,15 +213,6 @@ def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List return region_mask -def 
create_mouth_mask(face_landmark_68 : FaceLandmark68) -> Mask: - convex_hull = cv2.convexHull(face_landmark_68[numpy.r_[3:14, 31:36]].astype(numpy.int32)) - mouth_mask : Mask = numpy.zeros((512, 512)).astype(numpy.float32) - mouth_mask = cv2.fillConvexPoly(mouth_mask, convex_hull, 1.0) #type:ignore[call-overload] - mouth_mask = cv2.erode(mouth_mask.clip(0, 1), numpy.ones((21, 3))) - mouth_mask = cv2.GaussianBlur(mouth_mask, (0, 0), sigmaX = 1, sigmaY = 15) - return mouth_mask - - def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask: model_name = state_manager.get_item('face_occluder_model') face_occluder = get_inference_pool().get(model_name) diff --git a/facefusion/processors/modules/face_debugger.py b/facefusion/processors/modules/face_debugger.py index ea53b7d..8993107 100755 --- a/facefusion/processors/modules/face_debugger.py +++ b/facefusion/processors/modules/face_debugger.py @@ -10,7 +10,7 @@ import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, process_manager, state_manager, video_manager, wording from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import warp_face_by_face_landmark_5 -from facefusion.face_masker import create_occlusion_mask, create_region_mask, create_static_box_mask +from facefusion.face_masker import create_area_mask, create_occlusion_mask, create_region_mask, create_static_box_mask from facefusion.face_selector import find_similar_faces, sort_and_filter_faces from facefusion.face_store import get_reference_faces from facefusion.filesystem import in_directory, same_file_extension @@ -104,6 +104,11 @@ def debug_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFra occlusion_mask = create_occlusion_mask(crop_vision_frame) crop_masks.append(occlusion_mask) + if 'area' in state_manager.get_item('face_mask_types'): + landmarks_68 = 
cv2.transform(target_face.landmark_set.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2) + area_mask = create_area_mask(landmarks_68, state_manager.get_item('face_mask_areas')) + crop_masks.append(area_mask) + if 'region' in state_manager.get_item('face_mask_types'): region_mask = create_region_mask(crop_vision_frame, state_manager.get_item('face_mask_regions')) crop_masks.append(region_mask) diff --git a/facefusion/processors/modules/lip_syncer.py b/facefusion/processors/modules/lip_syncer.py index 2b2360f..a9855a6 100755 --- a/facefusion/processors/modules/lip_syncer.py +++ b/facefusion/processors/modules/lip_syncer.py @@ -15,7 +15,7 @@ from facefusion.common_helper import get_first from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import create_bounding_box, paste_back, warp_face_by_bounding_box, warp_face_by_face_landmark_5 -from facefusion.face_masker import create_mouth_mask, create_occlusion_mask, create_static_box_mask +from facefusion.face_masker import create_area_mask, create_occlusion_mask from facefusion.face_selector import find_similar_faces, sort_and_filter_faces from facefusion.face_store import get_reference_faces from facefusion.filesystem import filter_audio_paths, has_audio, in_directory, is_image, is_video, resolve_relative_path, same_file_extension @@ -150,12 +150,10 @@ def sync_lip(target_face : Face, temp_audio_frame : AudioFrame, temp_vision_fram face_landmark_68 = cv2.transform(target_face.landmark_set.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2) bounding_box = create_bounding_box(face_landmark_68) bounding_box[1] -= numpy.abs(bounding_box[3] - bounding_box[1]) * 0.125 - mouth_mask = create_mouth_mask(face_landmark_68) - box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], state_manager.get_item('face_mask_blur'), 
state_manager.get_item('face_mask_padding')) + area_mask = create_area_mask(face_landmark_68, [ 'lower-head' ]) crop_masks =\ [ - mouth_mask, - box_mask + area_mask ] if 'occlusion' in state_manager.get_item('face_mask_types'): diff --git a/facefusion/program.py b/facefusion/program.py index dbd6a90..72146c1 100755 --- a/facefusion/program.py +++ b/facefusion/program.py @@ -136,10 +136,11 @@ def create_face_masker_program() -> ArgumentParser: group_face_masker.add_argument('--face-occluder-model', help = wording.get('help.face_occluder_model'), default = config.get_str_value('face_masker', 'face_occluder_model', 'xseg_1'), choices = facefusion.choices.face_occluder_models) group_face_masker.add_argument('--face-parser-model', help = wording.get('help.face_parser_model'), default = config.get_str_value('face_masker', 'face_parser_model', 'bisenet_resnet_34'), choices = facefusion.choices.face_parser_models) group_face_masker.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_masker', 'face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES') + group_face_masker.add_argument('--face-mask-areas', help = wording.get('help.face_mask_areas').format(choices = ', '.join(facefusion.choices.face_mask_areas)), default = config.get_str_list('face_masker', 'face_mask_areas', ' '.join(facefusion.choices.face_mask_areas)), choices = facefusion.choices.face_mask_areas, nargs = '+', metavar = 'FACE_MASK_AREAS') + group_face_masker.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_masker', 'face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS') 
group_face_masker.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_masker', 'face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_float_metavar(facefusion.choices.face_mask_blur_range)) group_face_masker.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_masker', 'face_mask_padding', '0 0 0 0'), nargs = '+') - group_face_masker.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_masker', 'face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS') - job_store.register_step_keys([ 'face_occluder_model', 'face_parser_model', 'face_mask_types', 'face_mask_blur', 'face_mask_padding', 'face_mask_regions' ]) + job_store.register_step_keys([ 'face_occluder_model', 'face_parser_model', 'face_mask_types', 'face_mask_areas', 'face_mask_regions', 'face_mask_blur', 'face_mask_padding' ]) return program diff --git a/facefusion/types.py b/facefusion/types.py index 57210b0..c12e80f 100755 --- a/facefusion/types.py +++ b/facefusion/types.py @@ -107,9 +107,11 @@ FaceSelectorMode = Literal['many', 'one', 'reference'] FaceSelectorOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best'] FaceOccluderModel = Literal['xseg_1', 'xseg_2', 'xseg_3'] FaceParserModel = Literal['bisenet_resnet_18', 'bisenet_resnet_34'] -FaceMaskType = Literal['box', 'occlusion', 'region'] +FaceMaskType = Literal['box', 'occlusion', 'area', 'region'] +FaceMaskArea = Literal['upper-head', 'lower-head', 'mouth'] FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 
'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip'] FaceMaskRegionSet : TypeAlias = Dict[FaceMaskRegion, int] +FaceMaskAreaSet : TypeAlias = Dict[FaceMaskArea, List[int]] AudioFormat = Literal['flac', 'm4a', 'mp3', 'ogg', 'opus', 'wav'] ImageFormat = Literal['bmp', 'jpeg', 'png', 'tiff', 'webp'] @@ -258,6 +260,7 @@ StateKey = Literal\ 'face_mask_blur', 'face_mask_padding', 'face_mask_regions', + 'face_mask_areas', 'trim_frame_start', 'trim_frame_end', 'temp_frame_format', @@ -323,6 +326,7 @@ State = TypedDict('State', 'face_mask_blur' : float, 'face_mask_padding' : Padding, 'face_mask_regions' : List[FaceMaskRegion], + 'face_mask_areas' : List[FaceMaskArea], 'trim_frame_start' : int, 'trim_frame_end' : int, 'temp_frame_format' : TempFrameFormat, diff --git a/facefusion/uis/components/face_masker.py b/facefusion/uis/components/face_masker.py index 6856c34..e01a5cd 100755 --- a/facefusion/uis/components/face_masker.py +++ b/facefusion/uis/components/face_masker.py @@ -5,12 +5,13 @@ import gradio import facefusion.choices from facefusion import face_masker, state_manager, wording from facefusion.common_helper import calc_float_step, calc_int_step -from facefusion.types import FaceMaskRegion, FaceMaskType, FaceOccluderModel, FaceParserModel +from facefusion.types import FaceMaskArea, FaceMaskRegion, FaceMaskType, FaceOccluderModel, FaceParserModel from facefusion.uis.core import register_ui_component FACE_OCCLUDER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_PARSER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None +FACE_MASK_AREAS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_MASK_REGIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None @@ -23,6 +24,7 @@ def render() -> None: global FACE_OCCLUDER_MODEL_DROPDOWN global FACE_PARSER_MODEL_DROPDOWN global 
FACE_MASK_TYPES_CHECKBOX_GROUP + global FACE_MASK_AREAS_CHECKBOX_GROUP global FACE_MASK_REGIONS_CHECKBOX_GROUP global FACE_MASK_BLUR_SLIDER global FACE_MASK_PADDING_TOP_SLIDER @@ -32,6 +34,7 @@ def render() -> None: has_box_mask = 'box' in state_manager.get_item('face_mask_types') has_region_mask = 'region' in state_manager.get_item('face_mask_types') + has_area_mask = 'area' in state_manager.get_item('face_mask_types') with gradio.Row(): FACE_OCCLUDER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_occluder_model_dropdown'), @@ -48,6 +51,12 @@ def render() -> None: choices = facefusion.choices.face_mask_types, value = state_manager.get_item('face_mask_types') ) + FACE_MASK_AREAS_CHECKBOX_GROUP = gradio.CheckboxGroup( + label = wording.get('uis.face_mask_areas_checkbox_group'), + choices = facefusion.choices.face_mask_areas, + value = state_manager.get_item('face_mask_areas'), + visible = has_area_mask + ) FACE_MASK_REGIONS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.face_mask_regions_checkbox_group'), choices = facefusion.choices.face_mask_regions, @@ -100,6 +109,7 @@ def render() -> None: register_ui_component('face_occluder_model_dropdown', FACE_OCCLUDER_MODEL_DROPDOWN) register_ui_component('face_parser_model_dropdown', FACE_PARSER_MODEL_DROPDOWN) register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP) + register_ui_component('face_mask_areas_checkbox_group', FACE_MASK_AREAS_CHECKBOX_GROUP) register_ui_component('face_mask_regions_checkbox_group', FACE_MASK_REGIONS_CHECKBOX_GROUP) register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER) register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER) @@ -111,9 +121,11 @@ def render() -> None: def listen() -> None: FACE_OCCLUDER_MODEL_DROPDOWN.change(update_face_occluder_model, inputs = FACE_OCCLUDER_MODEL_DROPDOWN) FACE_PARSER_MODEL_DROPDOWN.change(update_face_parser_model, inputs = FACE_PARSER_MODEL_DROPDOWN) - 
FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_types, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_REGIONS_CHECKBOX_GROUP, FACE_MASK_BLUR_SLIDER, FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]) + FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_types, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_AREAS_CHECKBOX_GROUP, FACE_MASK_REGIONS_CHECKBOX_GROUP, FACE_MASK_BLUR_SLIDER, FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]) + FACE_MASK_AREAS_CHECKBOX_GROUP.change(update_face_mask_areas, inputs = FACE_MASK_AREAS_CHECKBOX_GROUP, outputs = FACE_MASK_AREAS_CHECKBOX_GROUP) FACE_MASK_REGIONS_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGIONS_CHECKBOX_GROUP, outputs = FACE_MASK_REGIONS_CHECKBOX_GROUP) FACE_MASK_BLUR_SLIDER.release(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER) + face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ] for face_mask_padding_slider in face_mask_padding_sliders: face_mask_padding_slider.release(update_face_mask_padding, inputs = face_mask_padding_sliders) @@ -137,12 +149,19 @@ def update_face_parser_model(face_parser_model : FaceParserModel) -> gradio.Drop return gradio.Dropdown() -def update_face_mask_types(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.CheckboxGroup, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider]: +def update_face_mask_types(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.CheckboxGroup, gradio.CheckboxGroup, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider]: face_mask_types = face_mask_types or 
facefusion.choices.face_mask_types state_manager.set_item('face_mask_types', face_mask_types) has_box_mask = 'box' in face_mask_types + has_area_mask = 'area' in face_mask_types has_region_mask = 'region' in face_mask_types - return gradio.CheckboxGroup(value = state_manager.get_item('face_mask_types')), gradio.CheckboxGroup(visible = has_region_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask) + return gradio.CheckboxGroup(value = state_manager.get_item('face_mask_types')), gradio.CheckboxGroup(visible = has_area_mask), gradio.CheckboxGroup(visible = has_region_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask), gradio.Slider(visible = has_box_mask) + + +def update_face_mask_areas(face_mask_areas : List[FaceMaskArea]) -> gradio.CheckboxGroup: + face_mask_areas = face_mask_areas or facefusion.choices.face_mask_areas + state_manager.set_item('face_mask_areas', face_mask_areas) + return gradio.CheckboxGroup(value = state_manager.get_item('face_mask_areas')) def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup: diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py index 9808467..d6283ef 100755 --- a/facefusion/uis/components/preview.py +++ b/facefusion/uis/components/preview.py @@ -104,6 +104,7 @@ def listen() -> None: 'face_debugger_items_checkbox_group', 'frame_colorizer_size_dropdown', 'face_mask_types_checkbox_group', + 'face_mask_areas_checkbox_group', 'face_mask_regions_checkbox_group' ]): ui_component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) diff --git a/facefusion/uis/types.py b/facefusion/uis/types.py index c6e8fb3..52c5015 100644 --- a/facefusion/uis/types.py +++ 
b/facefusion/uis/types.py @@ -1,8 +1,6 @@ from typing import Any, Dict, IO, Literal, TypeAlias File : TypeAlias = IO[Any] -Component : TypeAlias = Any -ComponentOptions : TypeAlias = Dict[str, Any] ComponentName = Literal\ [ 'age_modifier_direction_slider', @@ -38,13 +36,14 @@ ComponentName = Literal\ 'face_enhancer_weight_slider', 'face_landmarker_model_dropdown', 'face_landmarker_score_slider', + 'face_mask_types_checkbox_group', + 'face_mask_areas_checkbox_group', + 'face_mask_regions_checkbox_group', 'face_mask_blur_slider', 'face_mask_padding_bottom_slider', 'face_mask_padding_left_slider', 'face_mask_padding_right_slider', 'face_mask_padding_top_slider', - 'face_mask_regions_checkbox_group', - 'face_mask_types_checkbox_group', 'face_selector_age_range_slider', 'face_selector_gender_dropdown', 'face_selector_mode_dropdown', @@ -79,6 +78,8 @@ ComponentName = Literal\ 'webcam_mode_radio', 'webcam_resolution_dropdown' ] +Component : TypeAlias = Any +ComponentOptions : TypeAlias = Dict[str, Any] JobManagerAction = Literal['job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step'] JobRunnerAction = Literal['job-run', 'job-run-all', 'job-retry', 'job-retry-all'] diff --git a/facefusion/wording.py b/facefusion/wording.py index 7372832..e09e7da 100755 --- a/facefusion/wording.py +++ b/facefusion/wording.py @@ -131,6 +131,7 @@ WORDING : Dict[str, Any] =\ 'face_mask_types': 'mix and match different face mask types (choices: {choices})', 'face_mask_blur': 'specify the degree of blur applied to the box mask', 'face_mask_padding': 'apply top, right, bottom and left padding to the box mask', + 'face_mask_areas': 'choose the facial features used for the area mask (choices: {choices})', 'face_mask_regions': 'choose the facial features used for the region mask (choices: {choices})', # frame extraction 'trim_frame_start': 'specify the starting frame of the target video', @@ -281,6 +282,7 @@ WORDING : Dict[str, Any] =\ 
'face_mask_padding_left_slider': 'FACE MASK PADDING LEFT', 'face_mask_padding_right_slider': 'FACE MASK PADDING RIGHT', 'face_mask_padding_top_slider': 'FACE MASK PADDING TOP', + 'face_mask_areas_checkbox_group': 'FACE MASK AREAS', 'face_mask_regions_checkbox_group': 'FACE MASK REGIONS', 'face_mask_types_checkbox_group': 'FACE MASK TYPES', 'face_selector_age_range_slider': 'FACE SELECTOR AGE',