Polish benchmark and orders

This commit is contained in:
henryruhs
2025-06-16 15:59:40 +02:00
parent 5218d852ec
commit be26611693
10 changed files with 71 additions and 71 deletions

View File

@@ -100,19 +100,19 @@ open_browser =
ui_layouts =
ui_workflow =
[execution]
execution_device_id =
execution_providers =
execution_thread_count =
execution_queue_count =
[download]
download_providers =
download_scope =
[benchmark]
benchmark_resolutions =
benchmark_cycles =
benchmark_cycle_count =
[execution]
execution_device_id =
execution_providers =
execution_thread_count =
execution_queue_count =
[memory]
video_memory_strategy =

View File

@@ -127,7 +127,7 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
apply_state_item('download_scope', args.get('download_scope'))
# benchmark
apply_state_item('benchmark_resolutions', args.get('benchmark_resolutions'))
apply_state_item('benchmark_cycles', args.get('benchmark_cycles'))
apply_state_item('benchmark_cycle_count', args.get('benchmark_cycle_count'))
# memory
apply_state_item('video_memory_strategy', args.get('video_memory_strategy'))
apply_state_item('system_memory_limit', args.get('system_memory_limit'))

View File

@@ -32,14 +32,14 @@ def pre_check() -> bool:
def run() -> Generator[List[BenchmarkCycleSet], None, None]:
benchmark_resolutions = state_manager.get_item('benchmark_resolutions')
benchmark_cycles = state_manager.get_item('benchmark_cycles')
benchmark_cycle_count = state_manager.get_item('benchmark_cycle_count')
state_manager.set_item('source_paths', [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ])
state_manager.set_item('face_landmarker_score', 0)
state_manager.set_item('temp_frame_format', 'bmp')
state_manager.set_item('output_audio_volume', 0)
state_manager.set_item('output_video_preset', 'ultrafast')
state_manager.set_item('video_memory_strategy', 'tolerant')
state_manager.init_item('source_paths', [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ])
state_manager.init_item('face_landmarker_score', 0)
state_manager.init_item('temp_frame_format', 'bmp')
state_manager.init_item('output_audio_volume', 0)
state_manager.init_item('output_video_preset', 'ultrafast')
state_manager.init_item('video_memory_strategy', 'tolerant')
benchmarks = []
target_paths = [facefusion.choices.benchmark_set.get(benchmark_resolution) for benchmark_resolution in benchmark_resolutions if benchmark_resolution in facefusion.choices.benchmark_set]
@@ -47,11 +47,11 @@ def run() -> Generator[List[BenchmarkCycleSet], None, None]:
for target_path in target_paths:
state_manager.set_item('target_path', target_path)
state_manager.set_item('output_path', suggest_output_path(state_manager.get_item('target_path')))
benchmarks.append(cycle(benchmark_cycles))
benchmarks.append(cycle(benchmark_cycle_count))
yield benchmarks
def cycle(benchmark_cycles : int) -> BenchmarkCycleSet:
def cycle(benchmark_cycle_count : int) -> BenchmarkCycleSet:
process_times = []
video_frame_total = count_video_frame_total(state_manager.get_item('target_path'))
output_video_resolution = detect_video_resolution(state_manager.get_item('target_path'))
@@ -60,7 +60,7 @@ def cycle(benchmark_cycles : int) -> BenchmarkCycleSet:
core.conditional_process()
for index in range(benchmark_cycles):
for index in range(benchmark_cycle_count):
start_time = perf_counter()
core.conditional_process()
end_time = perf_counter()
@@ -69,12 +69,12 @@ def cycle(benchmark_cycles : int) -> BenchmarkCycleSet:
average_run = round(statistics.mean(process_times), 2)
fastest_run = round(min(process_times), 2)
slowest_run = round(max(process_times), 2)
relative_fps = round(video_frame_total * benchmark_cycles / sum(process_times), 2)
relative_fps = round(video_frame_total * benchmark_cycle_count / sum(process_times), 2)
return\
{
'target_path': state_manager.get_item('target_path'),
'benchmark_cycles': benchmark_cycles,
'benchmark_cycle_count': benchmark_cycle_count,
'average_run': average_run,
'fastest_run': fastest_run,
'slowest_run': slowest_run,
@@ -92,7 +92,7 @@ def render() -> None:
headers =\
[
'target_path',
'benchmark_cycles',
'benchmark_cycle_count',
'average_run',
'fastest_run',
'slowest_run',

View File

@@ -148,7 +148,7 @@ log_levels : List[LogLevel] = list(log_level_set.keys())
ui_workflows : List[UiWorkflow] = [ 'instant_runner', 'job_runner', 'job_manager' ]
job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ]
benchmark_cycles_range : Sequence[int] = create_int_range(1, 10, 1)
benchmark_cycle_count_range : Sequence[int] = create_int_range(1, 10, 1)
execution_thread_count_range : Sequence[int] = create_int_range(1, 32, 1)
execution_queue_count_range : Sequence[int] = create_int_range(1, 4, 1)
system_memory_limit_range : Sequence[int] = create_int_range(0, 128, 4)

View File

@@ -194,6 +194,30 @@ def create_uis_program() -> ArgumentParser:
return program
def create_download_providers_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_download = program.add_argument_group('download')
group_download.add_argument('--download-providers', help = wording.get('help.download_providers').format(choices = ', '.join(facefusion.choices.download_providers)), default = config.get_str_list('download', 'download_providers', ' '.join(facefusion.choices.download_providers)), choices = facefusion.choices.download_providers, nargs = '+', metavar = 'DOWNLOAD_PROVIDERS')
job_store.register_job_keys([ 'download_providers' ])
return program
def create_download_scope_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_download = program.add_argument_group('download')
group_download.add_argument('--download-scope', help = wording.get('help.download_scope'), default = config.get_str_value('download', 'download_scope', 'lite'), choices = facefusion.choices.download_scopes)
job_store.register_job_keys([ 'download_scope' ])
return program
def create_benchmark_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_benchmark = program.add_argument_group('benchmark')
group_benchmark.add_argument('--benchmark-resolutions', help = wording.get('help.benchmark_resolutions'), default = config.get_str_list('benchmark', 'benchmark_resolutions', get_first(facefusion.choices.benchmark_resolutions)), choices = facefusion.choices.benchmark_resolutions, nargs = '+')
group_benchmark.add_argument('--benchmark-cycle-count', help = wording.get('help.benchmark_cycle_count'), type = int, default = config.get_int_value('benchmark', 'benchmark_cycle_count', '5'), choices = facefusion.choices.benchmark_cycle_count_range)
return program
def create_execution_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
available_execution_providers = get_available_execution_providers()
@@ -206,30 +230,6 @@ def create_execution_program() -> ArgumentParser:
return program
def create_download_providers_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_download = program.add_argument_group('download')
group_download.add_argument('--download-providers', help = wording.get('help.download_providers').format(choices = ', '.join(facefusion.choices.download_providers)), default = config.get_str_list('download', 'download_providers', ' '.join(facefusion.choices.download_providers)), choices = facefusion.choices.download_providers, nargs = '+', metavar = 'DOWNLOAD_PROVIDERS')
job_store.register_job_keys([ 'download_providers' ])
return program
def create_benchmark_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_benchmark = program.add_argument_group('benchmark')
group_benchmark.add_argument('--benchmark-resolutions', help = wording.get('help.benchmark_resolutions'), default = config.get_str_list('benchmark', 'benchmark_resolutions', get_first(facefusion.choices.benchmark_resolutions)), choices = facefusion.choices.benchmark_resolutions, nargs = '+')
group_benchmark.add_argument('--benchmark-cycles', help = wording.get('help.benchmark_cycles'), type = int, default = config.get_int_value('benchmark', 'benchmark_cycles', '5'), choices = facefusion.choices.benchmark_cycles_range)
return program
def create_download_scope_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_download = program.add_argument_group('download')
group_download.add_argument('--download-scope', help = wording.get('help.download_scope'), default = config.get_str_value('download', 'download_scope', 'lite'), choices = facefusion.choices.download_scopes)
job_store.register_job_keys([ 'download_scope' ])
return program
def create_memory_program() -> ArgumentParser:
program = ArgumentParser(add_help = False)
group_memory = program.add_argument_group('memory')
@@ -292,7 +292,7 @@ def create_program() -> ArgumentParser:
sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('batch-run', help = wording.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('force-download', help = wording.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('benchmark', help = wording.get('help.benchmark'), parents = [ create_temp_path_program(), create_benchmark_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('benchmark', help = wording.get('help.benchmark'), parents = [ create_temp_path_program(), collect_step_program(), create_benchmark_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
# job manager
sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)

View File

@@ -135,7 +135,7 @@ BenchmarkSet : TypeAlias = Dict[BenchmarkResolution, str]
BenchmarkCycleSet = TypedDict('BenchmarkCycleSet',
{
'target_path' : str,
'benchmark_cycles' : int,
'benchmark_cycle_count' : int,
'average_run' : float,
'fastest_run' : float,
'slowest_run' : float,
@@ -253,7 +253,7 @@ StateKey = Literal\
'download_providers',
'download_scope',
'benchmark_resolutions',
'benchmark_cycles',
'benchmark_cycle_count',
'face_detector_model',
'face_detector_size',
'face_detector_angles',
@@ -321,7 +321,7 @@ State = TypedDict('State',
'download_providers': List[DownloadProvider],
'download_scope': DownloadScope,
'benchmark_resolutions': List[BenchmarkResolution],
'benchmark_cycles': int,
'benchmark_cycle_count': int,
'face_detector_model' : FaceDetectorModel,
'face_detector_size' : str,
'face_detector_angles' : List[Angle],

View File

@@ -18,7 +18,7 @@ def render() -> None:
headers =
[
'target_path',
'benchmark_cycles',
'benchmark_cycle_count',
'average_run',
'fastest_run',
'slowest_run',
@@ -44,15 +44,15 @@ def render() -> None:
def listen() -> None:
benchmark_resolutions_checkbox_group = get_ui_component('benchmark_resolutions_checkbox_group')
benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider')
benchmark_cycle_count_slider = get_ui_component('benchmark_cycle_count_slider')
if benchmark_resolutions_checkbox_group and benchmark_cycles_slider:
BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_resolutions_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_BENCHMARKS_DATAFRAME)
if benchmark_resolutions_checkbox_group and benchmark_cycle_count_slider:
BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_resolutions_checkbox_group, benchmark_cycle_count_slider ], outputs = BENCHMARK_BENCHMARKS_DATAFRAME)
def start(benchmark_resolutions : List[BenchmarkResolution], benchmark_cycles : int) -> Generator[List[Any], None, None]:
def start(benchmark_resolutions : List[BenchmarkResolution], benchmark_cycle_count : int) -> Generator[List[Any], None, None]:
state_manager.set_item('benchmark_resolutions', benchmark_resolutions)
state_manager.set_item('benchmark_cycles', benchmark_cycles)
state_manager.set_item('benchmark_cycle_count', benchmark_cycle_count)
state_manager.sync_item('execution_providers')
state_manager.sync_item('execution_thread_count')
state_manager.sync_item('execution_queue_count')

View File

@@ -7,24 +7,24 @@ from facefusion import wording
from facefusion.uis.core import register_ui_component
BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None
BENCHMARK_CYCLE_COUNT_SLIDER : Optional[gradio.Button] = None
def render() -> None:
global BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP
global BENCHMARK_CYCLES_SLIDER
global BENCHMARK_CYCLE_COUNT_SLIDER
BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
label = wording.get('uis.benchmark_resolutions_checkbox_group'),
choices = facefusion.choices.benchmark_resolutions,
value = facefusion.choices.benchmark_resolutions
)
BENCHMARK_CYCLES_SLIDER = gradio.Slider(
label = wording.get('uis.benchmark_cycles_slider'),
BENCHMARK_CYCLE_COUNT_SLIDER = gradio.Slider(
label = wording.get('uis.benchmark_cycle_count_slider'),
value = 5,
step = 1,
minimum = min(facefusion.choices.benchmark_cycles_range),
maximum = max(facefusion.choices.benchmark_cycles_range)
minimum = min(facefusion.choices.benchmark_cycle_count_range),
maximum = max(facefusion.choices.benchmark_cycle_count_range)
)
register_ui_component('benchmark_resolutions_checkbox_group', BENCHMARK_RESOLUTIONS_CHECKBOX_GROUP)
register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER)
register_ui_component('benchmark_cycle_count_slider', BENCHMARK_CYCLE_COUNT_SLIDER)

View File

@@ -5,7 +5,7 @@ ComponentName = Literal\
[
'age_modifier_direction_slider',
'age_modifier_model_dropdown',
'benchmark_cycles_slider',
'benchmark_cycle_count_slider',
'benchmark_resolutions_checkbox_group',
'deep_swapper_model_dropdown',
'deep_swapper_morph_slider',

View File

@@ -189,17 +189,17 @@ WORDING : Dict[str, Any] =\
'open_browser': 'open the browser once the program is ready',
'ui_layouts': 'launch a single or multiple UI layouts (choices: {choices}, ...)',
'ui_workflow': 'choose the ui workflow',
# download
'download_providers': 'download using different providers (choices: {choices}, ...)',
'download_scope': 'specify the download scope',
# benchmark
'benchmark_resolutions': 'choose the resolutions for the benchmarks (choices: {choices}, ...)',
'benchmark_cycle_count': 'specify the amount of cycles per benchmark',
# execution
'execution_device_id': 'specify the device used for processing',
'execution_providers': 'inference using different providers (choices: {choices}, ...)',
'execution_thread_count': 'specify the amount of parallel threads while processing',
'execution_queue_count': 'specify the amount of frames each thread is processing',
# download
'download_providers': 'download using different providers (choices: {choices}, ...)',
'download_scope': 'specify the download scope',
# benchmark
'benchmark_resolutions': 'choose the resolution for the benchmarks (choices: {choices}, ...)',
'benchmark_cycles': 'specify the number of benchmark cycles',
# memory
'video_memory_strategy': 'balance fast processing and low VRAM usage',
'system_memory_limit': 'limit the available RAM that can be used while processing',
@@ -244,7 +244,7 @@ WORDING : Dict[str, Any] =\
'age_modifier_direction_slider': 'AGE MODIFIER DIRECTION',
'age_modifier_model_dropdown': 'AGE MODIFIER MODEL',
'apply_button': 'APPLY',
'benchmark_cycles_slider': 'BENCHMARK CYCLES',
'benchmark_cycle_count_slider': 'BENCHMARK CYCLE COUNT',
'benchmark_resolutions_checkbox_group': 'BENCHMARK RESOLUTIONS',
'clear_button': 'CLEAR',
'common_options_checkbox_group': 'OPTIONS',