Initial commit
This commit is contained in:
0
facefusion/uis/__init__.py
Normal file
0
facefusion/uis/__init__.py
Normal file
0
facefusion/uis/components/__init__.py
Normal file
0
facefusion/uis/components/__init__.py
Normal file
13
facefusion/uis/components/about.py
Normal file
13
facefusion/uis/components/about.py
Normal file
@@ -0,0 +1,13 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
from facefusion import metadata
|
||||
|
||||
ABOUT_HTML : Optional[gradio.HTML] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the about box: a centered link showing the project name and version."""
    global ABOUT_HTML

    with gradio.Box():
        link = '<center><a href="' + metadata.get('url') + '">' + metadata.get('name') + ' ' + metadata.get('version') + '</a></center>'
        ABOUT_HTML = gradio.HTML(link)
|
||||
104
facefusion/uis/components/benchmark.py
Normal file
104
facefusion/uis/components/benchmark.py
Normal file
@@ -0,0 +1,104 @@
|
||||
from typing import Any, Optional, List
|
||||
import time
|
||||
import tempfile
|
||||
import statistics
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.capturer import get_video_frame_total
|
||||
from facefusion.core import conditional_process
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import normalize_output_path
|
||||
|
||||
# UI component handles, populated by render()
BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None
# annotation corrected: render() assigns a gradio.Slider here, not a Button
BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the benchmark result dataframe, cycles slider and start button."""
    global BENCHMARK_RESULT_DATAFRAME
    global BENCHMARK_CYCLES_SLIDER
    global BENCHMARK_START_BUTTON

    with gradio.Box():
        # one row per bundled example target (see update())
        BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe(
            label = wording.get('benchmark_result_dataframe_label'),
            headers =
            [
                'target_path',
                'cycles',
                'average_run',
                'fastest_run',
                'slowest_run',
                'relative_fps'
            ],
            col_count = (6, 'fixed'),
            row_count = (6, 'fixed'),
            datatype =
            [
                'str',
                'number',
                'number',
                'number',
                'number',
                'number'
            ]
        )
        BENCHMARK_CYCLES_SLIDER = gradio.Slider(
            label = wording.get('benchmark_cycles_slider_label'),
            minimum = 1,
            step = 1,
            value = 3,
            maximum = 10
        )
        BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label'))
|
||||
|
||||
|
||||
def listen() -> None:
    """Wire the start button to run the benchmark and fill the dataframe."""
    BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME)
|
||||
|
||||
|
||||
def update(benchmark_cycles : int) -> Update:
    """Benchmark every bundled example target and return the dataframe rows."""
    facefusion.globals.source_path = '.assets/examples/source.jpg'
    target_paths = [
        '.assets/examples/target-240p.mp4',
        '.assets/examples/target-360p.mp4',
        '.assets/examples/target-540p.mp4',
        '.assets/examples/target-720p.mp4',
        '.assets/examples/target-1440p.mp4',
        '.assets/examples/target-2160p.mp4'
    ]
    value = []
    for target_path in target_paths:
        value.append(benchmark(target_path, benchmark_cycles))
    return gradio.update(value = value)
|
||||
|
||||
|
||||
def benchmark(target_path : str, cycles : int) -> List[Any]:
    """Run the full processing pipeline against target_path cycles + 1 times.

    The first iteration (i == 0) is a warm-up and is excluded from the
    statistics. Returns one dataframe row: target path, cycle count,
    average/fastest/slowest run time (seconds) and average fps.
    """
    process_times = []
    total_fps = 0.0
    # cycles + 1 iterations: the i == 0 pass warms caches and is not recorded
    for i in range(cycles + 1):
        facefusion.globals.target_path = target_path
        facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, tempfile.gettempdir())
        video_frame_total = get_video_frame_total(facefusion.globals.target_path)
        start_time = time.perf_counter()
        conditional_process()
        end_time = time.perf_counter()
        process_time = end_time - start_time
        fps = video_frame_total / process_time
        if i > 0:
            process_times.append(process_time)
            total_fps += fps
    average_process_time = round(statistics.mean(process_times), 2)
    fastest_process_time = round(min(process_times), 2)
    slowest_process_time = round(max(process_times), 2)
    average_fps = round(total_fps / cycles, 2)
    return\
    [
        facefusion.globals.target_path,
        cycles,
        average_process_time,
        fastest_process_time,
        slowest_process_time,
        average_fps
    ]
|
||||
64
facefusion/uis/components/execution.py
Normal file
64
facefusion/uis/components/execution.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from typing import List, Optional
|
||||
import gradio
|
||||
import onnxruntime
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.face_analyser import clear_face_analyser
|
||||
from facefusion.processors.frame.core import clear_frame_processors_modules
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import encode_execution_providers, decode_execution_providers
|
||||
|
||||
EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
|
||||
EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the execution provider checkboxes and the thread/queue count sliders."""
    global EXECUTION_PROVIDERS_CHECKBOX_GROUP
    global EXECUTION_THREAD_COUNT_SLIDER
    global EXECUTION_QUEUE_COUNT_SLIDER

    with gradio.Box():
        # choices come from whatever onnxruntime providers are installed
        EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
            label = wording.get('execution_providers_checkbox_group_label'),
            choices = encode_execution_providers(onnxruntime.get_available_providers()),
            value = encode_execution_providers(facefusion.globals.execution_providers)
        )
        EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
            label = wording.get('execution_thread_count_slider_label'),
            value = facefusion.globals.execution_thread_count,
            step = 1,
            minimum = 1,
            maximum = 128
        )
        EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
            label = wording.get('execution_queue_count_slider_label'),
            value = facefusion.globals.execution_queue_count,
            step = 1,
            minimum = 1,
            maximum = 16
        )
|
||||
|
||||
|
||||
def listen() -> None:
    """Bind change events so each control writes its value into facefusion.globals."""
    EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
    EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER, outputs = EXECUTION_THREAD_COUNT_SLIDER)
    EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER, outputs = EXECUTION_QUEUE_COUNT_SLIDER)
|
||||
|
||||
|
||||
def update_execution_providers(execution_providers : List[str]) -> Update:
    """Apply the selected execution providers and drop caches built with the old ones."""
    # cached analyser / frame processor models must be rebuilt with the new providers
    clear_face_analyser()
    clear_frame_processors_modules()
    decoded_execution_providers = decode_execution_providers(execution_providers)
    facefusion.globals.execution_providers = decoded_execution_providers
    return gradio.update(value = execution_providers)
|
||||
|
||||
|
||||
def update_execution_thread_count(execution_thread_count : int = 1) -> Update:
    """Store the chosen thread count and echo it back to the slider."""
    setattr(facefusion.globals, 'execution_thread_count', execution_thread_count)
    return gradio.update(value = execution_thread_count)
|
||||
|
||||
|
||||
def update_execution_queue_count(execution_queue_count : int = 1) -> Update:
    """Store the chosen queue count and echo it back to the slider."""
    setattr(facefusion.globals, 'execution_queue_count', execution_queue_count)
    return gradio.update(value = execution_queue_count)
|
||||
54
facefusion/uis/components/face_analyser.py
Normal file
54
facefusion/uis/components/face_analyser.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from typing import Optional
|
||||
|
||||
import gradio
|
||||
|
||||
import facefusion.choices
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import Update
|
||||
|
||||
FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the face analyser direction/age/gender dropdowns in one row."""
    global FACE_ANALYSER_DIRECTION_DROPDOWN
    global FACE_ANALYSER_AGE_DROPDOWN
    global FACE_ANALYSER_GENDER_DROPDOWN

    with gradio.Box():
        with gradio.Row():
            FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
                label = wording.get('face_analyser_direction_dropdown_label'),
                choices = facefusion.choices.face_analyser_direction,
                value = facefusion.globals.face_analyser_direction
            )
            # 'none' is a sentinel choice mapping to a None filter (see update_dropdown)
            FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
                label = wording.get('face_analyser_age_dropdown_label'),
                choices = ['none'] + facefusion.choices.face_analyser_age,
                value = facefusion.globals.face_analyser_age or 'none'
            )
            FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
                label = wording.get('face_analyser_gender_dropdown_label'),
                choices = ['none'] + facefusion.choices.face_analyser_gender,
                value = facefusion.globals.face_analyser_gender or 'none'
            )
        # registered so other components (preview, face selector) can listen to them
        ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN)
        ui.register_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
        ui.register_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
|
||||
|
||||
|
||||
def listen() -> None:
    """Route each dropdown selection into the matching facefusion.globals attribute."""
    FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN, outputs = FACE_ANALYSER_DIRECTION_DROPDOWN)
    FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN, outputs = FACE_ANALYSER_AGE_DROPDOWN)
    FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN, outputs = FACE_ANALYSER_GENDER_DROPDOWN)
|
||||
|
||||
|
||||
def update_dropdown(name : str, value : str) -> Update:
    """Write a dropdown selection into facefusion.globals; 'none' clears the attribute."""
    setattr(facefusion.globals, name, None if value == 'none' else value)
    return gradio.update(value = value)
|
||||
133
facefusion/uis/components/face_selector.py
Normal file
133
facefusion/uis/components/face_selector.py
Normal file
@@ -0,0 +1,133 @@
|
||||
from typing import List, Optional, Tuple, Any, Dict
|
||||
from time import sleep
|
||||
|
||||
import cv2
|
||||
import gradio
|
||||
|
||||
import facefusion.choices
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.capturer import get_video_frame
|
||||
from facefusion.face_analyser import get_many_faces
|
||||
from facefusion.face_reference import clear_face_reference
|
||||
from facefusion.typing import Frame, FaceRecognition
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import ComponentName, Update
|
||||
from facefusion.utilities import is_image, is_video
|
||||
|
||||
FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
|
||||
REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the face recognition dropdown, reference face gallery and distance slider."""
    global FACE_RECOGNITION_DROPDOWN
    global REFERENCE_FACE_POSITION_GALLERY
    global REFERENCE_FACE_DISTANCE_SLIDER

    with gradio.Box():
        # gallery and slider are only shown in 'reference' recognition mode
        reference_face_gallery_args: Dict[str, Any] = {
            'label': wording.get('reference_face_gallery_label'),
            'height': 120,
            'object_fit': 'cover',
            'columns': 10,
            'allow_preview': False,
            'visible': 'reference' in facefusion.globals.face_recognition
        }
        # pre-populate the gallery when a target is already configured
        if is_image(facefusion.globals.target_path):
            reference_frame = cv2.imread(facefusion.globals.target_path)
            reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
        if is_video(facefusion.globals.target_path):
            reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
            reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
        FACE_RECOGNITION_DROPDOWN = gradio.Dropdown(
            label = wording.get('face_recognition_dropdown_label'),
            choices = facefusion.choices.face_recognition,
            value = facefusion.globals.face_recognition
        )
        REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
        REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
            label = wording.get('reference_face_distance_slider_label'),
            value = facefusion.globals.reference_face_distance,
            maximum = 3,
            step = 0.05,
            visible = 'reference' in facefusion.globals.face_recognition
        )
        # registered so preview and other components can react to these controls
        ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN)
        ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
        ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
|
||||
|
||||
|
||||
def listen() -> None:
    """Bind local control events and refresh the gallery when related components change."""
    FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
    REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position)
    REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
    # rebuild the reference gallery whenever the target or preview frame changes
    update_component_names : List[ComponentName] =\
    [
        'target_file',
        'preview_frame_slider'
    ]
    for component_name in update_component_names:
        component = ui.get_component(component_name)
        if component:
            component.change(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
    # analyser dropdown selections also affect which faces are detected
    select_component_names : List[ComponentName] =\
    [
        'face_analyser_direction_dropdown',
        'face_analyser_age_dropdown',
        'face_analyser_gender_dropdown'
    ]
    for component_name in select_component_names:
        component = ui.get_component(component_name)
        if component:
            component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
|
||||
|
||||
|
||||
def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]:
    """Apply the recognition mode and toggle the reference gallery / distance slider.

    Returns visibility updates for (REFERENCE_FACE_POSITION_GALLERY,
    REFERENCE_FACE_DISTANCE_SLIDER): both visible only in 'reference' mode.
    """
    if face_recognition == 'reference':
        facefusion.globals.face_recognition = face_recognition
        return gradio.update(visible = True), gradio.update(visible = True)
    if face_recognition == 'many':
        facefusion.globals.face_recognition = face_recognition
        return gradio.update(visible = False), gradio.update(visible = False)
    # fix: the original fell through and implicitly returned None here, which
    # breaks the two-output listener tuple; keep the reference widgets hidden
    return gradio.update(visible = False), gradio.update(visible = False)
|
||||
|
||||
|
||||
def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update:
    """Drop the cached reference face, then rebuild the gallery for the newly selected position."""
    clear_face_reference()
    return update_face_reference_position(event.index)
|
||||
|
||||
|
||||
def update_face_reference_position(reference_face_position : int = 0) -> Update:
    """Store the selected reference face position and regenerate the gallery crops."""
    # brief pause, presumably to let upstream component updates land first — TODO confirm
    sleep(0.2)
    gallery_frames = []
    facefusion.globals.reference_face_position = reference_face_position
    if is_image(facefusion.globals.target_path):
        reference_frame = cv2.imread(facefusion.globals.target_path)
        gallery_frames = extract_gallery_frames(reference_frame)
    if is_video(facefusion.globals.target_path):
        reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
        gallery_frames = extract_gallery_frames(reference_frame)
    if gallery_frames:
        return gradio.update(value = gallery_frames)
    return gradio.update(value = None)
|
||||
|
||||
|
||||
def update_reference_face_distance(reference_face_distance : float) -> Update:
    """Store the chosen reference face distance and echo it back to the slider."""
    setattr(facefusion.globals, 'reference_face_distance', reference_face_distance)
    return gradio.update(value = reference_face_distance)
|
||||
|
||||
|
||||
def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
    """Crop every detected face out of reference_frame (with 25% padding) for the gallery."""
    crop_frames = []
    faces = get_many_faces(reference_frame)
    for face in faces:
        x_min, y_min, x_max, y_max = map(int, face['bbox'])
        # pad the bounding box by a quarter of its size on every side
        pad_x = int((x_max - x_min) * 0.25)
        pad_y = int((y_max - y_min) * 0.25)
        x_min = max(0, x_min - pad_x)
        y_min = max(0, y_min - pad_y)
        x_max = max(0, x_max + pad_x)
        y_max = max(0, y_max + pad_y)
        crop_frame = reference_frame[y_min:y_max, x_min:x_max]
        crop_frames.append(ui.normalize_frame(crop_frame))
    return crop_frames
|
||||
53
facefusion/uis/components/output.py
Normal file
53
facefusion/uis/components/output.py
Normal file
@@ -0,0 +1,53 @@
|
||||
from typing import Tuple, Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.core import conditional_process
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import is_image, is_video, normalize_output_path
|
||||
|
||||
OUTPUT_START_BUTTON : Optional[gradio.Button] = None
|
||||
OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None
|
||||
OUTPUT_IMAGE : Optional[gradio.Image] = None
|
||||
OUTPUT_VIDEO : Optional[gradio.Video] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the output image/video widgets and the start/clear buttons."""
    global OUTPUT_START_BUTTON
    global OUTPUT_CLEAR_BUTTON
    global OUTPUT_IMAGE
    global OUTPUT_VIDEO

    with gradio.Row():
        with gradio.Box():
            # only one of image/video is visible at a time (see update())
            OUTPUT_IMAGE = gradio.Image(
                label = wording.get('output_image_or_video_label'),
                visible = False
            )
            OUTPUT_VIDEO = gradio.Video(
                label = wording.get('output_image_or_video_label')
            )
    with gradio.Row():
        OUTPUT_START_BUTTON = gradio.Button(wording.get('start_button_label'))
        OUTPUT_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label'))
|
||||
|
||||
|
||||
def listen() -> None:
    """Wire the start button to run processing and the clear button to blank the outputs."""
    OUTPUT_START_BUTTON.click(update, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
    OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
|
||||
|
||||
|
||||
def update() -> Tuple[Update, Update]:
    """Run processing and route the result to the image or video widget."""
    facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, '.')
    if facefusion.globals.output_path:
        conditional_process()
    output_path = facefusion.globals.output_path
    if is_image(output_path):
        return gradio.update(value = output_path, visible = True), gradio.update(value = None, visible = False)
    if is_video(output_path):
        return gradio.update(value = None, visible = False), gradio.update(value = output_path, visible = True)
    # nothing produced: hide both widgets
    return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False)
|
||||
|
||||
|
||||
def clear() -> Tuple[Update, Update]:
    """Blank both output widgets."""
    return (gradio.update(value = None), gradio.update(value = None))
|
||||
43
facefusion/uis/components/output_settings.py
Normal file
43
facefusion/uis/components/output_settings.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.choices
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.typing import OutputVideoEncoder
|
||||
from facefusion.uis.typing import Update
|
||||
|
||||
OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
|
||||
OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the output video encoder dropdown and quality slider."""
    global OUTPUT_VIDEO_ENCODER_DROPDOWN
    global OUTPUT_VIDEO_QUALITY_SLIDER

    with gradio.Box():
        OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
            label = wording.get('output_video_encoder_dropdown_label'),
            choices = facefusion.choices.output_video_encoder,
            value = facefusion.globals.output_video_encoder
        )
        OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
            label = wording.get('output_video_quality_slider_label'),
            value = facefusion.globals.output_video_quality,
            step = 1
        )
|
||||
|
||||
|
||||
def listen() -> None:
    """Bind the encoder and quality controls to their globals-updating handlers."""
    OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN, outputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
    OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER, outputs = OUTPUT_VIDEO_QUALITY_SLIDER)
|
||||
|
||||
|
||||
def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> Update:
    """Store the chosen video encoder and echo it back to the dropdown."""
    setattr(facefusion.globals, 'output_video_encoder', output_video_encoder)
    return gradio.update(value = output_video_encoder)
|
||||
|
||||
|
||||
def update_output_video_quality(output_video_quality : int) -> Update:
    """Store the chosen video quality and echo it back to the slider."""
    setattr(facefusion.globals, 'output_video_quality', output_video_quality)
    return gradio.update(value = output_video_quality)
|
||||
121
facefusion/uis/components/preview.py
Normal file
121
facefusion/uis/components/preview.py
Normal file
@@ -0,0 +1,121 @@
|
||||
from time import sleep
|
||||
from typing import Any, Dict, Tuple, List, Optional
|
||||
import cv2
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.capturer import get_video_frame, get_video_frame_total
|
||||
from facefusion.face_analyser import get_one_face
|
||||
from facefusion.face_reference import get_face_reference, set_face_reference
|
||||
from facefusion.predictor import predict_frame
|
||||
from facefusion.processors.frame.core import load_frame_processor_module
|
||||
from facefusion.typing import Frame
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import ComponentName, Update
|
||||
from facefusion.utilities import is_video, is_image
|
||||
|
||||
PREVIEW_IMAGE : Optional[gradio.Image] = None
|
||||
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the preview image and frame slider, pre-populated from the current target."""
    global PREVIEW_IMAGE
    global PREVIEW_FRAME_SLIDER

    with gradio.Box():
        preview_image_args: Dict[str, Any] = {
            'label': wording.get('preview_image_label')
        }
        # the slider only makes sense for video targets; hidden by default
        preview_frame_slider_args: Dict[str, Any] = {
            'label': wording.get('preview_frame_slider_label'),
            'step': 1,
            'visible': False
        }
        if is_image(facefusion.globals.target_path):
            target_frame = cv2.imread(facefusion.globals.target_path)
            preview_frame = extract_preview_frame(target_frame)
            preview_image_args['value'] = ui.normalize_frame(preview_frame)
        if is_video(facefusion.globals.target_path):
            temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
            preview_frame = extract_preview_frame(temp_frame)
            preview_image_args['value'] = ui.normalize_frame(preview_frame)
            preview_image_args['visible'] = True
            preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
            preview_frame_slider_args['maximum'] = get_video_frame_total(facefusion.globals.target_path)
            preview_frame_slider_args['visible'] = True
        PREVIEW_IMAGE = gradio.Image(**preview_image_args)
        PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
        # registered so other components (e.g. face selector) can listen to the slider
        ui.register_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
|
||||
|
||||
|
||||
def listen() -> None:
    """Refresh the preview whenever the slider or any related component changes."""
    PREVIEW_FRAME_SLIDER.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ])
    # components whose change events invalidate the current preview
    update_component_names : List[ComponentName] =\
    [
        'source_file',
        'target_file',
        'face_recognition_dropdown',
        'reference_face_distance_slider',
        'frame_processors_checkbox_group'
    ]
    for component_name in update_component_names:
        component = ui.get_component(component_name)
        if component:
            component.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ])
    # components whose select events invalidate the current preview
    select_component_names : List[ComponentName] =\
    [
        'reference_face_position_gallery',
        'face_analyser_direction_dropdown',
        'face_analyser_age_dropdown',
        'face_analyser_gender_dropdown'
    ]
    for component_name in select_component_names:
        component = ui.get_component(component_name)
        if component:
            component.select(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ])
|
||||
|
||||
|
||||
def update(frame_number : int = 0) -> Tuple[Update, Update]:
    """Rebuild the preview image for frame_number and resync the slider visibility."""
    # brief pause, presumably to let upstream component updates settle — TODO confirm
    sleep(0.1)
    if is_image(facefusion.globals.target_path):
        target_frame = cv2.imread(facefusion.globals.target_path)
        preview_frame = extract_preview_frame(target_frame)
        # images have no frames to scrub: hide the slider
        return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(value = None, maximum = None, visible = False)
    if is_video(facefusion.globals.target_path):
        facefusion.globals.reference_frame_number = frame_number
        video_frame_total = get_video_frame_total(facefusion.globals.target_path)
        temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
        preview_frame = extract_preview_frame(temp_frame)
        return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(maximum = video_frame_total, visible = True)
    # no valid target: blank the image and hide the slider
    return gradio.update(value = None), gradio.update(value = None, maximum = None, visible = False)
|
||||
|
||||
|
||||
def extract_preview_frame(temp_frame : Frame) -> Frame:
    """Apply the configured frame processors to a single frame for previewing.

    Frames flagged by predict_frame are blurred instead of processed.
    """
    if predict_frame(temp_frame):
        return cv2.GaussianBlur(temp_frame, (99, 99), 0)
    source_face = get_one_face(cv2.imread(facefusion.globals.source_path)) if facefusion.globals.source_path else None
    # downscale before processing to keep the preview responsive
    temp_frame = reduce_preview_frame(temp_frame)
    # lazily capture and cache the reference face on first use in 'reference' mode
    if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
        reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
        reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
        set_face_reference(reference_face)
    reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
    for frame_processor in facefusion.globals.frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        if frame_processor_module.pre_process():
            temp_frame = frame_processor_module.process_frame(
                source_face,
                reference_face,
                temp_frame
            )
    return temp_frame
|
||||
|
||||
|
||||
def reduce_preview_frame(temp_frame : Frame, max_height : int = 480) -> Frame:
    """Downscale temp_frame to max_height preserving aspect ratio; smaller frames pass through."""
    height, width = temp_frame.shape[:2]
    if height <= max_height:
        return temp_frame
    scale = max_height / height
    max_width = int(width * scale)
    return cv2.resize(temp_frame, (max_width, max_height))
|
||||
41
facefusion/uis/components/processors.py
Normal file
41
facefusion/uis/components/processors.py
Normal file
@@ -0,0 +1,41 @@
|
||||
from typing import List, Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import list_module_names
|
||||
|
||||
FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the frame processors checkbox group, listing all available modules."""
    global FRAME_PROCESSORS_CHECKBOX_GROUP

    with gradio.Box():
        FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
            label = wording.get('frame_processors_checkbox_group_label'),
            choices = sort_frame_processors(facefusion.globals.frame_processors),
            value = facefusion.globals.frame_processors
        )
        # registered so the preview can refresh when the processor set changes
        ui.register_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
|
||||
|
||||
|
||||
def listen() -> None:
    """Bind checkbox changes to update the active frame processors."""
    FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP)
|
||||
|
||||
|
||||
def update_frame_processors(frame_processors : List[str]) -> Update:
    """Apply the chosen frame processors, pre-checking each selected module."""
    # drop modules cached for the previous selection
    clear_frame_processors_modules()
    facefusion.globals.frame_processors = frame_processors
    for frame_processor in facefusion.globals.frame_processors:
        load_frame_processor_module(frame_processor).pre_check()
    return gradio.update(value = frame_processors, choices = sort_frame_processors(frame_processors))
|
||||
|
||||
|
||||
def sort_frame_processors(frame_processors : List[str]) -> List[str]:
    """Order all available processor modules: selected ones first (in selection order), the rest after."""
    frame_processors_names = list_module_names('facefusion/processors/frame/modules')
    # unselected modules share the key len(frame_processors), keeping their relative order
    return sorted(frame_processors_names, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors))
|
||||
41
facefusion/uis/components/settings.py
Normal file
41
facefusion/uis/components/settings.py
Normal file
@@ -0,0 +1,41 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.uis.typing import Update
|
||||
|
||||
KEEP_FPS_CHECKBOX : Optional[gradio.Checkbox] = None
|
||||
KEEP_TEMP_CHECKBOX : Optional[gradio.Checkbox] = None
|
||||
SKIP_AUDIO_CHECKBOX : Optional[gradio.Checkbox] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the keep-fps, keep-temp and skip-audio checkboxes."""
    global KEEP_FPS_CHECKBOX
    global KEEP_TEMP_CHECKBOX
    global SKIP_AUDIO_CHECKBOX

    with gradio.Box():
        KEEP_FPS_CHECKBOX = gradio.Checkbox(
            label = wording.get('keep_fps_checkbox_label'),
            value = facefusion.globals.keep_fps
        )
        KEEP_TEMP_CHECKBOX = gradio.Checkbox(
            label = wording.get('keep_temp_checkbox_label'),
            value = facefusion.globals.keep_temp
        )
        SKIP_AUDIO_CHECKBOX = gradio.Checkbox(
            label = wording.get('skip_audio_checkbox_label'),
            value = facefusion.globals.skip_audio
        )
|
||||
|
||||
|
||||
def listen() -> None:
    """Route each checkbox toggle into the matching facefusion.globals flag."""
    KEEP_FPS_CHECKBOX.change(lambda value: update_checkbox('keep_fps', value), inputs = KEEP_FPS_CHECKBOX, outputs = KEEP_FPS_CHECKBOX)
    KEEP_TEMP_CHECKBOX.change(lambda value: update_checkbox('keep_temp', value), inputs = KEEP_TEMP_CHECKBOX, outputs = KEEP_TEMP_CHECKBOX)
    SKIP_AUDIO_CHECKBOX.change(lambda value: update_checkbox('skip_audio', value), inputs = SKIP_AUDIO_CHECKBOX, outputs = SKIP_AUDIO_CHECKBOX)
|
||||
|
||||
|
||||
def update_checkbox(name : str, value: bool) -> Update:
    """Write a boolean flag into facefusion.globals under the given attribute name."""
    setattr(facefusion.globals, name, value)
    return gradio.update(value = value)
|
||||
48
facefusion/uis/components/source.py
Normal file
48
facefusion/uis/components/source.py
Normal file
@@ -0,0 +1,48 @@
|
||||
from typing import Any, IO, Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import is_image
|
||||
|
||||
SOURCE_FILE : Optional[gradio.File] = None
|
||||
SOURCE_IMAGE : Optional[gradio.Image] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Render the source file picker and an image preview of the selected source."""
    global SOURCE_FILE
    global SOURCE_IMAGE

    with gradio.Box():
        is_source_image = is_image(facefusion.globals.source_path)
        SOURCE_FILE = gradio.File(
            file_count = 'single',
            file_types=
            [
                '.png',
                '.jpg',
                '.webp'
            ],
            label = wording.get('source_file_label'),
            value = facefusion.globals.source_path if is_source_image else None
        )
        # registered so preview and face selector can react to source changes
        ui.register_component('source_file', SOURCE_FILE)
        # NOTE(review): reads gradio's internal File value dict ('name' key) — gradio-version dependent, verify on upgrade
        SOURCE_IMAGE = gradio.Image(
            value = SOURCE_FILE.value['name'] if is_source_image else None,
            visible = is_source_image,
            show_label = False
        )
|
||||
|
||||
|
||||
def listen() -> None:
    """Refresh the preview image whenever the source file changes."""
    SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE)
|
||||
|
||||
|
||||
def update(file: IO[Any]) -> Update:
    """Sync facefusion.globals.source_path with the upload and toggle the preview.

    Returns a gradio update that shows the image when the upload is a valid
    image file and hides it otherwise.
    """
    file_name = file.name if file else None
    if file_name and is_image(file_name):
        facefusion.globals.source_path = file_name
        return gradio.update(value = file_name, visible = True)
    facefusion.globals.source_path = None
    return gradio.update(value = None, visible = False)
|
||||
62
facefusion/uis/components/target.py
Normal file
62
facefusion/uis/components/target.py
Normal file
@@ -0,0 +1,62 @@
|
||||
from typing import Any, IO, Tuple, Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.face_reference import clear_face_reference
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import is_image, is_video
|
||||
|
||||
# Module-level handles to the rendered Gradio components (populated by render()).
TARGET_FILE : Optional[gradio.File] = None
TARGET_IMAGE : Optional[gradio.Image] = None
TARGET_VIDEO : Optional[gradio.Video] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Create the target file picker plus image and video previews."""
    global TARGET_FILE
    global TARGET_IMAGE
    global TARGET_VIDEO

    with gradio.Box():
        has_target_image = is_image(facefusion.globals.target_path)
        has_target_video = is_video(facefusion.globals.target_path)
        TARGET_FILE = gradio.File(
            label = wording.get('target_file_label'),
            file_count = 'single',
            file_types = [ '.png', '.jpg', '.webp', '.mp4' ],
            value = facefusion.globals.target_path if has_target_image or has_target_video else None
        )
        # only one of the two previews is visible, depending on the target type
        TARGET_IMAGE = gradio.Image(
            value = TARGET_FILE.value['name'] if has_target_image else None,
            visible = has_target_image,
            show_label = False
        )
        TARGET_VIDEO = gradio.Video(
            value = TARGET_FILE.value['name'] if has_target_video else None,
            visible = has_target_video,
            show_label = False
        )
        ui.register_component('target_file', TARGET_FILE)
|
||||
|
||||
|
||||
def listen() -> None:
    """Refresh both previews whenever the target file changes."""
    TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
|
||||
|
||||
|
||||
def update(file : IO[Any]) -> Tuple[Update, Update]:
    """Set facefusion.globals.target_path from the upload and toggle the matching preview.

    Clears the cached face reference first, because a new target invalidates it.
    Returns (image_update, video_update).
    """
    clear_face_reference()
    file_name = file.name if file else None
    if file_name and is_image(file_name):
        facefusion.globals.target_path = file_name
        return gradio.update(value = file_name, visible = True), gradio.update(value = None, visible = False)
    if file_name and is_video(file_name):
        facefusion.globals.target_path = file_name
        return gradio.update(value = None, visible = False), gradio.update(value = file_name, visible = True)
    facefusion.globals.target_path = None
    return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False)
|
||||
44
facefusion/uis/components/temp_frame.py
Normal file
44
facefusion/uis/components/temp_frame.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from typing import Optional
|
||||
import gradio
|
||||
|
||||
import facefusion.choices
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.typing import TempFrameFormat
|
||||
|
||||
from facefusion.uis.typing import Update
|
||||
|
||||
# Module-level handles to the rendered Gradio components (populated by render()).
TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Create the temp frame format dropdown and quality slider."""
    global TEMP_FRAME_FORMAT_DROPDOWN
    global TEMP_FRAME_QUALITY_SLIDER

    with gradio.Box():
        dropdown_args = {
            'label': wording.get('temp_frame_format_dropdown_label'),
            'choices': facefusion.choices.temp_frame_format,
            'value': facefusion.globals.temp_frame_format
        }
        slider_args = {
            'label': wording.get('temp_frame_quality_slider_label'),
            'value': facefusion.globals.temp_frame_quality,
            'step': 1
        }
        TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(**dropdown_args)
        TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(**slider_args)
|
||||
|
||||
|
||||
def listen() -> None:
    """Bind dropdown selection and slider movement to the matching globals."""
    TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN, outputs = TEMP_FRAME_FORMAT_DROPDOWN)
    TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER, outputs = TEMP_FRAME_QUALITY_SLIDER)
|
||||
|
||||
|
||||
def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> Update:
    """Persist the selected temp frame format and echo it back to the dropdown."""
    facefusion.globals.temp_frame_format = temp_frame_format
    return gradio.update(value = temp_frame_format)
|
||||
|
||||
|
||||
def update_temp_frame_quality(temp_frame_quality : int) -> Update:
    """Persist the selected temp frame quality and echo it back to the slider."""
    facefusion.globals.temp_frame_quality = temp_frame_quality
    return gradio.update(value = temp_frame_quality)
|
||||
66
facefusion/uis/components/trim_frame.py
Normal file
66
facefusion/uis/components/trim_frame.py
Normal file
@@ -0,0 +1,66 @@
|
||||
from time import sleep
|
||||
from typing import Any, Dict, Tuple, Optional
|
||||
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import wording
|
||||
from facefusion.capturer import get_video_frame_total
|
||||
from facefusion.uis import core as ui
|
||||
from facefusion.uis.typing import Update
|
||||
from facefusion.utilities import is_video
|
||||
|
||||
# Module-level handles to the rendered Gradio components (populated by render()).
TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None
|
||||
|
||||
|
||||
def render() -> None:
    """Create start/end trim sliders, pre-ranged from the current target video.

    Both sliders stay hidden until a valid video target is present.
    """
    global TRIM_FRAME_START_SLIDER
    global TRIM_FRAME_END_SLIDER

    with gradio.Box():
        trim_frame_start_slider_args : Dict[str, Any] = {
            'label': wording.get('trim_frame_start_slider_label'),
            'value': facefusion.globals.trim_frame_start,
            'step': 1,
            'visible': False
        }
        trim_frame_end_slider_args : Dict[str, Any] = {
            'label': wording.get('trim_frame_end_slider_label'),
            'value': facefusion.globals.trim_frame_end,
            'step': 1,
            'visible': False
        }
        if is_video(facefusion.globals.target_path):
            video_frame_total = get_video_frame_total(facefusion.globals.target_path)
            trim_frame_start_slider_args.update(maximum = video_frame_total, visible = True)
            trim_frame_end_slider_args.update(value = video_frame_total, maximum = video_frame_total, visible = True)
        with gradio.Row():
            TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
            TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
|
||||
|
||||
|
||||
def listen() -> None:
    """Bind the sliders to their globals and refresh both when the target changes."""
    target_file = ui.get_component('target_file')
    if target_file:
        target_file.change(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ])
    slider_names = (
        (TRIM_FRAME_START_SLIDER, 'trim_frame_start'),
        (TRIM_FRAME_END_SLIDER, 'trim_frame_end')
    )
    for slider, name in slider_names:
        # bind `name` as a default to avoid the late-binding closure pitfall
        slider.change(lambda value, name = name : update_number(name, int(value)), inputs = slider, outputs = slider)
|
||||
|
||||
|
||||
def remote_update() -> Tuple[Update, Update]:
    """Recompute slider ranges after the target file changed.

    Sleeps briefly so the target component's own handler can update
    facefusion.globals.target_path first — TODO confirm this ordering
    assumption against the target component.
    """
    sleep(0.1)
    if is_video(facefusion.globals.target_path):
        video_frame_total = get_video_frame_total(facefusion.globals.target_path)
        facefusion.globals.trim_frame_start = 0
        facefusion.globals.trim_frame_end = video_frame_total
        start_update = gradio.update(value = 0, maximum = video_frame_total, visible = True)
        end_update = gradio.update(value = video_frame_total, maximum = video_frame_total, visible = True)
        return start_update, end_update
    # no video target: hide both sliders
    return gradio.update(value = None, maximum = None, visible = False), gradio.update(value = None, maximum = None, visible = False)
|
||||
|
||||
|
||||
def update_number(name : str, value : int) -> Update:
    """Assign *value* to the facefusion.globals attribute *name* and echo it back."""
    setattr(facefusion.globals, name, value)
    return gradio.update(value = value)
|
||||
67
facefusion/uis/core.py
Normal file
67
facefusion/uis/core.py
Normal file
@@ -0,0 +1,67 @@
|
||||
from typing import Dict, Optional, Any
|
||||
import importlib
|
||||
import sys
|
||||
import cv2
|
||||
import gradio
|
||||
|
||||
import facefusion.globals
|
||||
from facefusion import metadata, wording
|
||||
from facefusion.typing import Frame
|
||||
from facefusion.uis.typing import Component, ComponentName
|
||||
|
||||
# Registry of UI components shared between layout modules, keyed by name.
COMPONENTS: Dict[ComponentName, Component] = {}
# Methods every UI layout module must implement (checked in load_ui_layout_module).
UI_LAYOUT_METHODS =\
[
    'pre_check',
    'render',
    'listen'
]
|
||||
|
||||
|
||||
def launch() -> None:
    """Assemble every configured UI layout into one Blocks app and serve it."""
    window_title = metadata.get('name') + ' ' + metadata.get('version')
    with gradio.Blocks(theme = get_theme(), title = window_title) as ui:
        for ui_layout in facefusion.globals.ui_layouts:
            ui_layout_module = load_ui_layout_module(ui_layout)
            ui_layout_module.pre_check()
            ui_layout_module.render()
            ui_layout_module.listen()
    ui.launch(show_api = False)
|
||||
|
||||
|
||||
def load_ui_layout_module(ui_layout : str) -> Any:
    """Import a layout module by name and verify it exposes the required methods.

    Exits the process with a descriptive message when the module is missing
    or does not implement the full layout interface.
    """
    try:
        ui_layout_module = importlib.import_module('facefusion.uis.layouts.' + ui_layout)
        if not all(hasattr(ui_layout_module, method_name) for method_name in UI_LAYOUT_METHODS):
            raise NotImplementedError
    except ModuleNotFoundError:
        sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout))
    except NotImplementedError:
        sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout))
    return ui_layout_module
|
||||
|
||||
|
||||
def get_theme() -> gradio.Theme:
    """Return the facefusion Soft theme with its color and typography overrides."""
    theme = gradio.themes.Soft(
        primary_hue = gradio.themes.colors.red,
        secondary_hue = gradio.themes.colors.gray,
        font = gradio.themes.GoogleFont('Inter')
    )
    return theme.set(
        background_fill_primary = '*neutral_50',
        block_label_text_size = '*text_sm',
        block_title_text_size = '*text_sm'
    )
|
||||
|
||||
|
||||
def get_component(name: ComponentName) -> Optional[Component]:
    """Look up a registered component by name, or None when absent."""
    return COMPONENTS.get(name)
|
||||
|
||||
|
||||
def register_component(name: ComponentName, component: Component) -> None:
    """Expose *component* under *name* so other UI modules can find it."""
    COMPONENTS[name] = component
|
||||
|
||||
|
||||
def normalize_frame(frame : Frame) -> Frame:
    # Gradio displays RGB while OpenCV delivers BGR frames.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
||||
37
facefusion/uis/layouts/benchmark.py
Normal file
37
facefusion/uis/layouts/benchmark.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import gradio
|
||||
|
||||
from facefusion.uis.components import about, processors, execution, benchmark
|
||||
from facefusion.utilities import conditional_download
|
||||
|
||||
|
||||
def pre_check() -> bool:
    """Download the example assets the benchmark layout runs against."""
    base_url = 'https://huggingface.co/facefusion/examples/resolve/main/'
    asset_names =\
    [
        'source.jpg',
        'target-240p.mp4',
        'target-360p.mp4',
        'target-540p.mp4',
        'target-720p.mp4',
        'target-1080p.mp4',
        'target-1440p.mp4',
        'target-2160p.mp4'
    ]
    conditional_download('.assets/examples', [ base_url + asset_name for asset_name in asset_names ])
    return True
|
||||
|
||||
|
||||
def render() -> gradio.Blocks:
    """Lay out the benchmark page: controls on the left, results on the right."""
    with gradio.Blocks() as layout:
        with gradio.Row():
            with gradio.Column(scale = 2):
                about.render()
                processors.render()
                execution.render()
            with gradio.Column(scale = 5):
                benchmark.render()
    return layout
|
||||
|
||||
|
||||
def listen() -> None:
    """Register the event handlers of every component in this layout."""
    for component in [ processors, execution, benchmark ]:
        component.listen()
|
||||
44
facefusion/uis/layouts/default.py
Normal file
44
facefusion/uis/layouts/default.py
Normal file
@@ -0,0 +1,44 @@
|
||||
import gradio
|
||||
|
||||
from facefusion.uis.components import about, processors, execution, temp_frame, settings, source, target, preview, trim_frame, face_analyser, face_selector, output_settings, output
|
||||
|
||||
|
||||
def pre_check() -> bool:
    """The default layout needs no downloads before rendering."""
    return True
|
||||
|
||||
|
||||
def render() -> gradio.Blocks:
    """Lay out the default page across three columns: settings, files, previews."""
    with gradio.Blocks() as layout:
        with gradio.Row():
            with gradio.Column(scale = 2):
                about.render()
                processors.render()
                execution.render()
                temp_frame.render()
                settings.render()
            with gradio.Column(scale = 2):
                source.render()
                target.render()
                output_settings.render()
                output.render()
            with gradio.Column(scale = 3):
                preview.render()
                trim_frame.render()
                face_selector.render()
                face_analyser.render()
    return layout
|
||||
|
||||
|
||||
def listen() -> None:
    """Register the event handlers of every component in this layout."""
    components =\
    [
        processors,
        execution,
        settings,
        temp_frame,
        source,
        target,
        preview,
        trim_frame,
        face_selector,
        face_analyser,
        output_settings,
        output
    ]
    for component in components:
        component.listen()
|
||||
18
facefusion/uis/typing.py
Normal file
18
facefusion/uis/typing.py
Normal file
@@ -0,0 +1,18 @@
|
||||
from typing import Literal, Dict, Any, Union
import gradio

# Union of the gradio component types that can be registered and looked up by name.
# NOTE: this was previously written as `gradio.File or gradio.Image or ...`, which
# evaluates to gradio.File alone (`or` returns its first truthy operand) and made
# the alias wrong for every other component type.
Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider]
# Names under which components may be registered in facefusion.uis.core.
ComponentName = Literal\
[
    'source_file',
    'target_file',
    'preview_frame_slider',
    'face_recognition_dropdown',
    'reference_face_position_gallery',
    'reference_face_distance_slider',
    'face_analyser_direction_dropdown',
    'face_analyser_age_dropdown',
    'face_analyser_gender_dropdown',
    'frame_processors_checkbox_group'
]
# Payload returned to gradio event handlers (the dict form of gradio.update(...)).
Update = Dict[Any, Any]
|
||||
Reference in New Issue
Block a user