diff --git a/facefusion/content_analyser.py b/facefusion/content_analyser.py index 59abc1f..151bf67 100644 --- a/facefusion/content_analyser.py +++ b/facefusion/content_analyser.py @@ -9,7 +9,7 @@ from facefusion.download import conditional_download_hashes, conditional_downloa from facefusion.filesystem import resolve_relative_path from facefusion.thread_helper import conditional_thread_semaphore from facefusion.typing import Detection, DownloadScope, Fps, InferencePool, ModelOptions, ModelSet, Score, VisionFrame -from facefusion.vision import detect_video_fps, get_video_frame, read_image, resize_frame_resolution +from facefusion.vision import detect_video_fps, read_image, read_video_frame, resize_frame_resolution STREAM_COUNTER = 0 @@ -93,7 +93,7 @@ def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int for frame_number in frame_range: if frame_number % int(video_fps) == 0: - vision_frame = get_video_frame(video_path, frame_number) + vision_frame = read_video_frame(video_path, frame_number) if analyse_frame(vision_frame): counter += 1 rate = counter * int(video_fps) / len(frame_range) * 100 diff --git a/facefusion/core.py b/facefusion/core.py index d3cf9cb..19ac0cd 100755 --- a/facefusion/core.py +++ b/facefusion/core.py @@ -26,7 +26,7 @@ from facefusion.program_helper import validate_args from facefusion.statistics import conditional_log_statistics from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, move_temp_file, resolve_temp_frame_paths from facefusion.typing import Args, ErrorCode -from facefusion.vision import get_video_frame, pack_resolution, read_image, read_static_images, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution +from facefusion.vision import pack_resolution, read_image, read_static_images, read_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, 
unpack_resolution def cli() -> None: @@ -348,7 +348,7 @@ def conditional_append_reference_faces() -> None: source_faces = get_many_faces(source_frames) source_face = get_average_face(source_faces) if is_video(state_manager.get_item('target_path')): - reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) + reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) else: reference_frame = read_image(state_manager.get_item('target_path')) reference_faces = sort_and_filter_faces(get_many_faces([ reference_frame ])) diff --git a/facefusion/processors/modules/expression_restorer.py b/facefusion/processors/modules/expression_restorer.py index e5a276a..f5b64d8 100755 --- a/facefusion/processors/modules/expression_restorer.py +++ b/facefusion/processors/modules/expression_restorer.py @@ -23,7 +23,7 @@ from facefusion.processors.typing import ExpressionRestorerInputs, LivePortraitE from facefusion.program_helper import find_argument_group from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame -from facefusion.vision import get_video_frame, read_image, read_static_image, write_image +from facefusion.vision import read_image, read_static_image, read_video_frame, write_image @lru_cache(maxsize = None) @@ -264,7 +264,7 @@ def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], frame_number = queue_payload.get('frame_number') if state_manager.get_item('trim_frame_start'): frame_number += state_manager.get_item('trim_frame_start') - source_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number) + source_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number) target_vision_path 
= queue_payload.get('frame_path') target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( diff --git a/facefusion/uis/components/face_selector.py b/facefusion/uis/components/face_selector.py index 2cb3157..bcb94e4 100644 --- a/facefusion/uis/components/face_selector.py +++ b/facefusion/uis/components/face_selector.py @@ -14,7 +14,7 @@ from facefusion.typing import FaceSelectorMode, FaceSelectorOrder, Gender, Race, from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component from facefusion.uis.typing import ComponentOptions from facefusion.uis.ui_helper import convert_str_none -from facefusion.vision import get_video_frame, normalize_frame_color, read_static_image +from facefusion.vision import normalize_frame_color, read_static_image, read_video_frame FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None FACE_SELECTOR_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None @@ -46,7 +46,7 @@ def render() -> None: reference_frame = read_static_image(state_manager.get_item('target_path')) reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame) if is_video(state_manager.get_item('target_path')): - reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) + reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame) FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_selector_mode_dropdown'), @@ -197,7 +197,7 @@ def update_reference_position_gallery() -> gradio.Gallery: temp_vision_frame = read_static_image(state_manager.get_item('target_path')) gallery_vision_frames = extract_gallery_frames(temp_vision_frame) if is_video(state_manager.get_item('target_path')): - temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), 
state_manager.get_item('reference_frame_number')) + temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) gallery_vision_frames = extract_gallery_frames(temp_vision_frame) if gallery_vision_frames: return gradio.Gallery(value = gallery_vision_frames) diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py index a38338f..164c00a 100755 --- a/facefusion/uis/components/preview.py +++ b/facefusion/uis/components/preview.py @@ -18,7 +18,7 @@ from facefusion.processors.core import get_processors_modules from facefusion.typing import AudioFrame, Face, FaceSet, VisionFrame from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component from facefusion.uis.typing import ComponentOptions -from facefusion.vision import count_video_frame_total, detect_frame_orientation, get_video_frame, normalize_frame_color, read_static_image, read_static_images, resize_frame_resolution +from facefusion.vision import count_video_frame_total, detect_frame_orientation, normalize_frame_color, read_static_image, read_static_images, read_video_frame, resize_frame_resolution PREVIEW_IMAGE : Optional[gradio.Image] = None PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None @@ -60,7 +60,7 @@ def render() -> None: preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ] if is_video(state_manager.get_item('target_path')): - temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) + temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame) preview_image_options['value'] = normalize_frame_color(preview_vision_frame) preview_image_options['elem_classes'] = [ 
'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ] @@ -184,7 +184,7 @@ def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image: def slide_preview_image(frame_number : int = 0) -> gradio.Image: if is_video(state_manager.get_item('target_path')): - preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path'), frame_number)) + preview_vision_frame = normalize_frame_color(read_video_frame(state_manager.get_item('target_path'), frame_number)) preview_vision_frame = resize_frame_resolution(preview_vision_frame, (1024, 1024)) return gradio.Image(value = preview_vision_frame) return gradio.Image(value = None) @@ -222,7 +222,7 @@ def update_preview_image(frame_number : int = 0) -> gradio.Image: return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]) if is_video(state_manager.get_item('target_path')): - temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number) + temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame) preview_vision_frame = normalize_frame_color(preview_vision_frame) return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]) diff --git a/facefusion/uis/components/target.py b/facefusion/uis/components/target.py index a87cf00..f8c44bf 100644 --- a/facefusion/uis/components/target.py +++ b/facefusion/uis/components/target.py @@ -7,7 +7,7 @@ from facefusion.face_store import clear_reference_faces, clear_static_faces from facefusion.filesystem import get_file_size, is_image, is_video from facefusion.uis.core import register_ui_component from facefusion.uis.typing import ComponentOptions, File -from facefusion.vision import get_video_frame, 
normalize_frame_color +from facefusion.vision import normalize_frame_color, read_video_frame FILE_SIZE_LIMIT = 512 * 1024 * 1024 @@ -43,7 +43,7 @@ def render() -> None: target_image_options['visible'] = True if is_target_video: if get_file_size(state_manager.get_item('target_path')) > FILE_SIZE_LIMIT: - preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path'))) + preview_vision_frame = normalize_frame_color(read_video_frame(state_manager.get_item('target_path'))) target_image_options['value'] = preview_vision_frame target_image_options['visible'] = True else: @@ -68,7 +68,7 @@ def update(file : File) -> Tuple[gradio.Image, gradio.Video]: if file and is_video(file.name): state_manager.set_item('target_path', file.name) if get_file_size(file.name) > FILE_SIZE_LIMIT: - preview_vision_frame = normalize_frame_color(get_video_frame(file.name)) + preview_vision_frame = normalize_frame_color(read_video_frame(file.name)) return gradio.Image(value = preview_vision_frame, visible = True), gradio.Video(value = None, visible = False) return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True) state_manager.clear_item('target_path') diff --git a/facefusion/vision.py b/facefusion/vision.py index 06773e3..02b7aed 100644 --- a/facefusion/vision.py +++ b/facefusion/vision.py @@ -28,8 +28,8 @@ def read_static_images(image_paths : List[str]) -> List[VisionFrame]: def read_image(image_path : str) -> Optional[VisionFrame]: if is_image(image_path): if is_windows(): - image_binary = numpy.fromfile(image_path, dtype = numpy.uint8) - return cv2.imdecode(image_binary, cv2.IMREAD_COLOR) + image_buffer = numpy.fromfile(image_path, dtype = numpy.uint8) + return cv2.imdecode(image_buffer, cv2.IMREAD_COLOR) return cv2.imread(image_path) return None @@ -78,7 +78,7 @@ def create_image_resolutions(resolution : Resolution) -> List[str]: return resolutions -def get_video_frame(video_path : str, frame_number : int = 0) -> 
Optional[VisionFrame]: +def read_video_frame(video_path : str, frame_number : int = 0) -> Optional[VisionFrame]: if is_video(video_path): video_capture = cv2.VideoCapture(video_path) if video_capture.isOpened(): diff --git a/tests/test_vision.py b/tests/test_vision.py index bd16476..1f11558 100644 --- a/tests/test_vision.py +++ b/tests/test_vision.py @@ -3,7 +3,7 @@ import subprocess import pytest from facefusion.download import conditional_download -from facefusion.vision import calc_histogram_difference, count_trim_frame_total, count_video_frame_total, create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_duration, detect_video_fps, detect_video_resolution, get_video_frame, match_frame_color, normalize_resolution, pack_resolution, read_image, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution, write_image +from facefusion.vision import calc_histogram_difference, count_trim_frame_total, count_video_frame_total, create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_duration, detect_video_fps, detect_video_resolution, match_frame_color, normalize_resolution, pack_resolution, read_image, read_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution, write_image from .helper import get_test_example_file, get_test_examples_directory, get_test_output_file, prepare_test_output_directory @@ -68,9 +68,9 @@ def test_create_image_resolutions() -> None: assert create_image_resolutions(None) == [] -def test_get_video_frame() -> None: - assert hasattr(get_video_frame(get_test_example_file('target-240p-25fps.mp4')), '__array_interface__') - assert get_video_frame('invalid') is None +def test_read_video_frame() -> None: + assert hasattr(read_video_frame(get_test_example_file('target-240p-25fps.mp4')), '__array_interface__') + assert read_video_frame('invalid') is None def 
test_count_video_frame_total() -> None: