Rename to read_video_frame

This commit is contained in:
henryruhs
2025-01-24 15:28:35 +01:00
parent 0e6ee69c53
commit 4f32ed7868
8 changed files with 23 additions and 23 deletions

View File

@@ -9,7 +9,7 @@ from facefusion.download import conditional_download_hashes, conditional_downloa
from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.typing import Detection, DownloadScope, Fps, InferencePool, ModelOptions, ModelSet, Score, VisionFrame
from facefusion.vision import detect_video_fps, get_video_frame, read_image, resize_frame_resolution
from facefusion.vision import detect_video_fps, read_image, read_video_frame, resize_frame_resolution
STREAM_COUNTER = 0
@@ -93,7 +93,7 @@ def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int
for frame_number in frame_range:
if frame_number % int(video_fps) == 0:
vision_frame = get_video_frame(video_path, frame_number)
vision_frame = read_video_frame(video_path, frame_number)
if analyse_frame(vision_frame):
counter += 1
rate = counter * int(video_fps) / len(frame_range) * 100

View File

@@ -26,7 +26,7 @@ from facefusion.program_helper import validate_args
from facefusion.statistics import conditional_log_statistics
from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, move_temp_file, resolve_temp_frame_paths
from facefusion.typing import Args, ErrorCode
from facefusion.vision import get_video_frame, pack_resolution, read_image, read_static_images, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution
from facefusion.vision import pack_resolution, read_image, read_static_images, read_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution
def cli() -> None:
@@ -348,7 +348,7 @@ def conditional_append_reference_faces() -> None:
source_faces = get_many_faces(source_frames)
source_face = get_average_face(source_faces)
if is_video(state_manager.get_item('target_path')):
reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
else:
reference_frame = read_image(state_manager.get_item('target_path'))
reference_faces = sort_and_filter_faces(get_many_faces([ reference_frame ]))

View File

@@ -23,7 +23,7 @@ from facefusion.processors.typing import ExpressionRestorerInputs, LivePortraitE
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import get_video_frame, read_image, read_static_image, write_image
from facefusion.vision import read_image, read_static_image, read_video_frame, write_image
@lru_cache(maxsize = None)
@@ -264,7 +264,7 @@ def process_frames(source_path : List[str], queue_payloads : List[QueuePayload],
frame_number = queue_payload.get('frame_number')
if state_manager.get_item('trim_frame_start'):
frame_number += state_manager.get_item('trim_frame_start')
source_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number)
source_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number)
target_vision_path = queue_payload.get('frame_path')
target_vision_frame = read_image(target_vision_path)
output_vision_frame = process_frame(

View File

@@ -14,7 +14,7 @@ from facefusion.typing import FaceSelectorMode, FaceSelectorOrder, Gender, Race,
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
from facefusion.uis.typing import ComponentOptions
from facefusion.uis.ui_helper import convert_str_none
from facefusion.vision import get_video_frame, normalize_frame_color, read_static_image
from facefusion.vision import normalize_frame_color, read_static_image, read_video_frame
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -46,7 +46,7 @@ def render() -> None:
reference_frame = read_static_image(state_manager.get_item('target_path'))
reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame)
if is_video(state_manager.get_item('target_path')):
reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
reference_face_gallery_options['value'] = extract_gallery_frames(reference_frame)
FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
label = wording.get('uis.face_selector_mode_dropdown'),
@@ -197,7 +197,7 @@ def update_reference_position_gallery() -> gradio.Gallery:
temp_vision_frame = read_static_image(state_manager.get_item('target_path'))
gallery_vision_frames = extract_gallery_frames(temp_vision_frame)
if is_video(state_manager.get_item('target_path')):
temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
gallery_vision_frames = extract_gallery_frames(temp_vision_frame)
if gallery_vision_frames:
return gradio.Gallery(value = gallery_vision_frames)

View File

@@ -18,7 +18,7 @@ from facefusion.processors.core import get_processors_modules
from facefusion.typing import AudioFrame, Face, FaceSet, VisionFrame
from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component
from facefusion.uis.typing import ComponentOptions
from facefusion.vision import count_video_frame_total, detect_frame_orientation, get_video_frame, normalize_frame_color, read_static_image, read_static_images, resize_frame_resolution
from facefusion.vision import count_video_frame_total, detect_frame_orientation, normalize_frame_color, read_static_image, read_static_images, read_video_frame, resize_frame_resolution
PREVIEW_IMAGE : Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
@@ -60,7 +60,7 @@ def render() -> None:
preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]
if is_video(state_manager.get_item('target_path')):
temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
preview_image_options['value'] = normalize_frame_color(preview_vision_frame)
preview_image_options['elem_classes'] = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ]
@@ -184,7 +184,7 @@ def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
def slide_preview_image(frame_number : int = 0) -> gradio.Image:
if is_video(state_manager.get_item('target_path')):
preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path'), frame_number))
preview_vision_frame = normalize_frame_color(read_video_frame(state_manager.get_item('target_path'), frame_number))
preview_vision_frame = resize_frame_resolution(preview_vision_frame, (1024, 1024))
return gradio.Image(value = preview_vision_frame)
return gradio.Image(value = None)
@@ -222,7 +222,7 @@ def update_preview_image(frame_number : int = 0) -> gradio.Image:
return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ])
if is_video(state_manager.get_item('target_path')):
temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number)
temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number)
preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
preview_vision_frame = normalize_frame_color(preview_vision_frame)
return gradio.Image(value = preview_vision_frame, elem_classes = [ 'image-preview', 'is-' + detect_frame_orientation(preview_vision_frame) ])

View File

@@ -7,7 +7,7 @@ from facefusion.face_store import clear_reference_faces, clear_static_faces
from facefusion.filesystem import get_file_size, is_image, is_video
from facefusion.uis.core import register_ui_component
from facefusion.uis.typing import ComponentOptions, File
from facefusion.vision import get_video_frame, normalize_frame_color
from facefusion.vision import normalize_frame_color, read_video_frame
FILE_SIZE_LIMIT = 512 * 1024 * 1024
@@ -43,7 +43,7 @@ def render() -> None:
target_image_options['visible'] = True
if is_target_video:
if get_file_size(state_manager.get_item('target_path')) > FILE_SIZE_LIMIT:
preview_vision_frame = normalize_frame_color(get_video_frame(state_manager.get_item('target_path')))
preview_vision_frame = normalize_frame_color(read_video_frame(state_manager.get_item('target_path')))
target_image_options['value'] = preview_vision_frame
target_image_options['visible'] = True
else:
@@ -68,7 +68,7 @@ def update(file : File) -> Tuple[gradio.Image, gradio.Video]:
if file and is_video(file.name):
state_manager.set_item('target_path', file.name)
if get_file_size(file.name) > FILE_SIZE_LIMIT:
preview_vision_frame = normalize_frame_color(get_video_frame(file.name))
preview_vision_frame = normalize_frame_color(read_video_frame(file.name))
return gradio.Image(value = preview_vision_frame, visible = True), gradio.Video(value = None, visible = False)
return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True)
state_manager.clear_item('target_path')

View File

@@ -28,8 +28,8 @@ def read_static_images(image_paths : List[str]) -> List[VisionFrame]:
def read_image(image_path : str) -> Optional[VisionFrame]:
if is_image(image_path):
if is_windows():
image_binary = numpy.fromfile(image_path, dtype = numpy.uint8)
return cv2.imdecode(image_binary, cv2.IMREAD_COLOR)
image_buffer = numpy.fromfile(image_path, dtype = numpy.uint8)
return cv2.imdecode(image_buffer, cv2.IMREAD_COLOR)
return cv2.imread(image_path)
return None
@@ -78,7 +78,7 @@ def create_image_resolutions(resolution : Resolution) -> List[str]:
return resolutions
def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[VisionFrame]:
def read_video_frame(video_path : str, frame_number : int = 0) -> Optional[VisionFrame]:
if is_video(video_path):
video_capture = cv2.VideoCapture(video_path)
if video_capture.isOpened():

View File

@@ -3,7 +3,7 @@ import subprocess
import pytest
from facefusion.download import conditional_download
from facefusion.vision import calc_histogram_difference, count_trim_frame_total, count_video_frame_total, create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_duration, detect_video_fps, detect_video_resolution, get_video_frame, match_frame_color, normalize_resolution, pack_resolution, read_image, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution, write_image
from facefusion.vision import calc_histogram_difference, count_trim_frame_total, count_video_frame_total, create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_duration, detect_video_fps, detect_video_resolution, match_frame_color, normalize_resolution, pack_resolution, read_image, read_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution, write_image
from .helper import get_test_example_file, get_test_examples_directory, get_test_output_file, prepare_test_output_directory
@@ -68,9 +68,9 @@ def test_create_image_resolutions() -> None:
assert create_image_resolutions(None) == []
def test_get_video_frame() -> None:
assert hasattr(get_video_frame(get_test_example_file('target-240p-25fps.mp4')), '__array_interface__')
assert get_video_frame('invalid') is None
def test_read_video_frame() -> None:
assert hasattr(read_video_frame(get_test_example_file('target-240p-25fps.mp4')), '__array_interface__')
assert read_video_frame('invalid') is None
def test_count_video_frame_total() -> None: