Files
facefusion/tests/test_face_analyser.py
Henry Ruhs 8bf9170577 3.5.0 (#977)
* Mark as NEXT

* Reduce caching to avoid RAM explosion

* Reduce caching to avoid RAM explosion

* Update dependencies

* add face-detector-pad-factor

* update facefusion.ini

* fix test

* change pad to margin

* fix order

* add prepare margin

* use 50% max margin

* Minor fixes part2

* Minor fixes part3

* Minor fixes part4

* Minor fixes part1

* Downgrade onnxruntime as of BiRefNet broken on CPU

add test

update

update facefusion.ini

add birefnet

* rename models

add more models

* Fix versions

* Add .claude to gitignore

* add normalize color

add 4 channel

add colors

* workflows

* cleanup

* cleanup

* cleanup

* cleanup

* add more models (#961)

* Fix naming

* changes

* Fix style and mock Gradio

* Fix style and mock Gradio

* Fix style and mock Gradio

* apply clamp

* remove clamp

* Add normalizer test

* Introduce sanitizer for the rescue (#963)

* Introduce sanitizer for the rescue

* Introduce sanitizer for the rescue

* Introduce sanitizer for the rescue

* prepare ffmpeg for alpha support

* Some cleanup

* Some cleanup

* Fix CI

* List as TypeAlias is not allowed (#967)

* List as TypeAlias is not allowed

* List as TypeAlias is not allowed

* List as TypeAlias is not allowed

* List as TypeAlias is not allowed

* Add mpeg and mxf support (#968)

* Add mpeg support

* Add mxf support

* Adjust fix_xxx_encoder for the new formats

* Extend output pattern for batch-run (#969)

* Extend output pattern for batch-run

* Add {target_extension} to allowed mixed files

* Catch invalid output pattern keys

* alpha support

* cleanup

* cleanup

* add ProcessorOutputs type

* fix preview and streamer, support alpha for background_remover

* Refactor/open close processors (#972)

* Introduce open/close processors

* Add locales for translator

* Introduce __autoload__ for translator

* More cleanup

* Fix import issues

* Resolve the scope situation for locals

* Fix installer by not using translator

* Fixes after merge

* Fixes after merge

* Fix translator keys in ui

* Use LOCALS in installer

* Update and partial fix DirectML

* Use latest onnxruntime

* Fix performance

* Fix lint issues

* fix mask

* fix lint

* fix lint

* Remove default from translator.get()

* remove 'framerate='

* fix test

* Rename and reorder models

* Align naming

* add alpha preview

* fix frame-by-frame

* Add alpha effect via css

* preview support alpha channel

* fix preview modes

* Use official assets repositories

* Add support for u2net_cloth

* fix naming

* Add more models

* Add vendor, license and year direct to the models

* Add vendor, license and year direct to the models

* Update dependencies, Minor CSS adjustment

* Ready for 3.5.0

* Fix naming

* Update about messages

* Fix return

* Use groups to show/hide

* Update preview

* Conditional merge mask

* Conditional merge mask

* Fix import order

---------

Co-authored-by: harisreedhar <h4harisreedhar.s.s@gmail.com>
Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
2025-11-03 14:05:15 +01:00

132 lines
4.4 KiB
Python

import subprocess
import pytest
from facefusion import face_classifier, face_detector, face_landmarker, face_recognizer, state_manager
from facefusion.download import conditional_download
from facefusion.face_analyser import get_many_faces
from facefusion.vision import read_static_image
from .helper import get_test_example_file, get_test_examples_directory
@pytest.fixture(scope = 'module', autouse = True)
def before_all() -> None:
	"""
	One-time module setup: download the example source image, derive three
	progressively cropped variants via ffmpeg, seed the state manager with
	the execution and detector defaults, and warm up the analyser models.
	"""
	conditional_download(get_test_examples_directory(),
	[
		'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/source.jpg'
	])
	# Create 80% / 70% / 60% center-cropped copies of the source image.
	crop_filters =\
	{
		'source-80crop.jpg': 'crop=iw*0.8:ih*0.8',
		'source-70crop.jpg': 'crop=iw*0.7:ih*0.7',
		'source-60crop.jpg': 'crop=iw*0.6:ih*0.6'
	}
	for output_name, crop_filter in crop_filters.items():
		subprocess.run([ 'ffmpeg', '-i', get_test_example_file('source.jpg'), '-vf', crop_filter, get_test_example_file(output_name) ])
	state_manager.init_item('execution_device_ids', [ '0' ])
	state_manager.init_item('execution_providers', [ 'cpu' ])
	state_manager.init_item('download_providers', [ 'github' ])
	state_manager.init_item('face_detector_angles', [ 0 ])
	state_manager.init_item('face_detector_model', 'many')
	state_manager.init_item('face_detector_score', 0.5)
	state_manager.init_item('face_landmarker_model', 'many')
	state_manager.init_item('face_landmarker_score', 0.5)
	# The detector model varies per test, so face_detector.pre_check() is
	# invoked inside each test instead of here.
	face_classifier.pre_check()
	face_landmarker.pre_check()
	face_recognizer.pre_check()
@pytest.fixture(autouse = True)
def before_each() -> None:
	"""Drop cached inference sessions before every test to keep runs isolated."""
	for analyser_module in [ face_classifier, face_detector, face_landmarker, face_recognizer ]:
		analyser_module.clear_inference_pool()
def test_get_one_face_with_retinaface() -> None:
	"""Every cropped variant of the source image yields exactly one face with retinaface."""
	state_manager.init_item('face_detector_model', 'retinaface')
	state_manager.init_item('face_detector_size', '320x320')
	state_manager.init_item('face_detector_margin', (0, 0, 0, 0))
	face_detector.pre_check()
	for image_name in [ 'source.jpg', 'source-80crop.jpg', 'source-70crop.jpg', 'source-60crop.jpg' ]:
		test_frame = read_static_image(get_test_example_file(image_name))
		assert len(get_many_faces([ test_frame ])) == 1
def test_get_one_face_with_scrfd() -> None:
	"""Every cropped variant of the source image yields exactly one face with scrfd."""
	state_manager.init_item('face_detector_model', 'scrfd')
	state_manager.init_item('face_detector_size', '640x640')
	state_manager.init_item('face_detector_margin', (0, 0, 0, 0))
	face_detector.pre_check()
	for image_name in [ 'source.jpg', 'source-80crop.jpg', 'source-70crop.jpg', 'source-60crop.jpg' ]:
		test_frame = read_static_image(get_test_example_file(image_name))
		assert len(get_many_faces([ test_frame ])) == 1
def test_get_one_face_with_yoloface() -> None:
	"""Every cropped variant of the source image yields exactly one face with yoloface."""
	state_manager.init_item('face_detector_model', 'yoloface')
	state_manager.init_item('face_detector_size', '640x640')
	state_manager.init_item('face_detector_margin', (0, 0, 0, 0))
	face_detector.pre_check()
	for image_name in [ 'source.jpg', 'source-80crop.jpg', 'source-70crop.jpg', 'source-60crop.jpg' ]:
		test_frame = read_static_image(get_test_example_file(image_name))
		assert len(get_many_faces([ test_frame ])) == 1
def test_get_one_face_with_yunet() -> None:
	"""Every cropped variant of the source image yields exactly one face with yunet."""
	state_manager.init_item('face_detector_model', 'yunet')
	state_manager.init_item('face_detector_size', '640x640')
	state_manager.init_item('face_detector_margin', (0, 0, 0, 0))
	face_detector.pre_check()
	for image_name in [ 'source.jpg', 'source-80crop.jpg', 'source-70crop.jpg', 'source-60crop.jpg' ]:
		test_frame = read_static_image(get_test_example_file(image_name))
		assert len(get_many_faces([ test_frame ])) == 1
def test_get_many_faces() -> None:
	"""A batch of three identical frames produces three face results."""
	source_frame = read_static_image(get_test_example_file('source.jpg'))
	many_faces = get_many_faces([ source_frame ] * 3)
	assert len(many_faces) == 3