V0pr0S commited on
Commit
74f4dcc
·
0 Parent(s):

Initial commit for FaceFusion-Face-Swap-Hyperswap

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .editorconfig +8 -0
  2. .flake8 +6 -0
  3. .gitignore +6 -0
  4. LICENSE.md +3 -0
  5. README.md +25 -0
  6. app.py +21 -0
  7. facefusion.ico +0 -0
  8. facefusion.ini +123 -0
  9. facefusion.py +10 -0
  10. facefusion/__init__.py +0 -0
  11. facefusion/app_context.py +16 -0
  12. facefusion/args.py +154 -0
  13. facefusion/audio.py +143 -0
  14. facefusion/benchmarker.py +106 -0
  15. facefusion/choices.py +165 -0
  16. facefusion/cli_helper.py +35 -0
  17. facefusion/common_helper.py +84 -0
  18. facefusion/config.py +74 -0
  19. facefusion/content_analyser.py +221 -0
  20. facefusion/core.py +601 -0
  21. facefusion/curl_builder.py +27 -0
  22. facefusion/date_helper.py +28 -0
  23. facefusion/download.py +174 -0
  24. facefusion/execution.py +156 -0
  25. facefusion/exit_helper.py +26 -0
  26. facefusion/face_analyser.py +124 -0
  27. facefusion/face_classifier.py +134 -0
  28. facefusion/face_detector.py +323 -0
  29. facefusion/face_helper.py +254 -0
  30. facefusion/face_landmarker.py +222 -0
  31. facefusion/face_masker.py +240 -0
  32. facefusion/face_recognizer.py +87 -0
  33. facefusion/face_selector.py +115 -0
  34. facefusion/face_store.py +58 -0
  35. facefusion/ffmpeg.py +337 -0
  36. facefusion/ffmpeg_builder.py +260 -0
  37. facefusion/filesystem.py +210 -0
  38. facefusion/hash_helper.py +32 -0
  39. facefusion/inference_manager.py +74 -0
  40. facefusion/installer.py +96 -0
  41. facefusion/jobs/__init__.py +0 -0
  42. facefusion/jobs/job_helper.py +18 -0
  43. facefusion/jobs/job_list.py +34 -0
  44. facefusion/jobs/job_manager.py +265 -0
  45. facefusion/jobs/job_runner.py +112 -0
  46. facefusion/jobs/job_store.py +27 -0
  47. facefusion/json.py +22 -0
  48. facefusion/logger.py +48 -0
  49. facefusion/memory.py +21 -0
  50. facefusion/metadata.py +17 -0
.editorconfig ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ root = true
2
+
3
+ [*]
4
+ end_of_line = lf
5
+ insert_final_newline = true
6
+ indent_size = 4
7
+ indent_style = tab
8
+ trim_trailing_whitespace = true
.flake8 ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [flake8]
2
+ select = E22, E23, E24, E27, E3, E4, E7, F, I1, I2
3
+ per-file-ignores = facefusion.py:E402, install.py:E402
4
+ plugins = flake8-import-order
5
+ application_import_names = facefusion
6
+ import-order-style = pycharm
.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ __pycache__
2
+ .assets
3
+ .caches
4
+ .jobs
5
+ .idea
6
+ .vscode
LICENSE.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ OpenRAIL-AS license
2
+
3
+ Copyright (c) 2025 Henry Ruhs
README.md ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: FaceFusion Face Swap HYPERSWAP (CPU)
3
+ emoji: 👽
4
+ colorFrom: yellow
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 5.50.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ short_description: CPU = UNLIMITED (DUPLICATE this Space)!
12
+ ---
13
+
14
+ FaceFusion
15
+ ==========
16
+
17
+ > Industry leading face manipulation platform.
18
+
19
+ [![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci)
20
+ [![Coverage Status](https://img.shields.io/coveralls/facefusion/facefusion.svg)](https://coveralls.io/r/facefusion/facefusion)
21
+ ![License](https://img.shields.io/badge/license-OpenRAIL--AS-green)
22
+
23
+
24
+ -------------
25
+ Read the [documentation](https://docs.facefusion.io) for a deep dive.
app.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3

import os
import sys

import gradio

# Cap the native thread pools; these variables must be exported before the
# numeric libraries inside facefusion.core spin up their workers.
_THREAD_LIMITS =\
{
	'OMP_NUM_THREADS': '2',       # adjust based on CPU cores (e.g. 4 for quad-core)
	'OPENBLAS_NUM_THREADS': '1',  # prevents thread contention
	'MKL_NUM_THREADS': '1'        # for Intel MKL libraries
}
for _key, _value in _THREAD_LIMITS.items():
	os.environ[_key] = _value

print("gradio version:", gradio.__version__)

from facefusion import core

if __name__ == '__main__':
	# When launched without arguments (e.g. as a Space), default to the
	# CPU-only run command so the app starts without a GPU.
	if len(sys.argv) == 1:
		sys.argv.extend([
			"run",
			"--execution-providers", "cpu", # Force CPU-only mode
		])
	core.cli()
facefusion.ico ADDED
facefusion.ini ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [paths]
2
+ temp_path =
3
+ jobs_path =
4
+ source_paths =
5
+ target_path =
6
+ output_path =
7
+
8
+ [patterns]
9
+ source_pattern =
10
+ target_pattern =
11
+ output_pattern =
12
+
13
+ [face_detector]
14
+ face_detector_model =
15
+ face_detector_size =
16
+ face_detector_angles =
17
+ face_detector_score =
18
+
19
+ [face_landmarker]
20
+ face_landmarker_model =
21
+ face_landmarker_score =
22
+
23
+ [face_selector]
24
+ face_selector_mode = one
25
+ face_selector_order =
26
+ face_selector_age_start =
27
+ face_selector_age_end =
28
+ face_selector_gender =
29
+ face_selector_race =
30
+ reference_face_position =
31
+ reference_face_distance =
32
+ reference_frame_number =
33
+
34
+ [face_masker]
35
+ face_occluder_model =
36
+ face_parser_model =
37
+ face_mask_types =
38
+ face_mask_areas =
39
+ face_mask_regions =
40
+ face_mask_blur =
41
+ face_mask_padding =
42
+
43
+ [frame_extraction]
44
+ trim_frame_start =
45
+ trim_frame_end =
46
+ temp_frame_format =
47
+ keep_temp =
48
+
49
+ [output_creation]
50
+ output_image_quality = 100
51
+ output_image_resolution =
52
+ output_audio_encoder =
53
+ output_audio_quality =
54
+ output_audio_volume =
55
+ output_video_encoder =
56
+ output_video_preset =
57
+ output_video_quality =
58
+ output_video_resolution =
59
+ output_video_fps =
60
+
61
+ [processors]
62
+ processors = face_swapper
63
+ age_modifier_model =
64
+ age_modifier_direction =
65
+ deep_swapper_model =
66
+ deep_swapper_morph =
67
+ expression_restorer_model =
68
+ expression_restorer_factor =
69
+ face_debugger_items =
70
+ face_editor_model =
71
+ face_editor_eyebrow_direction =
72
+ face_editor_eye_gaze_horizontal =
73
+ face_editor_eye_gaze_vertical =
74
+ face_editor_eye_open_ratio =
75
+ face_editor_lip_open_ratio =
76
+ face_editor_mouth_grim =
77
+ face_editor_mouth_pout =
78
+ face_editor_mouth_purse =
79
+ face_editor_mouth_smile =
80
+ face_editor_mouth_position_horizontal =
81
+ face_editor_mouth_position_vertical =
82
+ face_editor_head_pitch =
83
+ face_editor_head_yaw =
84
+ face_editor_head_roll =
85
+ face_enhancer_model = gpen_bfr_512
86
+ face_enhancer_blend = 70
87
+ face_enhancer_weight =
88
+ face_swapper_model = hyperswap_1b_256
89
+ face_swapper_pixel_boost =
90
+ frame_colorizer_model =
91
+ frame_colorizer_size =
92
+ frame_colorizer_blend =
93
+ frame_enhancer_model =
94
+ frame_enhancer_blend =
95
+ lip_syncer_model =
96
+ lip_syncer_weight =
97
+
98
+ [uis]
99
+ open_browser =
100
+ ui_layouts =
101
+ ui_workflow =
102
+
103
+ [download]
104
+ download_providers =
105
+ download_scope =
106
+
107
+ [benchmark]
108
+ benchmark_resolutions =
109
+ benchmark_cycle_count =
110
+
111
+ [execution]
112
+ execution_device_id =
113
+ execution_providers =
114
+ execution_thread_count =
115
+ execution_queue_count =
116
+
117
+ [memory]
118
+ video_memory_strategy =
119
+ system_memory_limit =
120
+
121
+ [misc]
122
+ log_level =
123
+ halt_on_error =
facefusion.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3

import os

# Limit OpenMP to one thread; set before importing facefusion.core, which
# presumably loads threaded native libraries — TODO confirm.
os.environ['OMP_NUM_THREADS'] = '1'

from facefusion import core

if __name__ == '__main__':
	# Hand control to the command line interface.
	core.cli()
facefusion/__init__.py ADDED
File without changes
facefusion/app_context.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys

from facefusion.types import AppContext


def detect_app_context() -> AppContext:
	"""Walk the caller's stack and classify the runtime as 'cli' or 'ui'.

	A frame whose file lives under facefusion/jobs marks a CLI run, one under
	facefusion/uis marks a UI run; when neither appears, default to 'cli'.
	"""
	jobs_marker = os.path.join('facefusion', 'jobs')
	uis_marker = os.path.join('facefusion', 'uis')
	caller = sys._getframe(1)

	while caller:
		source_path = caller.f_code.co_filename
		if jobs_marker in source_path:
			return 'cli'
		if uis_marker in source_path:
			return 'ui'
		caller = caller.f_back
	return 'cli'
facefusion/args.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from facefusion import state_manager
from facefusion.filesystem import get_file_name, is_image, is_video, resolve_file_paths
from facefusion.jobs import job_store
from facefusion.normalizer import normalize_fps, normalize_padding
from facefusion.processors.core import get_processors_modules
from facefusion.types import ApplyStateItem, Args
from facefusion.vision import create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_fps, detect_video_resolution, pack_resolution


def reduce_step_args(args : Args) -> Args:
	# Keep only the keys registered as step keys in the job store.
	step_args =\
	{
		key: args[key] for key in args if key in job_store.get_step_keys()
	}
	return step_args


def reduce_job_args(args : Args) -> Args:
	# Keep only the keys registered as job keys in the job store.
	job_args =\
	{
		key: args[key] for key in args if key in job_store.get_job_keys()
	}
	return job_args


def collect_step_args() -> Args:
	# Snapshot the current step values from the state manager.
	from facefusion.uis.components import face_selector # local import, presumably to avoid a circular dependency with the UI package — TODO confirm
	step_args =\
	{
		key: state_manager.get_item(key) for key in job_store.get_step_keys() #type:ignore[arg-type]
	}
	step_args.update(face_selector.get_step_args()) # merge the extra keys exposed by the face selector UI component
	return step_args


def collect_job_args() -> Args:
	# Snapshot the current job values from the state manager.
	job_args =\
	{
		key: state_manager.get_item(key) for key in job_store.get_job_keys() #type:ignore[arg-type]
	}
	return job_args


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
	"""Push every recognized argument into the application state via apply_state_item."""
	# general
	apply_state_item('command', args.get('command'))
	# paths
	apply_state_item('temp_path', args.get('temp_path'))
	apply_state_item('jobs_path', args.get('jobs_path'))
	apply_state_item('source_paths', args.get('source_paths'))
	apply_state_item('target_path', args.get('target_path'))
	apply_state_item('output_path', args.get('output_path'))
	# patterns
	apply_state_item('source_pattern', args.get('source_pattern'))
	apply_state_item('target_pattern', args.get('target_pattern'))
	apply_state_item('output_pattern', args.get('output_pattern'))
	# face detector
	apply_state_item('face_detector_model', args.get('face_detector_model'))
	apply_state_item('face_detector_size', args.get('face_detector_size'))
	apply_state_item('face_detector_angles', args.get('face_detector_angles'))
	apply_state_item('face_detector_score', args.get('face_detector_score'))
	# face landmarker
	apply_state_item('face_landmarker_model', args.get('face_landmarker_model'))
	apply_state_item('face_landmarker_score', args.get('face_landmarker_score'))
	# face selector
	apply_state_item('face_selector_mode', args.get('face_selector_mode'))
	apply_state_item('face_selector_order', args.get('face_selector_order'))
	apply_state_item('face_selector_age_start', args.get('face_selector_age_start'))
	apply_state_item('face_selector_age_end', args.get('face_selector_age_end'))
	apply_state_item('face_selector_gender', args.get('face_selector_gender'))
	apply_state_item('face_selector_race', args.get('face_selector_race'))
	apply_state_item('reference_face_position', args.get('reference_face_position'))
	apply_state_item('reference_face_distance', args.get('reference_face_distance'))
	apply_state_item('reference_frame_number', args.get('reference_frame_number'))
	# face masker
	apply_state_item('face_occluder_model', args.get('face_occluder_model'))
	apply_state_item('face_parser_model', args.get('face_parser_model'))
	apply_state_item('face_mask_types', args.get('face_mask_types'))
	apply_state_item('face_mask_areas', args.get('face_mask_areas'))
	apply_state_item('face_mask_regions', args.get('face_mask_regions'))
	apply_state_item('face_mask_blur', args.get('face_mask_blur'))
	apply_state_item('face_mask_padding', normalize_padding(args.get('face_mask_padding')))
	# frame extraction
	apply_state_item('trim_frame_start', args.get('trim_frame_start'))
	apply_state_item('trim_frame_end', args.get('trim_frame_end'))
	apply_state_item('temp_frame_format', args.get('temp_frame_format'))
	apply_state_item('keep_temp', args.get('keep_temp'))
	# output creation
	apply_state_item('output_image_quality', args.get('output_image_quality'))
	# only accept a requested image resolution that the target actually supports,
	# otherwise fall back to the detected resolution
	if is_image(args.get('target_path')):
		output_image_resolution = detect_image_resolution(args.get('target_path'))
		output_image_resolutions = create_image_resolutions(output_image_resolution)
		if args.get('output_image_resolution') in output_image_resolutions:
			apply_state_item('output_image_resolution', args.get('output_image_resolution'))
		else:
			apply_state_item('output_image_resolution', pack_resolution(output_image_resolution))
	apply_state_item('output_audio_encoder', args.get('output_audio_encoder'))
	apply_state_item('output_audio_quality', args.get('output_audio_quality'))
	apply_state_item('output_audio_volume', args.get('output_audio_volume'))
	apply_state_item('output_video_encoder', args.get('output_video_encoder'))
	apply_state_item('output_video_preset', args.get('output_video_preset'))
	apply_state_item('output_video_quality', args.get('output_video_quality'))
	# same fallback logic for the video resolution
	if is_video(args.get('target_path')):
		output_video_resolution = detect_video_resolution(args.get('target_path'))
		output_video_resolutions = create_video_resolutions(output_video_resolution)
		if args.get('output_video_resolution') in output_video_resolutions:
			apply_state_item('output_video_resolution', args.get('output_video_resolution'))
		else:
			apply_state_item('output_video_resolution', pack_resolution(output_video_resolution))
	if args.get('output_video_fps') or is_video(args.get('target_path')):
		output_video_fps = normalize_fps(args.get('output_video_fps')) or detect_video_fps(args.get('target_path'))
		apply_state_item('output_video_fps', output_video_fps)
	# processors
	available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
	apply_state_item('processors', args.get('processors'))
	for processor_module in get_processors_modules(available_processors):
		processor_module.apply_args(args, apply_state_item)
	# uis
	apply_state_item('open_browser', args.get('open_browser'))
	apply_state_item('ui_layouts', args.get('ui_layouts'))
	apply_state_item('ui_workflow', args.get('ui_workflow'))

	# gui_type: apply an explicit choice and persist it, otherwise restore the saved preference
	if args.get('gui_type') is not None:
		apply_state_item('gui_type', args.get('gui_type'))
		# remember the choice for the next start
		from facefusion.ui_preference import save_ui_preference
		save_ui_preference(args.get('gui_type'))
	else:
		# fall back to the previously saved preference
		from facefusion.ui_preference import get_ui_preference
		apply_state_item('gui_type', get_ui_preference())

	# execution
	apply_state_item('execution_device_id', args.get('execution_device_id'))
	apply_state_item('execution_providers', args.get('execution_providers'))
	apply_state_item('execution_thread_count', args.get('execution_thread_count'))
	apply_state_item('execution_queue_count', args.get('execution_queue_count'))
	# download
	apply_state_item('download_providers', args.get('download_providers'))
	apply_state_item('download_scope', args.get('download_scope'))
	# benchmark
	apply_state_item('benchmark_resolutions', args.get('benchmark_resolutions'))
	apply_state_item('benchmark_cycle_count', args.get('benchmark_cycle_count'))
	# memory
	apply_state_item('video_memory_strategy', args.get('video_memory_strategy'))
	apply_state_item('system_memory_limit', args.get('system_memory_limit'))
	# misc
	apply_state_item('log_level', args.get('log_level'))
	apply_state_item('halt_on_error', args.get('halt_on_error'))
	# jobs
	apply_state_item('job_id', args.get('job_id'))
	apply_state_item('job_status', args.get('job_status'))
	apply_state_item('step_index', args.get('step_index'))
facefusion/audio.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from functools import lru_cache
from typing import Any, List, Optional

import numpy
import scipy
from numpy.typing import NDArray

from facefusion.ffmpeg import read_audio_buffer
from facefusion.filesystem import is_audio
from facefusion.types import Audio, AudioFrame, Fps, Mel, MelFilterBank, Spectrogram
from facefusion.voice_extractor import batch_extract_voice


# NOTE(review): unbounded cache — every (audio_path, fps) pair is kept alive
# for the process lifetime; confirm this is acceptable for long sessions.
@lru_cache()
def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	"""Cached variant of read_audio()."""
	return read_audio(audio_path, fps)


def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	"""Decode an audio file into mel-spectrogram frames aligned to the given fps.

	Returns None when audio_path is not a recognized audio file.
	"""
	audio_sample_rate = 48000
	audio_sample_size = 16
	audio_channel_total = 2

	if is_audio(audio_path):
		audio_buffer = read_audio_buffer(audio_path, audio_sample_rate, audio_sample_size, audio_channel_total)
		# 16-bit stereo PCM -> (samples, 2) int16 array
		audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2)
		audio = prepare_audio(audio)
		spectrogram = create_spectrogram(audio)
		audio_frames = extract_audio_frames(spectrogram, fps)
		return audio_frames
	return None


# NOTE(review): unbounded cache, same consideration as read_static_audio().
@lru_cache()
def read_static_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	"""Cached variant of read_voice()."""
	return read_voice(audio_path, fps)


def read_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	"""Like read_audio(), but isolates the voice track before building frames.

	Returns None when audio_path is not a recognized audio file.
	"""
	voice_sample_rate = 48000
	voice_sample_size = 16
	voice_channel_total = 2
	voice_chunk_size = 240 * 1024
	voice_step_size = 180 * 1024

	if is_audio(audio_path):
		audio_buffer = read_audio_buffer(audio_path, voice_sample_rate, voice_sample_size, voice_channel_total)
		audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2)
		# extract the voice in overlapping chunks (chunk/step sizes in samples)
		audio = batch_extract_voice(audio, voice_chunk_size, voice_step_size)
		audio = prepare_voice(audio)
		spectrogram = create_spectrogram(audio)
		audio_frames = extract_audio_frames(spectrogram, fps)
		return audio_frames
	return None


def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]:
	"""Return the audio frame for a video frame number, or None when unavailable."""
	if is_audio(audio_path):
		audio_frames = read_static_audio(audio_path, fps)
		if frame_number in range(len(audio_frames)):
			return audio_frames[frame_number]
	return None


def extract_audio_frames(spectrogram : Spectrogram, fps : Fps) -> List[AudioFrame]:
	"""Slice the spectrogram into fixed-width windows, one per video frame."""
	audio_frames = []
	mel_filter_total = 80
	audio_step_size = 16
	# NOTE(review): int16 indices overflow past 32767 spectrogram columns — confirm
	# inputs stay short enough in practice.
	indices = numpy.arange(0, spectrogram.shape[1], mel_filter_total / fps).astype(numpy.int16)
	indices = indices[indices >= audio_step_size]

	for index in indices:
		start = max(0, index - audio_step_size)
		audio_frames.append(spectrogram[:, start:index])

	return audio_frames


def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]:
	"""Return the voice frame for a video frame number, or None when unavailable."""
	if is_audio(audio_path):
		voice_frames = read_static_voice(audio_path, fps)
		if frame_number in range(len(voice_frames)):
			return voice_frames[frame_number]
	return None


def create_empty_audio_frame() -> AudioFrame:
	"""Return an all-zero frame with the standard (80, 16) mel window shape."""
	mel_filter_total = 80
	audio_step_size = 16
	audio_frame = numpy.zeros((mel_filter_total, audio_step_size)).astype(numpy.int16)
	return audio_frame


def prepare_audio(audio : Audio) -> Audio:
	"""Downmix to mono, peak-normalize and apply a pre-emphasis filter."""
	if audio.ndim > 1:
		audio = numpy.mean(audio, axis = 1)
	# NOTE(review): divides by the absolute peak — an all-zero (silent) input
	# would divide by zero here; confirm upstream never feeds silence.
	audio = audio / numpy.max(numpy.abs(audio), axis = 0)
	audio = scipy.signal.lfilter([ 1.0, -0.97 ], [ 1.0 ], audio)
	return audio


def prepare_voice(audio : Audio) -> Audio:
	"""Resample from 48 kHz to 16 kHz, then apply the standard audio preparation."""
	audio_sample_rate = 48000
	audio_resample_rate = 16000
	audio_resample_factor = round(len(audio) * audio_resample_rate / audio_sample_rate)
	audio = scipy.signal.resample(audio, audio_resample_factor)
	audio = prepare_audio(audio)
	return audio


def convert_hertz_to_mel(hertz : float) -> float:
	"""Convert a frequency in Hz to the mel scale."""
	return 2595 * numpy.log10(1 + hertz / 700)


def convert_mel_to_hertz(mel : Mel) -> NDArray[Any]:
	"""Convert mel-scale values back to frequencies in Hz."""
	return 700 * (10 ** (mel / 2595) - 1)


def create_mel_filter_bank() -> MelFilterBank:
	"""Build an 80-band triangular mel filter bank over 55–7600 Hz at 16 kHz."""
	audio_sample_rate = 16000
	audio_min_frequency = 55.0
	audio_max_frequency = 7600.0
	mel_filter_total = 80
	mel_bin_total = 800
	mel_filter_bank = numpy.zeros((mel_filter_total, mel_bin_total // 2 + 1))
	# band edges spaced evenly on the mel scale, mapped back to FFT bin indices
	mel_frequency_range = numpy.linspace(convert_hertz_to_mel(audio_min_frequency), convert_hertz_to_mel(audio_max_frequency), mel_filter_total + 2)
	indices = numpy.floor((mel_bin_total + 1) * convert_mel_to_hertz(mel_frequency_range) / audio_sample_rate).astype(numpy.int16)

	for index in range(mel_filter_total):
		start = indices[index]
		end = indices[index + 1]
		mel_filter_bank[index, start:end] = scipy.signal.windows.triang(end - start)

	return mel_filter_bank


def create_spectrogram(audio : Audio) -> Spectrogram:
	"""Compute an STFT magnitude spectrogram and project it onto the mel filter bank."""
	mel_bin_total = 800
	mel_bin_overlap = 600
	mel_filter_bank = create_mel_filter_bank()
	# stft() returns (frequencies, times, Zxx); only the complex matrix is needed
	spectrogram = scipy.signal.stft(audio, nperseg = mel_bin_total, nfft = mel_bin_total, noverlap = mel_bin_overlap)[2]
	spectrogram = numpy.dot(mel_filter_bank, numpy.abs(spectrogram))
	return spectrogram
facefusion/benchmarker.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import hashlib
import os
import statistics
import tempfile
from time import perf_counter
from typing import Generator, List

import facefusion.choices
from facefusion import core, state_manager
from facefusion.cli_helper import render_table
from facefusion.download import conditional_download, resolve_download_url
from facefusion.filesystem import get_file_extension
from facefusion.types import BenchmarkCycleSet
from facefusion.vision import count_video_frame_total, detect_video_fps, detect_video_resolution, pack_resolution


def pre_check() -> bool:
	"""Download the example source and target assets needed for benchmarking."""
	conditional_download('.assets/examples',
	[
		resolve_download_url('examples-3.0.0', 'source.jpg'),
		resolve_download_url('examples-3.0.0', 'source.mp3'),
		resolve_download_url('examples-3.0.0', 'target-240p.mp4'),
		resolve_download_url('examples-3.0.0', 'target-360p.mp4'),
		resolve_download_url('examples-3.0.0', 'target-540p.mp4'),
		resolve_download_url('examples-3.0.0', 'target-720p.mp4'),
		resolve_download_url('examples-3.0.0', 'target-1080p.mp4'),
		resolve_download_url('examples-3.0.0', 'target-1440p.mp4'),
		resolve_download_url('examples-3.0.0', 'target-2160p.mp4')
	])
	return True


def run() -> Generator[List[BenchmarkCycleSet], None, None]:
	"""Benchmark every configured resolution, yielding the cumulative results after each target."""
	benchmark_resolutions = state_manager.get_item('benchmark_resolutions')
	benchmark_cycle_count = state_manager.get_item('benchmark_cycle_count')

	# fixed settings so runs are comparable across machines
	state_manager.init_item('source_paths', [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ])
	state_manager.init_item('face_landmarker_score', 0)
	state_manager.init_item('temp_frame_format', 'bmp')
	state_manager.init_item('output_audio_volume', 0)
	state_manager.init_item('output_video_preset', 'ultrafast')
	state_manager.init_item('video_memory_strategy', 'tolerant')

	benchmarks = []
	target_paths = [ facefusion.choices.benchmark_set.get(benchmark_resolution) for benchmark_resolution in benchmark_resolutions if benchmark_resolution in facefusion.choices.benchmark_set ]

	for target_path in target_paths:
		state_manager.set_item('target_path', target_path)
		state_manager.set_item('output_path', suggest_output_path(state_manager.get_item('target_path')))
		benchmarks.append(cycle(benchmark_cycle_count))
		yield benchmarks


def cycle(cycle_count : int) -> BenchmarkCycleSet:
	"""Process the current target cycle_count times and return timing statistics."""
	process_times = []
	video_frame_total = count_video_frame_total(state_manager.get_item('target_path'))
	output_video_resolution = detect_video_resolution(state_manager.get_item('target_path'))
	state_manager.set_item('output_video_resolution', pack_resolution(output_video_resolution))
	state_manager.set_item('output_video_fps', detect_video_fps(state_manager.get_item('target_path')))

	# warm-up run so model loading does not skew the measurements
	core.conditional_process()

	for _ in range(cycle_count):
		start_time = perf_counter()
		core.conditional_process()
		end_time = perf_counter()
		process_times.append(end_time - start_time)

	average_run = round(statistics.mean(process_times), 2)
	fastest_run = round(min(process_times), 2)
	slowest_run = round(max(process_times), 2)
	relative_fps = round(video_frame_total * cycle_count / sum(process_times), 2)

	return\
	{
		'target_path': state_manager.get_item('target_path'),
		'cycle_count': cycle_count,
		'average_run': average_run,
		'fastest_run': fastest_run,
		'slowest_run': slowest_run,
		'relative_fps': relative_fps
	}


def suggest_output_path(target_path : str) -> str:
	"""Return a temp-directory output path derived from the target path.

	Hashes the target path so each benchmark target gets its own file.
	(The previous ``hashlib.sha1()`` hashed empty input, always producing the
	same digest, so every target resolution collided on one output file.)
	"""
	target_file_extension = get_file_extension(target_path)
	target_file_name = hashlib.sha1(target_path.encode()).hexdigest()[:8]
	return os.path.join(tempfile.gettempdir(), target_file_name + target_file_extension)


def render() -> None:
	"""Run all benchmarks and print the final results as an ASCII table."""
	benchmarks = []
	headers =\
	[
		'target_path',
		'cycle_count',
		'average_run',
		'fastest_run',
		'slowest_run',
		'relative_fps'
	]

	# drain the generator; each yield carries the cumulative list, so keep the last
	for benchmark in run():
		benchmarks = benchmark

	contents = [ list(benchmark_set.values()) for benchmark_set in benchmarks ]
	render_table(headers, contents)
facefusion/choices.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
from typing import List, Sequence

from facefusion.common_helper import create_float_range, create_int_range
from facefusion.types import Angle, AudioEncoder, AudioFormat, AudioTypeSet, BenchmarkResolution, BenchmarkSet, DownloadProvider, DownloadProviderSet, DownloadScope, EncoderSet, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskArea, FaceMaskAreaSet, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, ImageFormat, ImageTypeSet, JobStatus, LogLevel, LogLevelSet, Race, Score, TempFrameFormat, UiWorkflow, VideoEncoder, VideoFormat, VideoMemoryStrategy, VideoPreset, VideoTypeSet, WebcamMode

# face analysis: detector models with their supported input sizes
face_detector_set : FaceDetectorSet =\
{
	'many': [ '640x640' ],
	'retinaface': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
	'scrfd': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
	'yolo_face': [ '640x640' ]
}
face_detector_models : List[FaceDetectorModel] = list(face_detector_set.keys())
face_landmarker_models : List[FaceLandmarkerModel] = [ 'many', '2dfan4', 'peppa_wutz' ]
face_selector_modes : List[FaceSelectorMode] = [ 'many', 'one', 'reference' ]
face_selector_orders : List[FaceSelectorOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'large-small' ] # 'small-large', 'best-worst', 'worst-best'
face_selector_genders : List[Gender] = [ 'female', 'male' ]
face_selector_races : List[Race] = [ 'white', 'black', 'latino', 'asian', 'indian', 'arabic' ]
face_occluder_models : List[FaceOccluderModel] = [ 'xseg_1', 'xseg_2', 'xseg_3' ]
face_parser_models : List[FaceParserModel] = [ 'bisenet_resnet_18', 'bisenet_resnet_34' ]
face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'area', 'region' ]
# mask areas as landmark index lists; presumably 68-point landmark indices — TODO confirm
face_mask_area_set : FaceMaskAreaSet =\
{
	'upper-face': [ 0, 1, 2, 31, 32, 33, 34, 35, 14, 15, 16, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17 ],
	'lower-face': [ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 35, 34, 33, 32, 31 ],
	'mouth': [ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67 ]
}
# face parser class labels mapped to their segmentation ids
face_mask_region_set : FaceMaskRegionSet =\
{
	'skin': 1,
	'left-eyebrow': 2,
	'right-eyebrow': 3,
	'left-eye': 4,
	'right-eye': 5,
	'glasses': 6,
	'nose': 10,
	'mouth': 11,
	'upper-lip': 12,
	'lower-lip': 13
}
face_mask_areas : List[FaceMaskArea] = list(face_mask_area_set.keys())
face_mask_regions : List[FaceMaskRegion] = list(face_mask_region_set.keys())

# media formats mapped to their MIME types
audio_type_set : AudioTypeSet =\
{
	'flac': 'audio/flac',
	'm4a': 'audio/mp4',
	'mp3': 'audio/mpeg',
	'ogg': 'audio/ogg',
	'opus': 'audio/opus',
	'wav': 'audio/x-wav'
}
image_type_set : ImageTypeSet =\
{
	'bmp': 'image/bmp',
	'jpeg': 'image/jpeg',
	'png': 'image/png',
	'tiff': 'image/tiff',
	'webp': 'image/webp'
}
video_type_set : VideoTypeSet =\
{
	'avi': 'video/x-msvideo',
	'm4v': 'video/mp4',
	'mkv': 'video/x-matroska',
	'mp4': 'video/mp4',
	'mov': 'video/quicktime',
	'webm': 'video/webm'
}
audio_formats : List[AudioFormat] = list(audio_type_set.keys())
image_formats : List[ImageFormat] = list(image_type_set.keys())
video_formats : List[VideoFormat] = list(video_type_set.keys())
temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpeg', 'png', 'tiff' ]

# ffmpeg encoder names accepted for output creation
output_encoder_set : EncoderSet =\
{
	'audio': [ 'flac', 'aac', 'libmp3lame', 'libopus', 'libvorbis', 'pcm_s16le', 'pcm_s32le' ],
	'video': [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox', 'rawvideo' ]
}
output_audio_encoders : List[AudioEncoder] = output_encoder_set.get('audio')
output_video_encoders : List[VideoEncoder] = output_encoder_set.get('video')
output_video_presets : List[VideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ]

# scaling factors / heights used to build the resolution choice lists
image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ]
video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ]

# benchmark resolutions mapped to the example target clips
benchmark_set : BenchmarkSet =\
{
	'240p': '.assets/examples/target-240p.mp4',
	'360p': '.assets/examples/target-360p.mp4',
	'540p': '.assets/examples/target-540p.mp4',
	'720p': '.assets/examples/target-720p.mp4',
	'1080p': '.assets/examples/target-1080p.mp4',
	'1440p': '.assets/examples/target-1440p.mp4',
	'2160p': '.assets/examples/target-2160p.mp4'
}
benchmark_resolutions : List[BenchmarkResolution] = list(benchmark_set.keys())

webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]

# short names mapped to ONNX Runtime execution provider identifiers
execution_provider_set : ExecutionProviderSet =\
{
	'cuda': 'CUDAExecutionProvider',
	'tensorrt': 'TensorrtExecutionProvider',
	'directml': 'DmlExecutionProvider',
	'rocm': 'ROCMExecutionProvider',
	'openvino': 'OpenVINOExecutionProvider',
	'coreml': 'CoreMLExecutionProvider',
	'cpu': 'CPUExecutionProvider'
}
execution_providers : List[ExecutionProvider] = list(execution_provider_set.keys())
# asset mirrors with URL templates for model downloads
download_provider_set : DownloadProviderSet =\
{
	'github':
	{
		'urls':
		[
			'https://github.com'
		],
		'path': '/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}'
	},
	'huggingface':
	{
		'urls':
		[
			'https://huggingface.co',
			'https://hf-mirror.com'
		],
		'path': '/facefusion/{base_name}/resolve/main/{file_name}'
	}
}
download_providers : List[DownloadProvider] = list(download_provider_set.keys())
download_scopes : List[DownloadScope] = [ 'lite', 'full' ]

video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ]

# log level names mapped to the stdlib logging constants
log_level_set : LogLevelSet =\
{
	'error': logging.ERROR,
	'warn': logging.WARNING,
	'info': logging.INFO,
	'debug': logging.DEBUG
}
log_levels : List[LogLevel] = list(log_level_set.keys())

ui_workflows : List[UiWorkflow] = [ 'instant_runner', 'job_runner', 'job_manager' ]
job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ]

# numeric ranges backing sliders and CLI validation
benchmark_cycle_count_range : Sequence[int] = create_int_range(1, 10, 1)
execution_thread_count_range : Sequence[int] = create_int_range(1, 32, 1)
execution_queue_count_range : Sequence[int] = create_int_range(1, 4, 1)
system_memory_limit_range : Sequence[int] = create_int_range(0, 128, 4)
face_detector_angles : Sequence[Angle] = create_int_range(0, 270, 90)
face_detector_score_range : Sequence[Score] = create_float_range(0.0, 1.0, 0.05)
face_landmarker_score_range : Sequence[Score] = create_float_range(0.0, 1.0, 0.05)
face_mask_blur_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
face_mask_padding_range : Sequence[int] = create_int_range(0, 100, 1)
face_selector_age_range : Sequence[int] = create_int_range(0, 100, 1)
reference_face_distance_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
output_image_quality_range : Sequence[int] = create_int_range(0, 100, 1)
output_audio_quality_range : Sequence[int] = create_int_range(0, 100, 1)
output_audio_volume_range : Sequence[int] = create_int_range(0, 100, 1)
output_video_quality_range : Sequence[int] = create_int_range(0, 100, 1)
facefusion/cli_helper.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple
2
+
3
+ from facefusion.logger import get_package_logger
4
+ from facefusion.types import TableContents, TableHeaders
5
+
6
+
7
def render_table(headers : TableHeaders, contents : TableContents) -> None:
	"""Print a bordered ASCII table through the package logger.

	Uses the critical level so the table is emitted regardless of the
	configured log level.
	"""
	package_logger = get_package_logger()
	row_template, border = create_table_parts(headers, contents)

	package_logger.critical(border)
	package_logger.critical(row_template.format(*headers))
	package_logger.critical(border)

	for row in contents:
		stringified_row = [ str(cell) for cell in row ]
		package_logger.critical(row_template.format(*stringified_row))

	package_logger.critical(border)
20
+
21
+
22
+ def create_table_parts(headers : TableHeaders, contents : TableContents) -> Tuple[str, str]:
23
+ column_parts = []
24
+ separator_parts = []
25
+ widths = [ len(header) for header in headers ]
26
+
27
+ for content in contents:
28
+ for index, value in enumerate(content):
29
+ widths[index] = max(widths[index], len(str(value)))
30
+
31
+ for width in widths:
32
+ column_parts.append('{:<' + str(width) + '}')
33
+ separator_parts.append('-' * width)
34
+
35
+ return '| ' + ' | '.join(column_parts) + ' |', '+-' + '-+-'.join(separator_parts) + '-+'
facefusion/common_helper.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import platform
2
+ from typing import Any, Iterable, Optional, Reversible, Sequence
3
+
4
+
5
def _platform_name() -> str:
	# normalised operating system name shared by the three checks below
	return platform.system().lower()


def is_linux() -> bool:
	"""Return True when the host operating system is Linux."""
	return _platform_name() == 'linux'


def is_macos() -> bool:
	"""Return True when the host operating system is macOS (Darwin)."""
	return _platform_name() == 'darwin'


def is_windows() -> bool:
	"""Return True when the host operating system is Windows."""
	return _platform_name() == 'windows'
15
+
16
+
17
def create_int_metavar(int_range : Sequence[int]) -> str:
	"""Render an integer range as a '[first..last:step]' CLI metavar string."""
	step = int_range[1] - int_range[0]
	return f'[{int_range[0]}..{int_range[-1]}:{step}]'


def create_float_metavar(float_range : Sequence[float]) -> str:
	"""Render a float range as a '[first..last:step]' CLI metavar string."""
	step = round(float_range[1] - float_range[0], 2)
	return f'[{float_range[0]}..{float_range[-1]}:{step}]'
23
+
24
+
25
def create_int_range(start : int, end : int, step : int) -> Sequence[int]:
	"""Return the integers start, start+step, ... up to and including end."""
	values = []
	value = start

	while value <= end:
		values.append(value)
		value += step
	return values
33
+
34
+
35
def create_float_range(start : float, end : float, step : float) -> Sequence[float]:
	"""Return floats start, start+step, ... up to and including end.

	Each value is rounded to two decimals to keep accumulation error from
	drifting the sequence.
	"""
	values = []
	value = start

	while value <= end:
		values.append(round(value, 2))
		value = round(value + step, 2)
	return values
43
+
44
+
45
def calc_int_step(int_range : Sequence[int]) -> int:
	"""Infer the step of an evenly spaced integer range from its first two entries."""
	first, second = int_range[0], int_range[1]
	return second - first


def calc_float_step(float_range : Sequence[float]) -> float:
	"""Infer the step of an evenly spaced float range, rounded to two decimals."""
	first, second = float_range[0], float_range[1]
	return round(second - first, 2)
51
+
52
+
53
def cast_int(value : Any) -> Optional[int]:
	"""Convert value to int, returning None when conversion is impossible."""
	try:
		return int(value)
	except (TypeError, ValueError):
		return None


def cast_float(value : Any) -> Optional[float]:
	"""Convert value to float, returning None when conversion is impossible."""
	try:
		return float(value)
	except (TypeError, ValueError):
		return None
65
+
66
+
67
def cast_bool(value : Any) -> Optional[bool]:
	"""Map the exact strings 'True'/'False' to booleans; anything else yields None."""
	if value == 'True':
		return True
	return False if value == 'False' else None
73
+
74
+
75
def get_first(__list__ : Any) -> Any:
	"""Return the first element of an iterable, or None when empty or not iterable."""
	if not isinstance(__list__, Iterable):
		return None
	return next(iter(__list__), None)


def get_last(__list__ : Any) -> Any:
	"""Return the last element of a reversible sequence, or None when empty or not reversible."""
	if not isinstance(__list__, Reversible):
		return None
	return next(reversed(__list__), None)
facefusion/config.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from configparser import ConfigParser
2
+ from typing import List, Optional
3
+
4
+ from facefusion import state_manager
5
+ from facefusion.common_helper import cast_bool, cast_float, cast_int
6
+
7
# Module level cache: the ini file is parsed once and reused.
CONFIG_PARSER = None


def get_config_parser() -> ConfigParser:
	"""Return the cached ConfigParser, reading the configured ini file on first use."""
	global CONFIG_PARSER

	if CONFIG_PARSER is None:
		config_parser = ConfigParser()
		config_parser.read(state_manager.get_item('config_path'), encoding = 'utf-8')
		CONFIG_PARSER = config_parser
	return CONFIG_PARSER
17
+
18
+
19
def clear_config_parser() -> None:
	"""Drop the cached parser so the config file is re-read on next access."""
	global CONFIG_PARSER
	CONFIG_PARSER = None
23
+
24
+
25
def get_str_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[str]:
	"""Read a string option; return the fallback when absent or blank."""
	config_parser = get_config_parser()

	if not config_parser.has_option(section, option) or not config_parser.get(section, option).strip():
		return fallback
	return config_parser.get(section, option)


def get_int_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[int]:
	"""Read an integer option; cast the fallback string when absent or blank."""
	config_parser = get_config_parser()

	if not config_parser.has_option(section, option) or not config_parser.get(section, option).strip():
		return cast_int(fallback)
	return config_parser.getint(section, option)


def get_float_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[float]:
	"""Read a float option; cast the fallback string when absent or blank."""
	config_parser = get_config_parser()

	if not config_parser.has_option(section, option) or not config_parser.get(section, option).strip():
		return cast_float(fallback)
	return config_parser.getfloat(section, option)


def get_bool_value(section : str, option : str, fallback : Optional[str] = None) -> Optional[bool]:
	"""Read a boolean option; cast the fallback string when absent or blank."""
	config_parser = get_config_parser()

	if not config_parser.has_option(section, option) or not config_parser.get(section, option).strip():
		return cast_bool(fallback)
	return config_parser.getboolean(section, option)
55
+
56
+
57
def get_str_list(section : str, option : str, fallback : Optional[str] = None) -> Optional[List[str]]:
	"""Read a whitespace separated list option; split the fallback string when absent."""
	config_parser = get_config_parser()

	if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
		return config_parser.get(section, option).split()
	return fallback.split() if fallback else None


def get_int_list(section : str, option : str, fallback : Optional[str] = None) -> Optional[List[int]]:
	"""Read a whitespace separated integer list option; parse the fallback when absent."""
	config_parser = get_config_parser()

	if config_parser.has_option(section, option) and config_parser.get(section, option).strip():
		return [ int(value) for value in config_parser.get(section, option).split() ]
	return [ int(value) for value in fallback.split() ] if fallback else None
facefusion/content_analyser.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import List, Tuple
3
+
4
+ import numpy
5
+ from tqdm import tqdm
6
+
7
+ from facefusion import inference_manager, state_manager, wording
8
+ from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
9
+ from facefusion.execution import has_execution_provider
10
+ from facefusion.filesystem import resolve_relative_path
11
+ from facefusion.thread_helper import conditional_thread_semaphore
12
+ from facefusion.types import Detection, DownloadScope, DownloadSet, ExecutionProvider, Fps, InferencePool, ModelSet, VisionFrame
13
+ from facefusion.vision import detect_video_fps, fit_frame, read_image, read_video_frame
14
+
15
+ STREAM_COUNTER = 0
16
+
17
+
18
@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""Return the static download and normalisation metadata for the NSFW models.

	Each entry carries the hash/source download descriptors plus the input
	size, mean and standard deviation consumed by prepare_detect_frame().

	NOTE(review): download_scope is accepted for interface parity with other
	modules (and serves as the lru_cache key), but the full set is returned
	regardless of scope — confirm this is intentional.
	"""
	return\
	{
		'nsfw_1':
		{
			'hashes':
			{
				'content_analyser':
				{
					'url': resolve_download_url('models-3.3.0', 'nsfw_1.hash'),
					'path': resolve_relative_path('../.assets/models/nsfw_1.hash')
				}
			},
			'sources':
			{
				'content_analyser':
				{
					'url': resolve_download_url('models-3.3.0', 'nsfw_1.onnx'),
					'path': resolve_relative_path('../.assets/models/nsfw_1.onnx')
				}
			},
			'size': (640, 640),
			'mean': (0.0, 0.0, 0.0),
			'standard_deviation': (1.0, 1.0, 1.0)
		},
		'nsfw_2':
		{
			'hashes':
			{
				'content_analyser':
				{
					'url': resolve_download_url('models-3.3.0', 'nsfw_2.hash'),
					'path': resolve_relative_path('../.assets/models/nsfw_2.hash')
				}
			},
			'sources':
			{
				'content_analyser':
				{
					'url': resolve_download_url('models-3.3.0', 'nsfw_2.onnx'),
					'path': resolve_relative_path('../.assets/models/nsfw_2.onnx')
				}
			},
			'size': (384, 384),
			'mean': (0.5, 0.5, 0.5),
			'standard_deviation': (0.5, 0.5, 0.5)
		},
		'nsfw_3':
		{
			'hashes':
			{
				'content_analyser':
				{
					'url': resolve_download_url('models-3.3.0', 'nsfw_3.hash'),
					'path': resolve_relative_path('../.assets/models/nsfw_3.hash')
				}
			},
			'sources':
			{
				'content_analyser':
				{
					'url': resolve_download_url('models-3.3.0', 'nsfw_3.onnx'),
					'path': resolve_relative_path('../.assets/models/nsfw_3.onnx')
				}
			},
			'size': (448, 448),
			'mean': (0.48145466, 0.4578275, 0.40821073),
			'standard_deviation': (0.26862954, 0.26130258, 0.27577711)
		}
	}
89
+
90
+
91
def get_inference_pool() -> InferencePool:
	"""Return the shared inference pool holding the three NSFW models."""
	nsfw_model_names = [ 'nsfw_1', 'nsfw_2', 'nsfw_3' ]
	_, model_source_set = collect_model_downloads()
	return inference_manager.get_inference_pool(__name__, nsfw_model_names, model_source_set)
96
+
97
+
98
def clear_inference_pool() -> None:
	"""Release the inference sessions for the three NSFW models."""
	nsfw_model_names = [ 'nsfw_1', 'nsfw_2', 'nsfw_3' ]
	inference_manager.clear_inference_pool(__name__, nsfw_model_names)
101
+
102
+
103
def resolve_execution_providers() -> List[ExecutionProvider]:
	"""Force CPU execution when CoreML is available; otherwise honour the configured providers."""
	if not has_execution_provider('coreml'):
		return state_manager.get_item('execution_providers')
	return [ 'cpu' ]
107
+
108
+
109
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
	"""Gather the hash and source download entries for every NSFW model."""
	model_set = create_static_model_set('full')
	model_hash_set = {}
	model_source_set = {}

	for model_name in [ 'nsfw_1', 'nsfw_2', 'nsfw_3' ]:
		model = model_set.get(model_name)
		model_hash_set[model_name] = model.get('hashes').get('content_analyser')
		model_source_set[model_name] = model.get('sources').get('content_analyser')

	return model_hash_set, model_source_set
119
+
120
+
121
def pre_check() -> bool:
	"""Download missing model hashes and weights; True when everything is in place."""
	model_hash_set, model_source_set = collect_model_downloads()

	if not conditional_download_hashes(model_hash_set):
		return False
	return conditional_download_sources(model_source_set)
125
+
126
+
127
def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool:
	"""Analyse roughly one frame per second of a live stream.

	Increments a global frame counter and only runs the frame analysis once
	every int(video_fps) frames; all other frames report False.
	"""
	global STREAM_COUNTER

	STREAM_COUNTER = STREAM_COUNTER + 1
	# guard against a zero or sub-1 fps which previously raised ZeroDivisionError
	frame_interval = max(int(video_fps), 1)

	if STREAM_COUNTER % frame_interval == 0:
		return analyse_frame(vision_frame)
	return False
134
+
135
+
136
def analyse_frame(vision_frame : VisionFrame) -> bool:
	"""Return True when the frame is classified as NSFW by detect_nsfw()."""
	return detect_nsfw(vision_frame)
138
+
139
+
140
@lru_cache(maxsize = None)
def analyse_image(image_path : str) -> bool:
	# Cached per path: repeated checks of the same image file are free.
	vision_frame = read_image(image_path)
	return analyse_frame(vision_frame)
144
+
145
+
146
@lru_cache(maxsize = None)
def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int) -> bool:
	"""Sample roughly one frame per second of the trimmed video and report
	whether more than 10% of the sampled frames are classified as NSFW.

	Cached per (path, trim_start, trim_end) combination.
	"""
	video_fps = detect_video_fps(video_path)
	frame_range = range(trim_frame_start, trim_frame_end)
	rate = 0.0
	total = 0
	counter = 0

	with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:

		for frame_number in frame_range:
			# analyse only one frame per second of footage
			if frame_number % int(video_fps) == 0:
				vision_frame = read_video_frame(video_path, frame_number)
				total += 1
				if analyse_frame(vision_frame):
					counter += 1
			if counter > 0 and total > 0:
				rate = counter / total * 100
				progress.set_postfix(rate = rate)
			progress.update()

	return bool(rate > 10.0)
168
+
169
+
170
def detect_nsfw(vision_frame : VisionFrame) -> bool:
	# NOTE(review): always returns False — the detect_with_nsfw_* detectors
	# defined below are never invoked here, so NSFW filtering is effectively
	# disabled in this build. Confirm this is intentional.
	return False
172
+
173
+
174
def detect_with_nsfw_1(vision_frame : VisionFrame) -> bool:
	"""Run the nsfw_1 model; flag when any per-box class score exceeds 0.2."""
	detect_vision_frame = prepare_detect_frame(vision_frame, 'nsfw_1')
	detection = forward_nsfw(detect_vision_frame, 'nsfw_1')
	top_score = numpy.max(numpy.amax(detection[:, 4:], axis = 1))
	return bool(top_score > 0.2)


def detect_with_nsfw_2(vision_frame : VisionFrame) -> bool:
	"""Run the nsfw_2 model; flag when the first output exceeds the second by more than 0.25."""
	detect_vision_frame = prepare_detect_frame(vision_frame, 'nsfw_2')
	detection = forward_nsfw(detect_vision_frame, 'nsfw_2')
	score_margin = detection[0] - detection[1]
	return bool(score_margin > 0.25)


def detect_with_nsfw_3(vision_frame : VisionFrame) -> bool:
	"""Run the nsfw_3 model; flag when the last two outputs dominate the first two by more than 10.5."""
	detect_vision_frame = prepare_detect_frame(vision_frame, 'nsfw_3')
	detection = forward_nsfw(detect_vision_frame, 'nsfw_3')
	score_margin = (detection[2] + detection[3]) - (detection[0] + detection[1])
	return bool(score_margin > 10.5)
193
+
194
+
195
def forward_nsfw(vision_frame : VisionFrame, nsfw_model : str) -> Detection:
	"""Run a single NSFW model on the prepared frame.

	The classifier models (nsfw_2, nsfw_3) return the first row of their
	batched output; nsfw_1 returns the raw detection tensor.
	"""
	content_analyser = get_inference_pool().get(nsfw_model)

	with conditional_thread_semaphore():
		output = content_analyser.run(None,
		{
			'input': vision_frame
		})[0]

	if nsfw_model in ('nsfw_2', 'nsfw_3'):
		return output[0]
	return output
208
+
209
+
210
def prepare_detect_frame(temp_vision_frame : VisionFrame, model_name : str) -> VisionFrame:
	"""Resize, reverse channel order, scale to [0, 1], normalise and add a
	batch axis, yielding an NCHW float32 tensor for the given model.
	"""
	model = create_static_model_set('full').get(model_name)
	model_mean = model.get('mean')
	model_standard_deviation = model.get('standard_deviation')

	detect_vision_frame = fit_frame(temp_vision_frame, model.get('size'))
	# reverse the channel axis (BGR -> RGB, assuming OpenCV style frames) and scale
	detect_vision_frame = detect_vision_frame[:, :, ::-1] / 255.0
	detect_vision_frame = (detect_vision_frame - model_mean) / model_standard_deviation
	return numpy.expand_dims(detect_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
facefusion/core.py ADDED
@@ -0,0 +1,601 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import shutil
3
+ import signal
4
+ import sys
5
+ import tempfile
6
+
7
+ # В начало core.py добавим импорты:
8
+ import os
9
+ import traceback
10
+ from facefusion.vision import read_static_image, write_image
11
+ from facefusion.face_store import get_reference_faces
12
+ from facefusion.face_analyser import get_many_faces, get_one_face
13
+ from facefusion.processors.core import get_processors_modules
14
+ from facefusion.vision import start_processing, end_processing
15
+ from time import time
16
+
17
+ import numpy
18
+
19
+ from facefusion import benchmarker, cli_helper, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, process_manager, state_manager, video_manager, voice_extractor, wording
20
+ from facefusion.args import apply_args, collect_job_args, reduce_job_args, reduce_step_args
21
+ from facefusion.common_helper import get_first
22
+ from facefusion.content_analyser import analyse_image, analyse_video
23
+ from facefusion.download import conditional_download_hashes, conditional_download_sources
24
+ from facefusion.exit_helper import hard_exit, signal_exit
25
+ from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face
26
+ from facefusion.face_selector import sort_and_filter_faces
27
+ from facefusion.face_store import append_reference_face, clear_reference_faces, get_reference_faces
28
+ from facefusion.ffmpeg import copy_image, extract_frames, finalize_image, merge_video, replace_audio, restore_audio
29
+ from facefusion.filesystem import filter_audio_paths, get_file_name, is_image, is_video, resolve_file_paths, resolve_file_pattern
30
+ from facefusion.jobs import job_helper, job_manager, job_runner
31
+ from facefusion.jobs.job_list import compose_job_list
32
+ from facefusion.memory import limit_system_memory
33
+ from facefusion.processors.core import get_processors_modules
34
+ from facefusion.program import create_program
35
+ from facefusion.program_helper import validate_args
36
+ from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, move_temp_file, resolve_temp_frame_paths
37
+ from facefusion.types import Args, ErrorCode
38
+ from facefusion.vision import pack_resolution, read_image, read_static_images, read_video_frame, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution
39
+
40
+ from datetime import datetime
41
+
42
def cli() -> None:
	"""Command line entry point: validate the environment, parse arguments,
	apply them to the global state and dispatch to route()."""
	if not pre_check():
		hard_exit(2)
		return

	signal.signal(signal.SIGINT, signal_exit)
	program = create_program()

	if not validate_args(program):
		hard_exit(2)
		return

	args = vars(program.parse_args())
	apply_args(args, state_manager.init_item)

	if state_manager.get_item('command'):
		logger.init(state_manager.get_item('log_level'))
		route(args)
	else:
		program.print_help()
60
+
61
def route(args : Args) -> None:
	"""Dispatch the parsed CLI command to its handler.

	Applies the optional system memory limit first, then branches on the
	'command' state item. Terminal commands leave via hard_exit().
	"""
	system_memory_limit = state_manager.get_item('system_memory_limit')

	if system_memory_limit and system_memory_limit > 0:
		limit_system_memory(system_memory_limit)

	if state_manager.get_item('command') == 'force-download':
		error_code = force_download()
		return hard_exit(error_code)

	if state_manager.get_item('command') == 'benchmark':
		if not common_pre_check() or not processors_pre_check() or not benchmarker.pre_check():
			return hard_exit(2)
		benchmarker.render()

	if state_manager.get_item('command') in [ 'job-list', 'job-create', 'job-submit', 'job-submit-all', 'job-delete', 'job-delete-all', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]:
		if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
			hard_exit(1)
		error_code = route_job_manager(args)
		hard_exit(error_code)

	if state_manager.get_item('command') == 'run':
		# Import the existing UI module and the custom launcher
		import facefusion.uis.core as ui
		from facefusion.uis.launcher import launch_ui

		# Keep the original pre-run checks
		if not common_pre_check() or not processors_pre_check():
			return hard_exit(2)
		for ui_layout in ui.get_ui_layouts_modules(state_manager.get_item('ui_layouts')):
			if not ui_layout.pre_check():
				return hard_exit(2)
		# Initialise the base UI components
		ui.init()
		# Launch through the custom launcher instead of ui.launch()
		launch_ui(args)

	if state_manager.get_item('command') == 'headless-run':
		if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
			hard_exit(1)
		error_core = process_headless(args)
		hard_exit(error_core)

	if state_manager.get_item('command') == 'batch-run':
		if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
			hard_exit(1)
		error_core = process_batch(args)
		hard_exit(error_core)

	if state_manager.get_item('command') in [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]:
		if not job_manager.init_jobs(state_manager.get_item('jobs_path')):
			hard_exit(1)
		error_code = route_job_runner()
		hard_exit(error_code)
115
+
116
def pre_check() -> bool:
	"""Verify the Python version and the required external tools (curl, ffmpeg)."""
	if sys.version_info < (3, 10):
		logger.error(wording.get('python_not_supported').format(version = '3.10'), __name__)
		return False

	for tool in [ 'curl', 'ffmpeg' ]:
		if not shutil.which(tool):
			logger.error(wording.get(tool + '_not_installed'), __name__)
			return False
	return True
129
+
130
def common_pre_check() -> bool:
	"""Run pre_check on every shared analysis module; True only when all succeed."""
	common_modules = (
		content_analyser,
		face_classifier,
		face_detector,
		face_landmarker,
		face_masker,
		face_recognizer,
		voice_extractor
	)

	for common_module in common_modules:
		if not common_module.pre_check():
			return False
	return True
143
+
144
def processors_pre_check() -> bool:
	"""True when every configured processor module passes its pre_check."""
	return all(processor_module.pre_check() for processor_module in get_processors_modules(state_manager.get_item('processors')))
149
+
150
def force_download() -> ErrorCode:
	"""Download every known model hash and source file.

	Returns 0 when all downloads succeed, 1 on the first failure.
	"""
	common_modules =\
	[
		content_analyser,
		face_classifier,
		face_detector,
		face_landmarker,
		face_masker,
		face_recognizer,
		voice_extractor
	]
	available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ]
	processor_modules = get_processors_modules(available_processors)
	download_scope = state_manager.get_item('download_scope')

	for module in common_modules + processor_modules:
		if not hasattr(module, 'create_static_model_set'):
			continue

		for model in module.create_static_model_set(download_scope).values():
			model_hash_set = model.get('hashes')
			model_source_set = model.get('sources')

			if not model_hash_set or not model_source_set:
				continue
			if not conditional_download_hashes(model_hash_set) or not conditional_download_sources(model_source_set):
				return 1

	return 0
175
+
176
def route_job_manager(args : Args) -> ErrorCode:
	"""Handle the job management commands (list/create/submit/delete and step editing).

	Returns 0 on success, 1 on failure or when the command is unrecognised.
	"""
	if state_manager.get_item('command') == 'job-list':
		job_headers, job_contents = compose_job_list(state_manager.get_item('job_status'))

		if job_contents:
			cli_helper.render_table(job_headers, job_contents)
			return 0
		return 1

	if state_manager.get_item('command') == 'job-create':
		if job_manager.create_job(state_manager.get_item('job_id')):
			logger.info(wording.get('job_created').format(job_id = state_manager.get_item('job_id')), __name__)
			return 0
		logger.error(wording.get('job_not_created').format(job_id = state_manager.get_item('job_id')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-submit':
		if job_manager.submit_job(state_manager.get_item('job_id')):
			logger.info(wording.get('job_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
			return 0
		logger.error(wording.get('job_not_submitted').format(job_id = state_manager.get_item('job_id')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-submit-all':
		if job_manager.submit_jobs(state_manager.get_item('halt_on_error')):
			logger.info(wording.get('job_all_submitted'), __name__)
			return 0
		logger.error(wording.get('job_all_not_submitted'), __name__)
		return 1

	if state_manager.get_item('command') == 'job-delete':
		if job_manager.delete_job(state_manager.get_item('job_id')):
			logger.info(wording.get('job_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
			return 0
		logger.error(wording.get('job_not_deleted').format(job_id = state_manager.get_item('job_id')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-delete-all':
		if job_manager.delete_jobs(state_manager.get_item('halt_on_error')):
			logger.info(wording.get('job_all_deleted'), __name__)
			return 0
		logger.error(wording.get('job_all_not_deleted'), __name__)
		return 1

	if state_manager.get_item('command') == 'job-add-step':
		step_args = reduce_step_args(args)

		if job_manager.add_step(state_manager.get_item('job_id'), step_args):
			logger.info(wording.get('job_step_added').format(job_id = state_manager.get_item('job_id')), __name__)
			return 0
		logger.error(wording.get('job_step_not_added').format(job_id = state_manager.get_item('job_id')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-remix-step':
		step_args = reduce_step_args(args)

		if job_manager.remix_step(state_manager.get_item('job_id'), state_manager.get_item('step_index'), step_args):
			logger.info(wording.get('job_remix_step_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
			return 0
		logger.error(wording.get('job_remix_step_not_added').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-insert-step':
		step_args = reduce_step_args(args)

		if job_manager.insert_step(state_manager.get_item('job_id'), state_manager.get_item('step_index'), step_args):
			logger.info(wording.get('job_step_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
			return 0
		logger.error(wording.get('job_step_not_inserted').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-remove-step':
		if job_manager.remove_step(state_manager.get_item('job_id'), state_manager.get_item('step_index')):
			logger.info(wording.get('job_step_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
			return 0
		logger.error(wording.get('job_step_not_removed').format(job_id = state_manager.get_item('job_id'), step_index = state_manager.get_item('step_index')), __name__)
		return 1
	return 1
254
+
255
def route_job_runner() -> ErrorCode:
	"""Handle the job execution commands (run/retry, single or all).

	Returns 0 on success, 1 on a processing failure, 2 for an unknown command.
	"""
	if state_manager.get_item('command') == 'job-run':
		logger.info(wording.get('running_job').format(job_id = state_manager.get_item('job_id')), __name__)
		if job_runner.run_job(state_manager.get_item('job_id'), process_step):
			logger.info(wording.get('processing_job_succeed').format(job_id = state_manager.get_item('job_id')), __name__)
			return 0
		logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-run-all':
		logger.info(wording.get('running_jobs'), __name__)
		if job_runner.run_jobs(process_step, state_manager.get_item('halt_on_error')):
			logger.info(wording.get('processing_jobs_succeed'), __name__)
			return 0
		logger.info(wording.get('processing_jobs_failed'), __name__)
		return 1

	if state_manager.get_item('command') == 'job-retry':
		logger.info(wording.get('retrying_job').format(job_id = state_manager.get_item('job_id')), __name__)
		if job_runner.retry_job(state_manager.get_item('job_id'), process_step):
			logger.info(wording.get('processing_job_succeed').format(job_id = state_manager.get_item('job_id')), __name__)
			return 0
		logger.info(wording.get('processing_job_failed').format(job_id = state_manager.get_item('job_id')), __name__)
		return 1

	if state_manager.get_item('command') == 'job-retry-all':
		logger.info(wording.get('retrying_jobs'), __name__)
		if job_runner.retry_jobs(process_step, state_manager.get_item('halt_on_error')):
			logger.info(wording.get('processing_jobs_succeed'), __name__)
			return 0
		logger.info(wording.get('processing_jobs_failed'), __name__)
		return 1
	return 2
288
+
289
def process_headless(args : Args) -> ErrorCode:
	"""Create, submit and run a single-step ad-hoc job from the CLI arguments."""
	job_id = job_helper.suggest_job_id('headless')
	step_args = reduce_step_args(args)

	succeeded = job_manager.create_job(job_id) and job_manager.add_step(job_id, step_args) and job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step)
	return 0 if succeeded else 1
296
+
297
def process_batch(args : Args) -> ErrorCode:
	"""Build and run a multi-step job from source/target file patterns.

	With both patterns present, every (source, target) combination becomes a
	step; with only a target pattern, one step per target. The output path is
	derived from the output pattern and the step index.
	"""
	job_id = job_helper.suggest_job_id('batch')
	step_args = reduce_step_args(args)
	job_args = reduce_job_args(args)
	source_paths = resolve_file_pattern(job_args.get('source_pattern'))
	target_paths = resolve_file_pattern(job_args.get('target_pattern'))

	if job_manager.create_job(job_id):
		if source_paths and target_paths:
			for index, (source_path, target_path) in enumerate(itertools.product(source_paths, target_paths)):
				step_args['source_paths'] = [ source_path ]
				step_args['target_path'] = target_path
				step_args['output_path'] = job_args.get('output_pattern').format(index = index)
				if not job_manager.add_step(job_id, step_args):
					return 1
			if job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step):
				return 0

		if not source_paths and target_paths:
			for index, target_path in enumerate(target_paths):
				step_args['target_path'] = target_path
				step_args['output_path'] = job_args.get('output_pattern').format(index = index)
				if not job_manager.add_step(job_id, step_args):
					return 1
			if job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step):
				return 0
	return 1
324
+
325
def process_step(job_id : str, step_index : int, step_args : Args) -> bool:
	"""Execute a single job step and report success.

	Merges the job level arguments with the step's own arguments (step wins),
	applies them to the global state and runs the processing pipeline.
	Exceptions are now logged instead of being silently swallowed, so failing
	steps remain diagnosable; the step still reports failure to the runner.
	"""
	start_time = time()
	try:
		clear_reference_faces()
		step_total = job_manager.count_step_total(job_id)
		job_args = collect_job_args()
		# job level defaults first, then the step's own arguments take priority
		step_args = { **job_args, **step_args }
		apply_args(step_args, state_manager.set_item)

		# ensure a step provided face_selector_index wins over any stale state
		if 'face_selector_index' in step_args:
			state_manager.set_item('face_selector_index', step_args['face_selector_index'])

		current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
		logger.info(wording.get('processing_step').format(step_current = step_index + 1, step_total = step_total) + f" at {current_time}", __name__)

		if not common_pre_check() or not processors_pre_check():
			return False

		error_code = conditional_process()
		if error_code == 0:
			seconds = '{:.2f}'.format((time() - start_time) % 60)
			logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__)
		return error_code == 0
	except Exception:
		# previously `return False` with no logging — surface the failure cause
		logger.error(traceback.format_exc(), __name__)
		return False
352
+
353
def conditional_process() -> int:
	"""
	Process the current target according to the global state. Only image
	targets are handled here: resolve the source and target faces, register
	the target faces as references and run every processor over the frame.
	Returns 0 on success, 1 on any failure.
	"""
	processors_list = state_manager.get_item('processors')

	try:
		if is_image(state_manager.get_item('target_path')):
			target_vision_frame = read_static_image(state_manager.get_item('target_path'))
			if target_vision_frame is None:
				return 1

			# resolve the source face
			source_paths = state_manager.get_item('source_paths')
			source_frames = read_static_images(source_paths)
			source_faces = get_many_faces(source_frames)
			source_face = get_one_face(source_faces)
			if not source_face:
				return 1

			# resolve the target faces
			target_faces = get_many_faces([ target_vision_frame ])
			if not target_faces:
				return 1

			# register the target faces as reference faces
			clear_reference_faces()
			for face in target_faces:
				append_reference_face(face = face)

			# run every processor over the frame
			modified_frame = target_vision_frame.copy()
			for processor_module in get_processors_modules(processors_list):
				if processor_module.pre_process('output'):
					face_selector_index = state_manager.get_item('face_selector_index') or 0
					inputs = {
						'reference_faces': get_reference_faces(),
						'source_face': source_face,
						'target_vision_frame': modified_frame,
						'face_selector_index': face_selector_index,
						'face_selector_mode': state_manager.get_item('face_selector_mode'),
						'face_selector_order': state_manager.get_item('face_selector_order'),
					}
					result = processor_module.process_frame(inputs)
					if result is not None:
						modified_frame = result
					else:
						# route the former debug print through the logger
						logger.warn(f"[DEBUG] Processor {processor_module.__name__} returned None", __name__)

			# surface any error a processor wrote to the shared error file
			error_file = os.path.join(tempfile.gettempdir(), 'facefusion_error.txt')
			if os.path.exists(error_file):
				with open(error_file, 'r') as f:
					error_message = f.read().strip()
				logger.error(error_message, __name__)  # forwarded to the terminal UI via logger
				os.remove(error_file)

			# persist the result
			output_path = state_manager.get_item('output_path')
			success = write_image(output_path, modified_frame)

			if not success:
				return 1

		return 0

	except Exception:
		traceback.print_exc()
		return 1
431
+
432
def conditional_append_reference_faces() -> None:
	"""Register every face found in the target image as a reference face, but only in a reference selector mode."""
	if 'reference' not in state_manager.get_item('face_selector_mode'):
		return
	target_path = state_manager.get_item('target_path')
	if not is_image(target_path):
		return
	target_frame = read_static_image(target_path)
	if target_frame is None:
		return
	target_faces = get_many_faces([ target_frame ])
	if target_faces:
		clear_reference_faces()
		for face in target_faces:
			append_reference_face(face = face)
443
+
444
+ # process image
445
def process_image(start_time : float) -> ErrorCode:
	"""
	Run the full image pipeline for the current state: analyse the target,
	copy it into a temp directory, apply every processor in place, finalize
	the result as a PNG and clean up. Returns an ErrorCode
	(0 = success, 1 = failure, 3 = blocked by content analysis, 4 = stopped).
	"""
	# abort when the content analyser flags the target image
	if analyse_image(state_manager.get_item('target_path')):
		return 3

	logger.debug(wording.get('clearing_temp'), __name__)
	clear_temp_directory(state_manager.get_item('target_path'))
	logger.debug(wording.get('creating_temp'), __name__)
	create_temp_directory(state_manager.get_item('target_path'))

	process_manager.start()
	# restrict the requested output resolution to what the target actually offers
	temp_image_resolution = pack_resolution(restrict_image_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_image_resolution'))))
	logger.info(wording.get('copying_image').format(resolution = temp_image_resolution), __name__)
	if copy_image(state_manager.get_item('target_path'), temp_image_resolution):
		logger.debug(wording.get('copying_image_succeed'), __name__)
	else:
		logger.error(wording.get('copying_image_failed'), __name__)
		process_manager.end()
		return 1

	# every processor works in place on the temp copy
	temp_image_path = get_temp_file_path(state_manager.get_item('target_path'))
	for processor_module in get_processors_modules(state_manager.get_item('processors')):
		logger.info(wording.get('processing'), processor_module.__name__)
		processor_module.process_image(state_manager.get_item('source_paths'), temp_image_path, temp_image_path)
		processor_module.post_process()
	if is_process_stopping():
		process_manager.end()
		return 4

	# force a .png extension on the output path
	base, _ = os.path.splitext(state_manager.get_item('output_path'))
	png_output_path = base + ".png"
	state_manager.set_item('output_path', png_output_path)

	logger.info(wording.get('finalizing_image').format(resolution = state_manager.get_item('output_image_resolution')), __name__)

	# NOTE(review): in this fork finalize_image returns the final path instead of a bool — confirm against its definition
	final_path = finalize_image(
		state_manager.get_item('target_path'),
		state_manager.get_item('output_path'),
		state_manager.get_item('output_image_resolution')
	)

	if final_path:
		logger.debug(wording.get('finalizing_image_succeed'), __name__)
		# update the path in the state so the Gradio UI picks up the new file
		state_manager.set_item('output_path', final_path)
	else:
		logger.warn(wording.get('finalizing_image_skipped'), __name__)

	logger.debug(wording.get('clearing_temp'), __name__)
	clear_temp_directory(state_manager.get_item('target_path'))

	# verify the file exists at the (possibly updated) output path
	if is_image(state_manager.get_item('output_path')):
		seconds = '{:.2f}'.format((time() - start_time) % 60)
		logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__)
	else:
		logger.error(f"Output file not found: {state_manager.get_item('output_path')}", __name__)
		process_manager.end()
		return 1
	process_manager.end()
	return 0
507
+
508
def process_video(start_time : float) -> ErrorCode:
	"""
	Run the full video pipeline for the current state: analyse, extract
	frames, apply every processor, merge the frames back and restore or
	replace the audio. Returns an ErrorCode
	(0 = success, 1 = failure, 3 = blocked by content analysis, 4 = stopped).
	"""
	trim_frame_start, trim_frame_end = restrict_trim_frame(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end'))
	# abort when the content analyser flags the trimmed target video
	if analyse_video(state_manager.get_item('target_path'), trim_frame_start, trim_frame_end):
		return 3

	logger.debug(wording.get('clearing_temp'), __name__)
	clear_temp_directory(state_manager.get_item('target_path'))
	logger.debug(wording.get('creating_temp'), __name__)
	create_temp_directory(state_manager.get_item('target_path'))

	process_manager.start()
	# restrict resolution and fps to what the target video actually offers
	temp_video_resolution = pack_resolution(restrict_video_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_video_resolution'))))
	temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps'))
	logger.info(wording.get('extracting_frames').format(resolution = temp_video_resolution, fps = temp_video_fps), __name__)
	if extract_frames(state_manager.get_item('target_path'), temp_video_resolution, temp_video_fps, trim_frame_start, trim_frame_end):
		logger.debug(wording.get('extracting_frames_succeed'), __name__)
	else:
		if is_process_stopping():
			process_manager.end()
			return 4
		logger.error(wording.get('extracting_frames_failed'), __name__)
		process_manager.end()
		return 1

	# every processor works in place on the extracted temp frames
	temp_frame_paths = resolve_temp_frame_paths(state_manager.get_item('target_path'))
	if temp_frame_paths:
		for processor_module in get_processors_modules(state_manager.get_item('processors')):
			logger.info(wording.get('processing'), processor_module.__name__)
			processor_module.process_video(state_manager.get_item('source_paths'), temp_frame_paths)
			processor_module.post_process()
		if is_process_stopping():
			return 4
	else:
		logger.error(wording.get('temp_frames_not_found'), __name__)
		process_manager.end()
		return 1

	logger.info(wording.get('merging_video').format(resolution = state_manager.get_item('output_video_resolution'), fps = state_manager.get_item('output_video_fps')), __name__)
	if merge_video(state_manager.get_item('target_path'), temp_video_fps, state_manager.get_item('output_video_resolution'), state_manager.get_item('output_video_fps'), trim_frame_start, trim_frame_end):
		logger.debug(wording.get('merging_video_succeed'), __name__)
	else:
		if is_process_stopping():
			process_manager.end()
			return 4
		logger.error(wording.get('merging_video_failed'), __name__)
		process_manager.end()
		return 1

	# audio handling: skip entirely, replace with a source audio, or restore the original track
	if state_manager.get_item('output_audio_volume') == 0:
		logger.info(wording.get('skipping_audio'), __name__)
		move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
	else:
		source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
		if source_audio_path:
			if replace_audio(state_manager.get_item('target_path'), source_audio_path, state_manager.get_item('output_path')):
				video_manager.clear_video_pool()
				logger.debug(wording.get('replacing_audio_succeed'), __name__)
			else:
				video_manager.clear_video_pool()
				if is_process_stopping():
					process_manager.end()
					return 4
				# fall back to the silent merge result when replacing fails
				logger.warn(wording.get('replacing_audio_skipped'), __name__)
				move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))
		else:
			if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), trim_frame_start, trim_frame_end):
				video_manager.clear_video_pool()
				logger.debug(wording.get('restoring_audio_succeed'), __name__)
			else:
				video_manager.clear_video_pool()
				if is_process_stopping():
					process_manager.end()
					return 4
				# fall back to the silent merge result when restoring fails
				logger.warn(wording.get('restoring_audio_skipped'), __name__)
				move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path'))

	logger.debug(wording.get('clearing_temp'), __name__)
	clear_temp_directory(state_manager.get_item('target_path'))

	if is_video(state_manager.get_item('output_path')):
		seconds = '{:.2f}'.format((time() - start_time))
		logger.info(wording.get('processing_video_succeed').format(seconds = seconds), __name__)
	else:
		logger.error(wording.get('processing_video_failed'), __name__)
		process_manager.end()
		return 1
	process_manager.end()
	return 0
596
+
597
def is_process_stopping() -> bool:
	"""End the process manager when a stop was requested and report whether work is still pending."""
	stopping = process_manager.is_stopping()
	if stopping:
		process_manager.end()
		logger.info(wording.get('processing_stopped'), __name__)
	return process_manager.is_pending()
facefusion/curl_builder.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import shutil
3
+
4
+ from facefusion import metadata
5
+ from facefusion.types import Commands
6
+
7
+
8
def run(commands : Commands) -> Commands:
	"""Prefix the given curl arguments with the curl binary and the default project-wide flags."""
	user_agent = metadata.get('name') + '/' + metadata.get('version')
	base_commands = [ shutil.which('curl'), '--user-agent', user_agent ]
	base_commands += [ '--insecure', '--location', '--silent' ]
	return base_commands + commands
12
+
13
+
14
def chain(*commands : Commands) -> Commands:
	"""Flatten any number of command lists into a single command list."""
	chained = []
	for command_group in commands:
		chained.extend(command_group)
	return chained
16
+
17
+
18
def head(url : str) -> Commands:
	"""Build the curl arguments for a HEAD request against the given url."""
	return list(( '-I', url ))
20
+
21
+
22
def download(url : str, download_file_path : str) -> Commands:
	"""Build the curl arguments that (resume-)download url into download_file_path."""
	arguments = [ '--create-dirs', '--continue-at', '-', '--output' ]
	arguments.append(download_file_path)
	arguments.append(url)
	return arguments
24
+
25
+
26
def set_timeout(timeout : int) -> Commands:
	"""Build the curl arguments that cap the connection timeout in seconds."""
	return [ '--connect-timeout', f'{timeout}' ]
facefusion/date_helper.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime, timedelta
2
+ from typing import Optional, Tuple
3
+
4
+ from facefusion import wording
5
+
6
+
7
def get_current_date_time() -> datetime:
	"""Return the current local date and time as a timezone-aware datetime."""
	current = datetime.now()
	return current.astimezone()
9
+
10
+
11
def split_time_delta(time_delta : timedelta) -> Tuple[int, int, int, int]:
	"""Split a timedelta into whole (days, hours, minutes, seconds)."""
	rest_seconds = time_delta.total_seconds()
	days, rest_seconds = divmod(rest_seconds, 86400)
	hours, rest_seconds = divmod(rest_seconds, 3600)
	minutes, seconds = divmod(rest_seconds, 60)
	return int(days), int(hours), int(minutes), int(seconds)
16
+
17
+
18
def describe_time_ago(date_time : datetime) -> Optional[str]:
	"""Render a human-readable description of how long ago date_time was."""
	time_ago = datetime.now(date_time.tzinfo) - date_time
	days, hours, minutes, _ = split_time_delta(time_ago)

	if time_ago > timedelta(days = 1):
		return wording.get('time_ago_days').format(days = days, hours = hours, minutes = minutes)
	if time_ago > timedelta(hours = 1):
		return wording.get('time_ago_hours').format(hours = hours, minutes = minutes)
	if time_ago > timedelta(minutes = 1):
		return wording.get('time_ago_minutes').format(minutes = minutes)
	return wording.get('time_ago_now')
facefusion/download.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ from functools import lru_cache
4
+ from typing import List, Optional, Tuple
5
+ from urllib.parse import urlparse
6
+
7
+ from tqdm import tqdm
8
+
9
+ import facefusion.choices
10
+ from facefusion import curl_builder, logger, process_manager, state_manager, wording
11
+ from facefusion.filesystem import get_file_name, get_file_size, is_file, remove_file
12
+ from facefusion.hash_helper import validate_hash
13
+ from facefusion.types import Commands, DownloadProvider, DownloadSet
14
+
15
+
16
def open_curl(commands : Commands) -> subprocess.Popen[bytes]:
	"""Spawn curl with the project-wide default flags prepended and both pipes attached."""
	full_commands = curl_builder.run(commands)
	return subprocess.Popen(full_commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
19
+
20
+
21
def conditional_download(download_directory_path : str, urls : List[str]) -> None:
	"""
	Download every url into download_directory_path unless a file of the
	expected size already exists. Partial files are resumed via curl's
	--continue-at flag; progress is polled from the file size on disk.
	"""
	for url in urls:
		download_file_name = os.path.basename(urlparse(url).path)
		download_file_path = os.path.join(download_directory_path, download_file_name)
		initial_size = get_file_size(download_file_path)
		download_size = get_static_download_size(url)

		if initial_size < download_size:
			with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
				commands = curl_builder.chain(
					curl_builder.download(url, download_file_path),
					curl_builder.set_timeout(10)
				)
				process = open_curl(commands)
				current_size = initial_size
				progress.set_postfix(download_providers = state_manager.get_item('download_providers'), file_name = download_file_name)

				while current_size < download_size:
					if is_file(download_file_path):
						current_size = get_file_size(download_file_path)
						progress.update(current_size - progress.n)
					# stop polling once curl has exited, otherwise a failed
					# download would spin in this loop forever
					if process.poll() is not None:
						if is_file(download_file_path):
							current_size = get_file_size(download_file_path)
							progress.update(current_size - progress.n)
						break
42
+
43
+
44
@lru_cache(maxsize = None)
def get_static_download_size(url : str) -> int:
	"""Resolve the content length of url via a HEAD request, cached per url; 0 when no header is found."""
	commands = curl_builder.chain(
		curl_builder.head(url),
		curl_builder.set_timeout(5)
	)
	process = open_curl(commands)

	for raw_line in reversed(process.stdout.readlines()):
		header_line = raw_line.decode().lower()
		if 'content-length:' in header_line:
			_, content_length = header_line.split('content-length:')
			return int(content_length)
	return 0
60
+
61
+
62
@lru_cache(maxsize = None)
def ping_static_url(url : str) -> bool:
	"""Check once per url whether a HEAD request succeeds, cached per url."""
	process = open_curl(curl_builder.chain(
		curl_builder.head(url),
		curl_builder.set_timeout(5)
	))
	process.communicate()
	return process.returncode == 0
71
+
72
+
73
def conditional_download_hashes(hash_set : DownloadSet) -> bool:
	"""Download every missing hash file in hash_set and report whether all hash files exist afterwards."""
	hash_paths = [ hash_entry.get('path') for hash_entry in hash_set.values() ]

	process_manager.check()
	_, invalid_hash_paths = validate_hash_paths(hash_paths)
	if invalid_hash_paths:
		# fetch only the entries whose hash file is missing
		for hash_entry in hash_set.values():
			if hash_entry.get('path') in invalid_hash_paths and hash_entry.get('url'):
				conditional_download(os.path.dirname(hash_entry.get('path')), [ hash_entry.get('url') ])

	valid_hash_paths, invalid_hash_paths = validate_hash_paths(hash_paths)

	for valid_hash_path in valid_hash_paths:
		logger.debug(wording.get('validating_hash_succeed').format(hash_file_name = get_file_name(valid_hash_path)), __name__)
	for invalid_hash_path in invalid_hash_paths:
		logger.error(wording.get('validating_hash_failed').format(hash_file_name = get_file_name(invalid_hash_path)), __name__)

	if not invalid_hash_paths:
		process_manager.end()
	return not invalid_hash_paths
98
+
99
+
100
def conditional_download_sources(source_set : DownloadSet) -> bool:
	"""Download every missing or corrupt source file in source_set, delete the corrupt ones, and report whether all sources validate."""
	source_paths = [ source_entry.get('path') for source_entry in source_set.values() ]

	process_manager.check()
	_, invalid_source_paths = validate_source_paths(source_paths)
	if invalid_source_paths:
		# fetch only the entries whose source file is missing or corrupt
		for source_entry in source_set.values():
			if source_entry.get('path') in invalid_source_paths and source_entry.get('url'):
				conditional_download(os.path.dirname(source_entry.get('path')), [ source_entry.get('url') ])

	valid_source_paths, invalid_source_paths = validate_source_paths(source_paths)

	for valid_source_path in valid_source_paths:
		logger.debug(wording.get('validating_source_succeed').format(source_file_name = get_file_name(valid_source_path)), __name__)
	for invalid_source_path in invalid_source_paths:
		invalid_source_file_name = get_file_name(invalid_source_path)
		logger.error(wording.get('validating_source_failed').format(source_file_name = invalid_source_file_name), __name__)

		# drop the corrupt file so the next run re-downloads it
		if remove_file(invalid_source_path):
			logger.error(wording.get('deleting_corrupt_source').format(source_file_name = invalid_source_file_name), __name__)

	if not invalid_source_paths:
		process_manager.end()
	return not invalid_source_paths
128
+
129
+
130
def validate_hash_paths(hash_paths : List[str]) -> Tuple[List[str], List[str]]:
	"""Partition hash_paths into (existing, missing) hash files."""
	valid_hash_paths : List[str] = []
	invalid_hash_paths : List[str] = []

	for hash_path in hash_paths:
		bucket = valid_hash_paths if is_file(hash_path) else invalid_hash_paths
		bucket.append(hash_path)

	return valid_hash_paths, invalid_hash_paths
141
+
142
+
143
def validate_source_paths(source_paths : List[str]) -> Tuple[List[str], List[str]]:
	"""Partition source_paths into (hash-valid, hash-invalid) source files."""
	valid_source_paths : List[str] = []
	invalid_source_paths : List[str] = []

	for source_path in source_paths:
		bucket = valid_source_paths if validate_hash(source_path) else invalid_source_paths
		bucket.append(source_path)

	return valid_source_paths, invalid_source_paths
154
+
155
+
156
def resolve_download_url(base_name : str, file_name : str) -> Optional[str]:
	"""Resolve the first reachable download url for file_name across the configured download providers."""
	for download_provider in state_manager.get_item('download_providers'):
		download_url = resolve_download_url_by_provider(download_provider, base_name, file_name)
		if download_url:
			return download_url
	return None
165
+
166
+
167
def resolve_download_url_by_provider(download_provider : DownloadProvider, base_name : str, file_name : str) -> Optional[str]:
	"""Resolve a download url for file_name from a single provider, trying each of its mirror urls in order."""
	download_provider_value = facefusion.choices.download_provider_set.get(download_provider)

	for download_provider_url in download_provider_value.get('urls'):
		if ping_static_url(download_provider_url):
			provider_path = download_provider_value.get('path').format(base_name = base_name, file_name = file_name)
			return download_provider_url + provider_path
	return None
facefusion/execution.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import shutil
2
+ import subprocess
3
+ import xml.etree.ElementTree as ElementTree
4
+ from functools import lru_cache
5
+ from typing import List, Optional
6
+
7
+ from onnxruntime import get_available_providers, set_default_logger_severity
8
+
9
+ import facefusion.choices
10
+ from facefusion.types import ExecutionDevice, ExecutionProvider, InferenceSessionProvider, ValueAndUnit
11
+
12
+ set_default_logger_severity(3)
13
+
14
+
15
def has_execution_provider(execution_provider : ExecutionProvider) -> bool:
	"""Check whether the given execution provider is available in this onnxruntime build."""
	available_execution_providers = get_available_execution_providers()
	return execution_provider in available_execution_providers
17
+
18
+
19
def get_available_execution_providers() -> List[ExecutionProvider]:
	"""List the execution providers available in onnxruntime, ordered like facefusion.choices.execution_providers."""
	inference_session_providers = get_available_providers()
	detected_execution_providers = [
		execution_provider
		for execution_provider, execution_provider_value in facefusion.choices.execution_provider_set.items()
		if execution_provider_value in inference_session_providers
	]
	available_execution_providers : List[ExecutionProvider] = []

	# insert at the canonical index so the result follows the configured order
	for execution_provider in detected_execution_providers:
		index = facefusion.choices.execution_providers.index(execution_provider)
		available_execution_providers.insert(index, execution_provider)

	return available_execution_providers
29
+
30
+
31
def create_inference_session_providers(execution_device_id : str, execution_providers : List[ExecutionProvider]) -> List[InferenceSessionProvider]:
	"""
	Translate facefusion execution providers into onnxruntime inference
	session provider entries with per-provider options. When requested,
	the 'cpu' provider is always appended last as the fallback.
	"""
	inference_session_providers : List[InferenceSessionProvider] = []

	for execution_provider in execution_providers:
		if execution_provider == 'cuda':
			inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_id': execution_device_id,
				# some GPUs need a non-exhaustive algorithm search, see resolve_cudnn_conv_algo_search()
				'cudnn_conv_algo_search': resolve_cudnn_conv_algo_search()
			}))
		if execution_provider == 'tensorrt':
			inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_id': execution_device_id,
				# cache built engines and timings under .caches to avoid expensive rebuilds
				'trt_engine_cache_enable': True,
				'trt_engine_cache_path': '.caches',
				'trt_timing_cache_enable': True,
				'trt_timing_cache_path': '.caches',
				'trt_builder_optimization_level': 5
			}))
		if execution_provider in [ 'directml', 'rocm' ]:
			inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_id': execution_device_id
			}))
		if execution_provider == 'openvino':
			inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'device_type': resolve_openvino_device_type(execution_device_id),
				'precision': 'FP32'
			}))
		if execution_provider == 'coreml':
			inference_session_providers.append((facefusion.choices.execution_provider_set.get(execution_provider),
			{
				'SpecializationStrategy': 'FastPrediction',
				'ModelCacheDirectory': '.caches'
			}))

	if 'cpu' in execution_providers:
		inference_session_providers.append(facefusion.choices.execution_provider_set.get('cpu'))

	return inference_session_providers
73
+
74
+
75
def resolve_cudnn_conv_algo_search() -> str:
	"""Pick the cuDNN convolution algorithm search mode; certain GTX 16xx products get DEFAULT, everything else EXHAUSTIVE."""
	product_names = ('GeForce GTX 1630', 'GeForce GTX 1650', 'GeForce GTX 1660')
	uses_listed_product = any(
		execution_device.get('product').get('name').startswith(product_names)
		for execution_device in detect_static_execution_devices()
	)
	return 'DEFAULT' if uses_listed_product else 'EXHAUSTIVE'
84
+
85
+
86
def resolve_openvino_device_type(execution_device_id : str) -> str:
	"""Map an execution device id to the OpenVINO device type string."""
	special_device_types = {
		'0': 'GPU',
		'∞': 'MULTI:GPU'
	}
	return special_device_types.get(execution_device_id, 'GPU.' + execution_device_id)
92
+
93
+
94
def run_nvidia_smi() -> subprocess.Popen[bytes]:
	"""Spawn nvidia-smi in XML query mode and return the process handle."""
	nvidia_smi_path = shutil.which('nvidia-smi')
	return subprocess.Popen([ nvidia_smi_path, '--query', '--xml-format' ], stdout = subprocess.PIPE)
97
+
98
+
99
@lru_cache(maxsize = None)
def detect_static_execution_devices() -> List[ExecutionDevice]:
	# cached variant: nvidia-smi is only queried once per process lifetime
	return detect_execution_devices()
102
+
103
+
104
def detect_execution_devices() -> List[ExecutionDevice]:
	"""
	Query nvidia-smi for all NVIDIA GPUs and return one ExecutionDevice per
	GPU. Returns an empty list when nvidia-smi is unavailable or its output
	cannot be parsed.
	"""
	execution_devices : List[ExecutionDevice] = []

	try:
		output, _ = run_nvidia_smi().communicate()
		root_element = ElementTree.fromstring(output)
	except Exception:
		# nvidia-smi missing or broken output: fall back to an empty XML tree
		root_element = ElementTree.Element('xml')

	for gpu_element in root_element.findall('gpu'):
		execution_devices.append(
		{
			'driver_version': root_element.findtext('driver_version'),
			'framework':
			{
				'name': 'CUDA',
				'version': root_element.findtext('cuda_version')
			},
			'product':
			{
				'vendor': 'NVIDIA',
				# strip the redundant vendor prefix from the product name
				'name': gpu_element.findtext('product_name').replace('NVIDIA', '').strip()
			},
			'video_memory':
			{
				'total': create_value_and_unit(gpu_element.findtext('fb_memory_usage/total')),
				'free': create_value_and_unit(gpu_element.findtext('fb_memory_usage/free'))
			},
			'temperature':
			{
				'gpu': create_value_and_unit(gpu_element.findtext('temperature/gpu_temp')),
				'memory': create_value_and_unit(gpu_element.findtext('temperature/memory_temp'))
			},
			'utilization':
			{
				'gpu': create_value_and_unit(gpu_element.findtext('utilization/gpu_util')),
				'memory': create_value_and_unit(gpu_element.findtext('utilization/memory_util'))
			}
		})

	return execution_devices
145
+
146
+
147
def create_value_and_unit(text : str) -> Optional[ValueAndUnit]:
	"""Parse a 'value unit' string (e.g. '512 MiB') into a ValueAndUnit dict; None when no space separator is present."""
	if ' ' not in text:
		return None

	value, unit = text.split()
	return {
		'value': int(value),
		'unit': str(unit)
	}
facefusion/exit_helper.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import signal
2
+ import sys
3
+ from time import sleep
4
+ from types import FrameType
5
+
6
+ from facefusion import process_manager, state_manager
7
+ from facefusion.temp_helper import clear_temp_directory
8
+ from facefusion.types import ErrorCode
9
+
10
+
11
def hard_exit(error_code : ErrorCode) -> None:
	"""Terminate the interpreter with error_code; SIGINT is ignored from here on so Ctrl+C cannot interrupt the shutdown."""
	signal.signal(signal.SIGINT, signal.SIG_IGN)
	raise SystemExit(error_code)
14
+
15
+
16
def signal_exit(signum : int, frame : FrameType) -> None:
	"""Signal handler that delegates to a graceful shutdown with exit code 0."""
	del signum, frame  # handler signature is dictated by the signal module
	graceful_exit(0)
18
+
19
+
20
def graceful_exit(error_code : ErrorCode) -> None:
	"""Request a stop, wait for running work to drain, clear the temp directory and exit with error_code."""
	process_manager.stop()
	while process_manager.is_processing():
		sleep(0.5)
	target_path = state_manager.get_item('target_path')
	if target_path:
		clear_temp_directory(target_path)
	hard_exit(error_code)
facefusion/face_analyser.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ import numpy
4
+
5
+ from facefusion import state_manager
6
+ from facefusion.common_helper import get_first
7
+ from facefusion.face_classifier import classify_face
8
+ from facefusion.face_detector import detect_faces, detect_rotated_faces
9
+ from facefusion.face_helper import apply_nms, convert_to_face_landmark_5, estimate_face_angle, get_nms_threshold
10
+ from facefusion.face_landmarker import detect_face_landmark, estimate_face_landmark_68_5
11
+ from facefusion.face_recognizer import calc_embedding
12
+ from facefusion.face_store import get_static_faces, set_static_faces
13
+ from facefusion.types import BoundingBox, Face, FaceLandmark5, FaceLandmarkSet, FaceScoreSet, Score, VisionFrame
14
+
15
+
16
def create_faces(vision_frame : VisionFrame, bounding_boxes : List[BoundingBox], face_scores : List[Score], face_landmarks_5 : List[FaceLandmark5]) -> List[Face]:
	"""
	Turn raw detections into Face objects: apply non-maximum suppression,
	optionally refine the 68-point landmarks, then compute the embedding
	and gender/age/race attributes for every surviving detection.
	"""
	faces = []
	nms_threshold = get_nms_threshold(state_manager.get_item('face_detector_model'), state_manager.get_item('face_detector_angles'))
	keep_indices = apply_nms(bounding_boxes, face_scores, state_manager.get_item('face_detector_score'), nms_threshold)

	for index in keep_indices:
		bounding_box = bounding_boxes[index]
		face_score = face_scores[index]
		face_landmark_5 = face_landmarks_5[index]
		# start from the detector landmarks; they may be replaced by the landmarker below
		face_landmark_5_68 = face_landmark_5
		face_landmark_68_5 = estimate_face_landmark_68_5(face_landmark_5_68)
		face_landmark_68 = face_landmark_68_5
		face_landmark_score_68 = 0.0
		face_angle = estimate_face_angle(face_landmark_68_5)

		# refine the 68-point landmarks only when the landmarker is enabled
		if state_manager.get_item('face_landmarker_score') > 0:
			face_landmark_68, face_landmark_score_68 = detect_face_landmark(vision_frame, bounding_box, face_angle)
			if face_landmark_score_68 > state_manager.get_item('face_landmarker_score'):
				face_landmark_5_68 = convert_to_face_landmark_5(face_landmark_68)

		face_landmark_set : FaceLandmarkSet =\
		{
			'5': face_landmark_5,
			'5/68': face_landmark_5_68,
			'68': face_landmark_68,
			'68/5': face_landmark_68_5
		}
		face_score_set : FaceScoreSet =\
		{
			'detector': face_score,
			'landmarker': face_landmark_score_68
		}
		embedding, normed_embedding = calc_embedding(vision_frame, face_landmark_set.get('5/68'))
		gender, age, race = classify_face(vision_frame, face_landmark_set.get('5/68'))
		faces.append(Face(
			bounding_box = bounding_box,
			score_set = face_score_set,
			landmark_set = face_landmark_set,
			angle = face_angle,
			embedding = embedding,
			normed_embedding = normed_embedding,
			gender = gender,
			age = age,
			race = race
		))
	return faces
62
+
63
+
64
def get_one_face(faces : List[Face], position : int = 0) -> Optional[Face]:
	"""Pick the face at position, clamped to the last index; None for an empty list."""
	if not faces:
		return None
	clamped_position = min(position, len(faces) - 1)
	return faces[clamped_position]
69
+
70
+
71
def get_average_face(faces : List[Face]) -> Optional[Face]:
	"""Build a synthetic face carrying the first face's metadata but the mean (normed) embedding over all faces; None for an empty list."""
	if not faces:
		return None

	first_face = get_first(faces)
	embeddings = [ face.embedding for face in faces ]
	normed_embeddings = [ face.normed_embedding for face in faces ]

	return Face(
		bounding_box = first_face.bounding_box,
		score_set = first_face.score_set,
		landmark_set = first_face.landmark_set,
		angle = first_face.angle,
		embedding = numpy.mean(embeddings, axis = 0),
		normed_embedding = numpy.mean(normed_embeddings, axis = 0),
		gender = first_face.gender,
		age = first_face.age,
		race = first_face.race
	)
94
+
95
+
96
def get_many_faces(vision_frames : List[VisionFrame]) -> List[Face]:
	"""
	Detect all faces across the given frames, honoring every configured
	detector angle. Results per frame are served from and written to the
	static face cache.
	"""
	many_faces : List[Face] = []

	for vision_frame in vision_frames:
		if numpy.any(vision_frame):
			static_faces = get_static_faces(vision_frame)
			if static_faces:
				many_faces.extend(static_faces)
			else:
				all_bounding_boxes = []
				all_face_scores = []
				all_face_landmarks_5 = []

				# accumulate detections over every configured rotation angle
				for face_detector_angle in state_manager.get_item('face_detector_angles'):
					if face_detector_angle == 0:
						bounding_boxes, face_scores, face_landmarks_5 = detect_faces(vision_frame)
					else:
						bounding_boxes, face_scores, face_landmarks_5 = detect_rotated_faces(vision_frame, face_detector_angle)
					all_bounding_boxes.extend(bounding_boxes)
					all_face_scores.extend(face_scores)
					all_face_landmarks_5.extend(face_landmarks_5)

				if all_bounding_boxes and all_face_scores and all_face_landmarks_5 and state_manager.get_item('face_detector_score') > 0:
					faces = create_faces(vision_frame, all_bounding_boxes, all_face_scores, all_face_landmarks_5)

					if faces:
						many_faces.extend(faces)
						set_static_faces(vision_frame, faces)
	return many_faces
facefusion/face_classifier.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import List, Tuple
3
+
4
+ import numpy
5
+
6
+ from facefusion import inference_manager
7
+ from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
8
+ from facefusion.face_helper import warp_face_by_face_landmark_5
9
+ from facefusion.filesystem import resolve_relative_path
10
+ from facefusion.thread_helper import conditional_thread_semaphore
11
+ from facefusion.types import Age, DownloadScope, FaceLandmark5, Gender, InferencePool, ModelOptions, ModelSet, Race, VisionFrame
12
+
13
+
14
@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""
	Return the static model set for the fairface classifier.

	download_scope only keys the lru_cache here; the returned set is the
	same for every scope. 'template' and 'size' drive face alignment,
	'mean' / 'standard_deviation' are the normalization constants applied
	in classify_face (ImageNet statistics).
	"""
	return\
	{
		'fairface':
		{
			'hashes':
			{
				'face_classifier':
				{
					'url': resolve_download_url('models-3.0.0', 'fairface.hash'),
					'path': resolve_relative_path('../.assets/models/fairface.hash')
				}
			},
			'sources':
			{
				'face_classifier':
				{
					'url': resolve_download_url('models-3.0.0', 'fairface.onnx'),
					'path': resolve_relative_path('../.assets/models/fairface.onnx')
				}
			},
			'template': 'arcface_112_v2',
			'size': (224, 224),
			'mean': [ 0.485, 0.456, 0.406 ],
			'standard_deviation': [ 0.229, 0.224, 0.225 ]
		}
	}
42
+
43
+
44
def get_inference_pool() -> InferencePool:
	"""Create or fetch the shared ONNX inference pool for the fairface model."""
	model_source_set = get_model_options().get('sources')
	return inference_manager.get_inference_pool(__name__, [ 'fairface' ], model_source_set)
49
+
50
+
51
def clear_inference_pool() -> None:
	"""Release the cached fairface inference sessions held by this module."""
	inference_manager.clear_inference_pool(__name__, [ 'fairface' ])
54
+
55
+
56
def get_model_options() -> ModelOptions:
	"""Look up the fairface entry from the static model set."""
	model_set = create_static_model_set('full')
	return model_set.get('fairface')
58
+
59
+
60
def pre_check() -> bool:
	"""Download the fairface hash and model files when missing; True on success."""
	model_options = get_model_options()
	return conditional_download_hashes(model_options.get('hashes')) and conditional_download_sources(model_options.get('sources'))
65
+
66
+
67
def classify_face(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Gender, Age, Race]:
	"""
	Classify gender, age bracket and race for a single face.

	The face is aligned via its 5-point landmarks, the channel order is
	reversed (BGR -> RGB by OpenCV convention), pixels are scaled to [0, 1]
	and normalized with the model mean / standard deviation, then fed to the
	fairface session in NCHW layout with a batch axis.
	"""
	model_template = get_model_options().get('template')
	model_size = get_model_options().get('size')
	model_mean = get_model_options().get('mean')
	model_standard_deviation = get_model_options().get('standard_deviation')
	crop_vision_frame, _ = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size)
	# reverse channel axis and scale into [0, 1] before normalization
	crop_vision_frame = crop_vision_frame.astype(numpy.float32)[:, :, ::-1] / 255.0
	crop_vision_frame -= model_mean
	crop_vision_frame /= model_standard_deviation
	# HWC -> CHW plus a leading batch axis, as the ONNX input expects
	crop_vision_frame = crop_vision_frame.transpose(2, 0, 1)
	crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0)
	gender_id, age_id, race_id = forward(crop_vision_frame)
	gender = categorize_gender(gender_id[0])
	age = categorize_age(age_id[0])
	race = categorize_race(race_id[0])
	return gender, age, race
83
+
84
+
85
def forward(crop_vision_frame : VisionFrame) -> Tuple[List[int], List[int], List[int]]:
	"""
	Run the fairface session on a prepared crop.

	The ONNX model emits outputs in the order (race, gender, age); this
	wrapper reorders them to (gender, age, race) to match classify_face().
	"""
	face_classifier = get_inference_pool().get('face_classifier')

	with conditional_thread_semaphore():
		race_id, gender_id, age_id = face_classifier.run(None,
		{
			'input': crop_vision_frame
		})

	return gender_id, age_id, race_id
95
+
96
+
97
def categorize_gender(gender_id : int) -> Gender:
	"""Map the fairface gender class index to its label (1 is female, anything else male)."""
	return 'female' if gender_id == 1 else 'male'
101
+
102
+
103
def categorize_age(age_id : int) -> Age:
	"""Map the fairface age class index to its age bracket; unknown ids fall back to 70-100."""
	age_brackets =\
	[
		range(0, 2),
		range(3, 9),
		range(10, 19),
		range(20, 29),
		range(30, 39),
		range(40, 49),
		range(50, 59),
		range(60, 69)
	]
	if 0 <= age_id < len(age_brackets):
		return age_brackets[age_id]
	return range(70, 100)
121
+
122
+
123
def categorize_race(race_id : int) -> Race:
	"""Map the fairface race class index to its label; unmapped ids default to 'white'."""
	# indices 3 and 4 (east / southeast asian in fairface) collapse into 'asian'
	race_lookup =\
	{
		1: 'black',
		2: 'latino',
		3: 'asian',
		4: 'asian',
		5: 'indian',
		6: 'arabic'
	}
	return race_lookup.get(race_id, 'white')
facefusion/face_detector.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import List, Sequence, Tuple
3
+
4
+ import cv2
5
+ import numpy
6
+
7
+ from facefusion import inference_manager, state_manager
8
+ from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
9
+ from facefusion.face_helper import create_rotated_matrix_and_size, create_static_anchors, distance_to_bounding_box, distance_to_face_landmark_5, normalize_bounding_box, transform_bounding_box, transform_points
10
+ from facefusion.filesystem import resolve_relative_path
11
+ from facefusion.thread_helper import thread_semaphore
12
+ from facefusion.types import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame
13
+ from facefusion.vision import restrict_frame, unpack_resolution
14
+
15
+
16
@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""
	Return the static model set for all supported face detectors.

	download_scope only keys the lru_cache here; the returned set is the
	same for every scope. Each entry pairs a hash file (for integrity
	checks) with the ONNX model source.
	"""
	return\
	{
		'retinaface':
		{
			'hashes':
			{
				'retinaface':
				{
					'url': resolve_download_url('models-3.0.0', 'retinaface_10g.hash'),
					'path': resolve_relative_path('../.assets/models/retinaface_10g.hash')
				}
			},
			'sources':
			{
				'retinaface':
				{
					'url': resolve_download_url('models-3.0.0', 'retinaface_10g.onnx'),
					'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
				}
			}
		},
		'scrfd':
		{
			'hashes':
			{
				'scrfd':
				{
					'url': resolve_download_url('models-3.0.0', 'scrfd_2.5g.hash'),
					'path': resolve_relative_path('../.assets/models/scrfd_2.5g.hash')
				}
			},
			'sources':
			{
				'scrfd':
				{
					'url': resolve_download_url('models-3.0.0', 'scrfd_2.5g.onnx'),
					'path': resolve_relative_path('../.assets/models/scrfd_2.5g.onnx')
				}
			}
		},
		'yolo_face':
		{
			'hashes':
			{
				'yolo_face':
				{
					'url': resolve_download_url('models-3.0.0', 'yoloface_8n.hash'),
					'path': resolve_relative_path('../.assets/models/yoloface_8n.hash')
				}
			},
			'sources':
			{
				'yolo_face':
				{
					'url': resolve_download_url('models-3.0.0', 'yoloface_8n.onnx'),
					'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx')
				}
			}
		}
	}
78
+
79
+
80
def get_inference_pool() -> InferencePool:
	"""Create or fetch the inference pool for the currently selected detector model."""
	face_detector_model = state_manager.get_item('face_detector_model')
	_, model_source_set = collect_model_downloads()
	return inference_manager.get_inference_pool(__name__, [ face_detector_model ], model_source_set)
85
+
86
+
87
def clear_inference_pool() -> None:
	"""Release the cached inference sessions for the selected detector model."""
	face_detector_model = state_manager.get_item('face_detector_model')
	inference_manager.clear_inference_pool(__name__, [ face_detector_model ])
90
+
91
+
92
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
	"""
	Gather hash and source download entries for every detector the current
	configuration can run ('many' selects all three models).
	"""
	model_set = create_static_model_set('full')
	model_hash_set = {}
	model_source_set = {}

	for model_name in [ 'retinaface', 'scrfd', 'yolo_face' ]:
		if state_manager.get_item('face_detector_model') in [ 'many', model_name ]:
			model_options = model_set.get(model_name)
			model_hash_set[model_name] = model_options.get('hashes').get(model_name)
			model_source_set[model_name] = model_options.get('sources').get(model_name)

	return model_hash_set, model_source_set
103
+
104
+
105
def pre_check() -> bool:
	"""Download every required detector hash and model file; True when all succeed."""
	model_hash_set, model_source_set = collect_model_downloads()
	if not conditional_download_hashes(model_hash_set):
		return False
	return conditional_download_sources(model_source_set)
109
+
110
+
111
def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	"""
	Run every enabled detector over the frame and merge the results.

	The 'many' model runs all three detectors; otherwise only the selected
	one runs. Bounding boxes are normalized so (x1, y1) is always the
	top-left corner.
	"""
	all_bounding_boxes : List[BoundingBox] = []
	all_face_scores : List[Score] = []
	all_face_landmarks_5 : List[FaceLandmark5] = []
	detector_runners =\
	[
		('retinaface', detect_with_retinaface),
		('scrfd', detect_with_scrfd),
		('yolo_face', detect_with_yolo_face)
	]

	for model_name, run_detector in detector_runners:
		if state_manager.get_item('face_detector_model') in [ 'many', model_name ]:
			bounding_boxes, face_scores, face_landmarks_5 = run_detector(vision_frame, state_manager.get_item('face_detector_size'))
			all_bounding_boxes.extend(bounding_boxes)
			all_face_scores.extend(face_scores)
			all_face_landmarks_5.extend(face_landmarks_5)

	all_bounding_boxes = [ normalize_bounding_box(bounding_box) for bounding_box in all_bounding_boxes ]
	return all_bounding_boxes, all_face_scores, all_face_landmarks_5
136
+
137
+
138
def detect_rotated_faces(vision_frame : VisionFrame, angle : Angle) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	"""
	Detect faces in a rotated copy of the frame and map results back.

	The frame is rotated by the given angle, detection runs on the rotated
	frame, then boxes and landmarks are transformed through the inverse
	rotation so they land in the original frame's coordinate space.
	"""
	# shape is (height, width); the matrix helper expects (width, height)
	rotated_matrix, rotated_size = create_rotated_matrix_and_size(angle, vision_frame.shape[:2][::-1])
	rotated_vision_frame = cv2.warpAffine(vision_frame, rotated_matrix, rotated_size)
	rotated_inverse_matrix = cv2.invertAffineTransform(rotated_matrix)
	bounding_boxes, face_scores, face_landmarks_5 = detect_faces(rotated_vision_frame)
	bounding_boxes = [ transform_bounding_box(bounding_box, rotated_inverse_matrix) for bounding_box in bounding_boxes ]
	face_landmarks_5 = [ transform_points(face_landmark_5, rotated_inverse_matrix) for face_landmark_5 in face_landmarks_5 ]
	return bounding_boxes, face_scores, face_landmarks_5
146
+
147
+
148
def detect_with_retinaface(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	"""
	Detect faces with the retinaface model at the given detector resolution.

	Returns bounding boxes, scores and 5-point landmarks mapped back into
	the coordinate space of the original vision frame.
	"""
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	# the model emits one (score, box, landmark) output group per feature stride
	feature_strides = [ 8, 16, 32 ]
	feature_map_channel = 3
	anchor_total = 2
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	# ratios undo the downscale when mapping detections back to the input frame
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ -1, 1 ])
	detection = forward_with_retinaface(detect_vision_frame)

	for index, feature_stride in enumerate(feature_strides):
		# keep only anchors whose confidence clears the configured threshold
		keep_indices = numpy.where(detection[index] >= face_detector_score)[0]

		if numpy.any(keep_indices):
			stride_height = face_detector_height // feature_stride
			stride_width = face_detector_width // feature_stride
			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
			# model outputs are anchor-relative distances expressed in stride units
			bounding_boxes_raw = detection[index + feature_map_channel] * feature_stride
			face_landmarks_5_raw = detection[index + feature_map_channel * 2] * feature_stride

			for bounding_box_raw in distance_to_bounding_box(anchors, bounding_boxes_raw)[keep_indices]:
				bounding_boxes.append(numpy.array(
				[
					bounding_box_raw[0] * ratio_width,
					bounding_box_raw[1] * ratio_height,
					bounding_box_raw[2] * ratio_width,
					bounding_box_raw[3] * ratio_height
				]))

			for face_score_raw in detection[index][keep_indices]:
				face_scores.append(face_score_raw[0])

			for face_landmark_raw_5 in distance_to_face_landmark_5(anchors, face_landmarks_5_raw)[keep_indices]:
				face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])

	return bounding_boxes, face_scores, face_landmarks_5
190
+
191
+
192
def detect_with_scrfd(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	"""
	Detect faces with the scrfd model at the given detector resolution.

	NOTE(review): the decode logic is identical to detect_with_retinaface
	(both models share the insightface anchor layout) — a shared helper
	would remove the duplication.
	"""
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	# one (score, box, landmark) output group per feature stride
	feature_strides = [ 8, 16, 32 ]
	feature_map_channel = 3
	anchor_total = 2
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	# ratios undo the downscale when mapping detections back to the input frame
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ -1, 1 ])
	detection = forward_with_scrfd(detect_vision_frame)

	for index, feature_stride in enumerate(feature_strides):
		keep_indices = numpy.where(detection[index] >= face_detector_score)[0]

		if numpy.any(keep_indices):
			stride_height = face_detector_height // feature_stride
			stride_width = face_detector_width // feature_stride
			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
			# model outputs are anchor-relative distances expressed in stride units
			bounding_boxes_raw = detection[index + feature_map_channel] * feature_stride
			face_landmarks_5_raw = detection[index + feature_map_channel * 2] * feature_stride

			for bounding_box_raw in distance_to_bounding_box(anchors, bounding_boxes_raw)[keep_indices]:
				bounding_boxes.append(numpy.array(
				[
					bounding_box_raw[0] * ratio_width,
					bounding_box_raw[1] * ratio_height,
					bounding_box_raw[2] * ratio_width,
					bounding_box_raw[3] * ratio_height
				]))

			for face_score_raw in detection[index][keep_indices]:
				face_scores.append(face_score_raw[0])

			for face_landmark_raw_5 in distance_to_face_landmark_5(anchors, face_landmarks_5_raw)[keep_indices]:
				face_landmarks_5.append(face_landmark_raw_5 * [ ratio_width, ratio_height ])

	return bounding_boxes, face_scores, face_landmarks_5
234
+
235
+
236
def detect_with_yolo_face(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
	"""
	Detect faces with the yoloface model at the given detector resolution.

	YOLO emits center-based boxes (cx, cy, w, h) plus a score and 15
	landmark values (x, y, visibility per point); boxes are converted to
	corner coordinates and everything is scaled back to the input frame.
	"""
	bounding_boxes = []
	face_scores = []
	face_landmarks_5 = []
	face_detector_score = state_manager.get_item('face_detector_score')
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	temp_vision_frame = restrict_frame(vision_frame, (face_detector_width, face_detector_height))
	# ratios undo the downscale when mapping detections back to the input frame
	ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0]
	ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1]
	detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size)
	detect_vision_frame = normalize_detect_frame(detect_vision_frame, [ 0, 1 ])
	detection = forward_with_yolo_face(detect_vision_frame)
	# transpose so each row is one candidate: 4 box values, 1 score, 15 landmark values
	detection = numpy.squeeze(detection).T
	bounding_boxes_raw, face_scores_raw, face_landmarks_5_raw = numpy.split(detection, [ 4, 5 ], axis = 1)
	keep_indices = numpy.where(face_scores_raw > face_detector_score)[0]

	if numpy.any(keep_indices):
		bounding_boxes_raw, face_scores_raw, face_landmarks_5_raw = bounding_boxes_raw[keep_indices], face_scores_raw[keep_indices], face_landmarks_5_raw[keep_indices]

		# convert (cx, cy, w, h) center boxes into (x1, y1, x2, y2) corners
		for bounding_box_raw in bounding_boxes_raw:
			bounding_boxes.append(numpy.array(
			[
				(bounding_box_raw[0] - bounding_box_raw[2] / 2) * ratio_width,
				(bounding_box_raw[1] - bounding_box_raw[3] / 2) * ratio_height,
				(bounding_box_raw[0] + bounding_box_raw[2] / 2) * ratio_width,
				(bounding_box_raw[1] + bounding_box_raw[3] / 2) * ratio_height
			]))

		face_scores = face_scores_raw.ravel().tolist()
		# landmark layout is (x, y, visibility) triples; scale x and y in place
		face_landmarks_5_raw[:, 0::3] = (face_landmarks_5_raw[:, 0::3]) * ratio_width
		face_landmarks_5_raw[:, 1::3] = (face_landmarks_5_raw[:, 1::3]) * ratio_height

		for face_landmark_raw_5 in face_landmarks_5_raw:
			# drop the visibility column, keeping the 5 (x, y) pairs
			face_landmarks_5.append(numpy.array(face_landmark_raw_5.reshape(-1, 3)[:, :2]))

	return bounding_boxes, face_scores, face_landmarks_5
272
+
273
+
274
def forward_with_retinaface(detect_vision_frame : VisionFrame) -> Detection:
	"""Execute the retinaface ONNX session under the global thread semaphore."""
	face_detector = get_inference_pool().get('retinaface')

	with thread_semaphore():
		return face_detector.run(None, { 'input': detect_vision_frame })
284
+
285
+
286
def forward_with_scrfd(detect_vision_frame : VisionFrame) -> Detection:
	"""Execute the scrfd ONNX session under the global thread semaphore."""
	face_detector = get_inference_pool().get('scrfd')

	with thread_semaphore():
		return face_detector.run(None, { 'input': detect_vision_frame })
296
+
297
+
298
def forward_with_yolo_face(detect_vision_frame : VisionFrame) -> Detection:
	"""Execute the yoloface ONNX session under the global thread semaphore."""
	face_detector = get_inference_pool().get('yolo_face')

	with thread_semaphore():
		return face_detector.run(None, { 'input': detect_vision_frame })
308
+
309
+
310
def prepare_detect_frame(temp_vision_frame : VisionFrame, face_detector_size : str) -> VisionFrame:
	"""Pad the frame into a zero-filled detector canvas and convert to NCHW float32."""
	face_detector_width, face_detector_height = unpack_resolution(face_detector_size)
	frame_height, frame_width = temp_vision_frame.shape[:2]
	# place the (already restricted) frame in the top-left of the canvas
	detect_vision_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
	detect_vision_frame[:frame_height, :frame_width, :] = temp_vision_frame
	detect_vision_frame = detect_vision_frame.transpose(2, 0, 1)
	detect_vision_frame = numpy.expand_dims(detect_vision_frame, axis = 0).astype(numpy.float32)
	return detect_vision_frame
316
+
317
+
318
def normalize_detect_frame(detect_vision_frame : VisionFrame, normalize_range : Sequence[int]) -> VisionFrame:
	"""Scale pixel values into the requested range; unknown ranges pass through unchanged."""
	if normalize_range == [ -1, 1 ]:
		# map [0, 255] onto [-1, 1] around the midpoint
		detect_vision_frame = (detect_vision_frame - 127.5) / 128.0
	elif normalize_range == [ 0, 1 ]:
		detect_vision_frame = detect_vision_frame / 255.0
	return detect_vision_frame
facefusion/face_helper.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import List, Sequence, Tuple
3
+
4
+ import cv2
5
+ import numpy
6
+ from cv2.typing import Size
7
+
8
+ from facefusion.types import Anchors, Angle, BoundingBox, Distance, FaceDetectorModel, FaceLandmark5, FaceLandmark68, Mask, Matrix, Points, Scale, Score, Translation, VisionFrame, WarpTemplate, WarpTemplateSet
9
+
10
# Canonical 5-point landmark layouts (left eye, right eye, nose, mouth corners)
# expressed as fractions of the crop size; multiplied by the crop size in
# estimate_matrix_by_face_landmark_5 to form alignment targets.
WARP_TEMPLATE_SET : WarpTemplateSet =\
{
	'arcface_112_v1': numpy.array(
	[
		[ 0.35473214, 0.45658929 ],
		[ 0.64526786, 0.45658929 ],
		[ 0.50000000, 0.61154464 ],
		[ 0.37913393, 0.77687500 ],
		[ 0.62086607, 0.77687500 ]
	]),
	'arcface_112_v2': numpy.array(
	[
		[ 0.34191607, 0.46157411 ],
		[ 0.65653393, 0.45983393 ],
		[ 0.50022500, 0.64050536 ],
		[ 0.37097589, 0.82469196 ],
		[ 0.63151696, 0.82325089 ]
	]),
	'arcface_128': numpy.array(
	[
		[ 0.36167656, 0.40387734 ],
		[ 0.63696719, 0.40235469 ],
		[ 0.50019687, 0.56044219 ],
		[ 0.38710391, 0.72160547 ],
		[ 0.61507734, 0.72034453 ]
	]),
	'dfl_whole_face': numpy.array(
	[
		[ 0.35342266, 0.39285716 ],
		[ 0.62797622, 0.39285716 ],
		[ 0.48660713, 0.54017860 ],
		[ 0.38839287, 0.68750011 ],
		[ 0.59821427, 0.68750011 ]
	]),
	'ffhq_512': numpy.array(
	[
		[ 0.37691676, 0.46864664 ],
		[ 0.62285697, 0.46912813 ],
		[ 0.50123859, 0.61331904 ],
		[ 0.39308822, 0.72541100 ],
		[ 0.61150205, 0.72490465 ]
	]),
	'mtcnn_512': numpy.array(
	[
		[ 0.36562865, 0.46733799 ],
		[ 0.63305391, 0.46585885 ],
		[ 0.50019127, 0.61942959 ],
		[ 0.39032951, 0.77598822 ],
		[ 0.61178945, 0.77476328 ]
	]),
	'styleganex_384': numpy.array(
	[
		[ 0.42353745, 0.52289879 ],
		[ 0.57725008, 0.52319972 ],
		[ 0.50123859, 0.61331904 ],
		[ 0.43364461, 0.68337652 ],
		[ 0.57015325, 0.68306005 ]
	])
}
69
+
70
+
71
def estimate_matrix_by_face_landmark_5(face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Matrix:
	"""
	Estimate the similarity transform that maps the detected 5-point
	landmarks onto the chosen warp template scaled to the crop size.
	"""
	normed_warp_template = WARP_TEMPLATE_SET.get(warp_template) * crop_size
	# RANSAC with a large reprojection threshold effectively fits all 5 points
	affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_warp_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
	return affine_matrix
75
+
76
+
77
def warp_face_by_face_landmark_5(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Tuple[VisionFrame, Matrix]:
	"""
	Align a face to the warp template and return the crop plus the affine
	matrix used (needed later to paste the crop back).
	"""
	affine_matrix = estimate_matrix_by_face_landmark_5(face_landmark_5, warp_template, crop_size)
	crop_vision_frame = cv2.warpAffine(temp_vision_frame, affine_matrix, crop_size, borderMode = cv2.BORDER_REPLICATE, flags = cv2.INTER_AREA)
	return crop_vision_frame, affine_matrix
81
+
82
+
83
def warp_face_by_bounding_box(temp_vision_frame : VisionFrame, bounding_box : BoundingBox, crop_size : Size) -> Tuple[VisionFrame, Matrix]:
	"""
	Crop the bounding box region and stretch it to the crop size, returning
	the crop plus the affine matrix used.
	"""
	source_points = numpy.array([ [ bounding_box[0], bounding_box[1] ], [ bounding_box[2], bounding_box[1] ], [ bounding_box[0], bounding_box[3] ] ]).astype(numpy.float32)
	target_points = numpy.array([ [ 0, 0 ], [ crop_size[0], 0 ], [ 0, crop_size[1] ] ]).astype(numpy.float32)
	affine_matrix = cv2.getAffineTransform(source_points, target_points)
	# INTER_AREA resamples better when shrinking, INTER_LINEAR when enlarging
	is_downscale = bounding_box[2] - bounding_box[0] > crop_size[0] or bounding_box[3] - bounding_box[1] > crop_size[1]
	interpolation_method = cv2.INTER_AREA if is_downscale else cv2.INTER_LINEAR
	crop_vision_frame = cv2.warpAffine(temp_vision_frame, affine_matrix, crop_size, flags = interpolation_method)
	return crop_vision_frame, affine_matrix
93
+
94
+
95
def warp_face_by_translation(temp_vision_frame : VisionFrame, translation : Translation, scale : float, crop_size : Size) -> Tuple[VisionFrame, Matrix]:
	"""
	Crop via a uniform scale plus translation (no rotation), returning the
	crop and the affine matrix used.
	"""
	affine_matrix = numpy.array([ [ scale, 0, translation[0] ], [ 0, scale, translation[1] ] ])
	crop_vision_frame = cv2.warpAffine(temp_vision_frame, affine_matrix, crop_size)
	return crop_vision_frame, affine_matrix
99
+
100
+
101
def paste_back(temp_vision_frame : VisionFrame, crop_vision_frame : VisionFrame, crop_mask : Mask, affine_matrix : Matrix) -> VisionFrame:
	"""
	Blend a processed face crop back into the full frame.

	Only the paste region computed by calc_paste_area is touched; within it
	the crop is alpha-blended against the original pixels using the warped
	crop mask. Returns a copy — the caller's frame is not mutated.
	"""
	paste_bounding_box, paste_matrix = calc_paste_area(temp_vision_frame, crop_vision_frame, affine_matrix)
	x_min, y_min, x_max, y_max = paste_bounding_box
	paste_width = x_max - x_min
	paste_height = y_max - y_min
	# warp mask and crop back into frame coordinates, clipped to the paste region
	inverse_mask = cv2.warpAffine(crop_mask, paste_matrix, (paste_width, paste_height)).clip(0, 1)
	inverse_mask = numpy.expand_dims(inverse_mask, axis = -1)
	inverse_vision_frame = cv2.warpAffine(crop_vision_frame, paste_matrix, (paste_width, paste_height), borderMode = cv2.BORDER_REPLICATE)
	temp_vision_frame = temp_vision_frame.copy()
	paste_vision_frame = temp_vision_frame[y_min:y_max, x_min:x_max]
	# alpha blend: the mask weighs the warped crop against the original pixels
	paste_vision_frame = paste_vision_frame * (1 - inverse_mask) + inverse_vision_frame * inverse_mask
	temp_vision_frame[y_min:y_max, x_min:x_max] = paste_vision_frame.astype(temp_vision_frame.dtype)
	return temp_vision_frame
114
+
115
+
116
def calc_paste_area(temp_vision_frame : VisionFrame, crop_vision_frame : VisionFrame, affine_matrix : Matrix) -> Tuple[BoundingBox, Matrix]:
	"""
	Compute the frame region a crop lands in when warped back.

	Projects the crop corners through the inverse affine matrix, clips the
	resulting bounds to the frame, and returns the bounding box together
	with a paste matrix whose translation is shifted so warping targets the
	region's local coordinates.
	"""
	temp_height, temp_width = temp_vision_frame.shape[:2]
	crop_height, crop_width = crop_vision_frame.shape[:2]
	inverse_matrix = cv2.invertAffineTransform(affine_matrix)
	crop_points = numpy.array([ [ 0, 0 ], [ crop_width, 0 ], [ crop_width, crop_height ], [ 0, crop_height ] ])
	paste_region_points = transform_points(crop_points, inverse_matrix)
	# expand to whole pixels, then clip to the frame bounds
	min_point = numpy.floor(paste_region_points.min(axis = 0)).astype(int)
	max_point = numpy.ceil(paste_region_points.max(axis = 0)).astype(int)
	x_min, y_min = numpy.clip(min_point, 0, [ temp_width, temp_height ])
	x_max, y_max = numpy.clip(max_point, 0, [ temp_width, temp_height ])
	paste_bounding_box = numpy.array([ x_min, y_min, x_max, y_max ])
	# re-anchor the transform at the region origin instead of the frame origin
	paste_matrix = inverse_matrix.copy()
	paste_matrix[0, 2] -= x_min
	paste_matrix[1, 2] -= y_min
	return paste_bounding_box, paste_matrix
131
+
132
+
133
@lru_cache(maxsize = None)
def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> Anchors:
	"""
	Build the (x, y) anchor centers for one feature stride, cached per
	geometry. Each grid cell contributes anchor_total identical centers.
	"""
	row_grid, column_grid = numpy.mgrid[:stride_height, :stride_width]
	anchors = numpy.stack((column_grid, row_grid), axis = -1)
	anchors = (anchors * feature_stride).reshape((-1, 2))
	# duplicate each center once per anchor at that location
	anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2))
	return anchors
140
+
141
+
142
def create_rotated_matrix_and_size(angle : Angle, size : Size) -> Tuple[Matrix, Size]:
	"""
	Build a rotation matrix about the image center together with the
	enlarged canvas size that fully contains the rotated image.
	"""
	rotated_matrix = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1)
	# project the original extents through the rotation to get the new canvas
	rotated_size = numpy.dot(numpy.abs(rotated_matrix[:, :2]), size)
	# shift so the rotated content stays centered on the enlarged canvas
	rotated_matrix[:, -1] += (rotated_size - size) * 0.5 #type:ignore[misc]
	rotated_size = int(rotated_size[0]), int(rotated_size[1])
	return rotated_matrix, rotated_size
148
+
149
+
150
def create_bounding_box(face_landmark_68 : FaceLandmark68) -> BoundingBox:
	"""Derive an axis-aligned bounding box enclosing all 68 landmarks."""
	min_point = numpy.min(face_landmark_68, axis = 0)
	max_point = numpy.max(face_landmark_68, axis = 0)
	return normalize_bounding_box(numpy.array([ min_point[0], min_point[1], max_point[0], max_point[1] ]))
155
+
156
+
157
def normalize_bounding_box(bounding_box : BoundingBox) -> BoundingBox:
	"""Reorder coordinates so (x1, y1) is the top-left and (x2, y2) the bottom-right corner."""
	x1, y1, x2, y2 = bounding_box
	return numpy.array([ min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2) ])
162
+
163
+
164
def transform_points(points : Points, matrix : Matrix) -> Points:
	"""
	Apply an affine matrix to a set of 2D points.

	cv2.transform expects shape (N, 1, 2), so the points are reshaped in
	and flattened back out around the call.
	"""
	points = points.reshape(-1, 1, 2)
	points = cv2.transform(points, matrix) #type:ignore[assignment]
	points = points.reshape(-1, 2)
	return points
169
+
170
+
171
def transform_bounding_box(bounding_box : BoundingBox, matrix : Matrix) -> BoundingBox:
	"""
	Map a bounding box through an affine matrix and re-box the warped
	corners into a new axis-aligned bounding box.
	"""
	x1, y1, x2, y2 = bounding_box
	corner_points = numpy.array(
	[
		[ x1, y1 ],
		[ x2, y1 ],
		[ x2, y2 ],
		[ x1, y2 ]
	])
	corner_points = transform_points(corner_points, matrix)
	min_x, min_y = numpy.min(corner_points, axis = 0)
	max_x, max_y = numpy.max(corner_points, axis = 0)
	return normalize_bounding_box(numpy.array([ min_x, min_y, max_x, max_y ]))
183
+
184
+
185
def distance_to_bounding_box(points : Points, distance : Distance) -> BoundingBox:
	"""Decode anchor-relative (left, top, right, bottom) distances into corner boxes."""
	top_left = points[:, :2] - distance[:, :2]
	bottom_right = points[:, :2] + distance[:, 2:4]
	return numpy.column_stack([ top_left, bottom_right ])
192
+
193
+
194
def distance_to_face_landmark_5(points : Points, distance : Distance) -> FaceLandmark5:
	"""Decode anchor-relative landmark offsets into absolute (x, y) landmark points."""
	# even columns carry x offsets, odd columns y offsets
	landmark_x = points[:, 0::2] + distance[:, 0::2]
	landmark_y = points[:, 1::2] + distance[:, 1::2]
	return numpy.stack((landmark_x, landmark_y), axis = -1)
199
+
200
+
201
def scale_face_landmark_5(face_landmark_5 : FaceLandmark5, scale : Scale) -> FaceLandmark5:
	"""Scale the five landmarks about the point at index 2, leaving the input array untouched."""
	pivot_point = face_landmark_5[2]
	return (face_landmark_5 - pivot_point) * scale + pivot_point
206
+
207
+
208
def convert_to_face_landmark_5(face_landmark_68 : FaceLandmark68) -> FaceLandmark5:
	"""Condense 68-point landmarks into the 5-point layout (eye centers, nose tip, mouth corners)."""
	left_eye_center = numpy.mean(face_landmark_68[36:42], axis = 0)
	right_eye_center = numpy.mean(face_landmark_68[42:48], axis = 0)
	nose_tip = face_landmark_68[30]
	left_mouth_corner = face_landmark_68[48]
	right_mouth_corner = face_landmark_68[54]
	return numpy.array([ left_eye_center, right_eye_center, nose_tip, left_mouth_corner, right_mouth_corner ])
218
+
219
+
220
def estimate_face_angle(face_landmark_68 : FaceLandmark68) -> Angle:
	"""Snap the jawline tilt (landmark 0 to landmark 16) to the nearest 90 degree rotation."""
	jaw_start_x, jaw_start_y = face_landmark_68[0]
	jaw_end_x, jaw_end_y = face_landmark_68[16]
	theta = numpy.degrees(numpy.arctan2(jaw_end_y - jaw_start_y, jaw_end_x - jaw_start_x)) % 360
	# candidates 0, 90, 180, 270, 360 — the last collapses to 0 via the modulo
	snap_angles = numpy.linspace(0, 360, 5)
	nearest_index = numpy.argmin(numpy.abs(snap_angles - theta))
	return int(snap_angles[nearest_index] % 360)
229
+
230
+
231
def apply_nms(bounding_boxes : List[BoundingBox], scores : List[Score], score_threshold : float, nms_threshold : float) -> Sequence[int]:
	"""
	Run OpenCV non-maximum suppression and return the indices of the
	surviving boxes.
	"""
	# cv2.dnn.NMSBoxes expects (x, y, width, height) rather than corner pairs
	normed_bounding_boxes = [ (x1, y1, x2 - x1, y2 - y1) for (x1, y1, x2, y2) in bounding_boxes ]
	keep_indices = cv2.dnn.NMSBoxes(normed_bounding_boxes, scores, score_threshold = score_threshold, nms_threshold = nms_threshold)
	return keep_indices
235
+
236
+
237
def get_nms_threshold(face_detector_model : FaceDetectorModel, face_detector_angles : List[Angle]) -> float:
	"""
	Pick an IoU threshold for NMS: the more overlapping detection passes
	(multiple models or rotation angles), the stricter the suppression.
	"""
	if face_detector_model == 'many':
		return 0.1
	angle_threshold_lookup =\
	{
		2: 0.3,
		3: 0.2,
		4: 0.1
	}
	return angle_threshold_lookup.get(len(face_detector_angles), 0.4)
247
+
248
+
249
def merge_matrix(matrices : List[Matrix]) -> Matrix:
	"""
	Compose a list of 2x3 affine matrices into one equivalent 2x3 matrix by
	multiplying their homogeneous 3x3 forms in list order.
	"""
	homogeneous_row = numpy.array([ 0, 0, 1 ])
	merged_matrix = numpy.vstack([ matrices[0], homogeneous_row ])

	for matrix in matrices[1:]:
		merged_matrix = merged_matrix @ numpy.vstack([ matrix, homogeneous_row ])

	# drop the homogeneous row to return a 2x3 affine matrix again
	return merged_matrix[:2, :]
facefusion/face_landmarker.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import Tuple
3
+
4
+ import cv2
5
+ import numpy
6
+
7
+ from facefusion import inference_manager, state_manager
8
+ from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
9
+ from facefusion.face_helper import create_rotated_matrix_and_size, estimate_matrix_by_face_landmark_5, transform_points, warp_face_by_translation
10
+ from facefusion.filesystem import resolve_relative_path
11
+ from facefusion.thread_helper import conditional_thread_semaphore
12
+ from facefusion.types import Angle, BoundingBox, DownloadScope, DownloadSet, FaceLandmark5, FaceLandmark68, InferencePool, ModelSet, Prediction, Score, VisionFrame
13
+
14
+
15
@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""Describe download locations, local paths and input sizes of the landmarker models."""
	model_set = {}

	# The two selectable landmarker models share release, layout and input size.
	for model_name in [ '2dfan4', 'peppa_wutz' ]:
		model_set[model_name] =\
		{
			'hashes':
			{
				model_name:
				{
					'url': resolve_download_url('models-3.0.0', model_name + '.hash'),
					'path': resolve_relative_path('../.assets/models/' + model_name + '.hash')
				}
			},
			'sources':
			{
				model_name:
				{
					'url': resolve_download_url('models-3.0.0', model_name + '.onnx'),
					'path': resolve_relative_path('../.assets/models/' + model_name + '.onnx')
				}
			},
			'size': (256, 256)
		}

	# fan_68_5 estimates 68 points from 5 points and has no image input size.
	model_set['fan_68_5'] =\
	{
		'hashes':
		{
			'fan_68_5':
			{
				'url': resolve_download_url('models-3.0.0', 'fan_68_5.hash'),
				'path': resolve_relative_path('../.assets/models/fan_68_5.hash')
			}
		},
		'sources':
		{
			'fan_68_5':
			{
				'url': resolve_download_url('models-3.0.0', 'fan_68_5.onnx'),
				'path': resolve_relative_path('../.assets/models/fan_68_5.onnx')
			}
		}
	}
	return model_set
79
+
80
+
81
def get_inference_pool() -> InferencePool:
	"""Return ONNX sessions for the selected landmarker model and the always-required 'fan_68_5'."""
	_, model_source_set = collect_model_downloads()
	model_names = [ state_manager.get_item('face_landmarker_model'), 'fan_68_5' ]
	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
86
+
87
+
88
def clear_inference_pool() -> None:
	# Release the sessions of the selected landmarker model and 'fan_68_5'.
	model_names = [ state_manager.get_item('face_landmarker_model'), 'fan_68_5' ]
	inference_manager.clear_inference_pool(__name__, model_names)
91
+
92
+
93
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
	"""Collect hash and source download entries for 'fan_68_5' plus the selected landmarker model(s)."""
	model_set = create_static_model_set('full')
	# 'fan_68_5' is unconditionally required.
	model_hash_set = { 'fan_68_5': model_set.get('fan_68_5').get('hashes').get('fan_68_5') }
	model_source_set = { 'fan_68_5': model_set.get('fan_68_5').get('sources').get('fan_68_5') }
	face_landmarker_model = state_manager.get_item('face_landmarker_model')

	for model_name in [ '2dfan4', 'peppa_wutz' ]:
		if face_landmarker_model in [ 'many', model_name ]:
			model_hash_set[model_name] = model_set.get(model_name).get('hashes').get(model_name)
			model_source_set[model_name] = model_set.get(model_name).get('sources').get(model_name)
	return model_hash_set, model_source_set
110
+
111
+
112
def pre_check() -> bool:
	"""Download missing model hashes and sources; return False when either step failed."""
	model_hash_set, model_source_set = collect_model_downloads()
	if not conditional_download_hashes(model_hash_set):
		return False
	return conditional_download_sources(model_source_set)
116
+
117
+
118
def detect_face_landmark(vision_frame : VisionFrame, bounding_box : BoundingBox, face_angle : Angle) -> Tuple[FaceLandmark68, Score]:
	"""Run the configured landmarker model(s) and return the better 68 point landmark with its score."""
	face_landmarker_model = state_manager.get_item('face_landmarker_model')
	face_landmark_2dfan4 = None
	face_landmark_score_2dfan4 = 0.0
	face_landmark_peppa_wutz = None
	face_landmark_score_peppa_wutz = 0.0

	if face_landmarker_model in [ 'many', '2dfan4' ]:
		face_landmark_2dfan4, face_landmark_score_2dfan4 = detect_with_2dfan4(vision_frame, bounding_box, face_angle)
	if face_landmarker_model in [ 'many', 'peppa_wutz' ]:
		face_landmark_peppa_wutz, face_landmark_score_peppa_wutz = detect_with_peppa_wutz(vision_frame, bounding_box, face_angle)

	# Prefer 2dfan4 unless peppa_wutz scores at least 0.2 higher.
	if face_landmark_score_2dfan4 > face_landmark_score_peppa_wutz - 0.2:
		return face_landmark_2dfan4, face_landmark_score_2dfan4
	return face_landmark_peppa_wutz, face_landmark_score_peppa_wutz
133
+
134
+
135
def detect_with_2dfan4(temp_vision_frame: VisionFrame, bounding_box: BoundingBox, face_angle: Angle) -> Tuple[FaceLandmark68, Score]:
	"""Detect the 68 point face landmark with the 2dfan4 model.

	Crops and rotates the face region to the model size, runs inference and maps
	the landmark back into the frame coordinates. The score is mapped to [0, 1].
	"""
	model_size = create_static_model_set('full').get('2dfan4').get('size')
	# Scale so the larger bounding box side maps to 195 pixels (clip guards zero-sized boxes).
	scale = 195 / numpy.subtract(bounding_box[2:], bounding_box[:2]).max().clip(1, None)
	# Translate the scaled box center into the crop center.
	translation = (model_size[0] - numpy.add(bounding_box[2:], bounding_box[:2]) * scale) * 0.5
	rotated_matrix, rotated_size = create_rotated_matrix_and_size(face_angle, model_size)
	crop_vision_frame, affine_matrix = warp_face_by_translation(temp_vision_frame, translation, scale, model_size)
	crop_vision_frame = cv2.warpAffine(crop_vision_frame, rotated_matrix, rotated_size)
	crop_vision_frame = conditional_optimize_contrast(crop_vision_frame)
	# HWC uint8 -> CHW float32 in [0, 1].
	crop_vision_frame = crop_vision_frame.transpose(2, 0, 1).astype(numpy.float32) / 255.0
	face_landmark_68, face_heatmap = forward_with_2dfan4(crop_vision_frame)
	# Rescale from the model's 64 unit grid to the 256 pixel crop, then undo rotation and warp.
	face_landmark_68 = face_landmark_68[:, :, :2][0] / 64 * 256
	face_landmark_68 = transform_points(face_landmark_68, cv2.invertAffineTransform(rotated_matrix))
	face_landmark_68 = transform_points(face_landmark_68, cv2.invertAffineTransform(affine_matrix))
	# Mean heatmap peak as confidence, stretched from [0, 0.9] to [0, 1].
	face_landmark_score_68 = numpy.amax(face_heatmap, axis = (2, 3))
	face_landmark_score_68 = numpy.mean(face_landmark_score_68)
	face_landmark_score_68 = numpy.interp(face_landmark_score_68, [ 0, 0.9 ], [ 0, 1 ])
	return face_landmark_68, face_landmark_score_68
152
+
153
+
154
def detect_with_peppa_wutz(temp_vision_frame : VisionFrame, bounding_box : BoundingBox, face_angle : Angle) -> Tuple[FaceLandmark68, Score]:
	"""Detect the 68 point face landmark with the peppa_wutz model.

	Mirrors detect_with_2dfan4 but the model emits (x, y, score) triples per
	point instead of a separate heatmap. The score is mapped to [0, 1].
	"""
	model_size = create_static_model_set('full').get('peppa_wutz').get('size')
	# Scale so the larger bounding box side maps to 195 pixels (clip guards zero-sized boxes).
	scale = 195 / numpy.subtract(bounding_box[2:], bounding_box[:2]).max().clip(1, None)
	translation = (model_size[0] - numpy.add(bounding_box[2:], bounding_box[:2]) * scale) * 0.5
	rotated_matrix, rotated_size = create_rotated_matrix_and_size(face_angle, model_size)
	crop_vision_frame, affine_matrix = warp_face_by_translation(temp_vision_frame, translation, scale, model_size)
	crop_vision_frame = cv2.warpAffine(crop_vision_frame, rotated_matrix, rotated_size)
	crop_vision_frame = conditional_optimize_contrast(crop_vision_frame)
	# HWC uint8 -> NCHW float32 in [0, 1].
	crop_vision_frame = crop_vision_frame.transpose(2, 0, 1).astype(numpy.float32) / 255.0
	crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0)
	prediction = forward_with_peppa_wutz(crop_vision_frame)
	# Rescale from the model's 64 unit grid to the crop size, then undo rotation and warp.
	face_landmark_68 = prediction.reshape(-1, 3)[:, :2] / 64 * model_size[0]
	face_landmark_68 = transform_points(face_landmark_68, cv2.invertAffineTransform(rotated_matrix))
	face_landmark_68 = transform_points(face_landmark_68, cv2.invertAffineTransform(affine_matrix))
	# Mean per-point confidence, stretched from [0, 0.95] to [0, 1].
	face_landmark_score_68 = prediction.reshape(-1, 3)[:, 2].mean()
	face_landmark_score_68 = numpy.interp(face_landmark_score_68, [ 0, 0.95 ], [ 0, 1 ])
	return face_landmark_68, face_landmark_score_68
171
+
172
+
173
def conditional_optimize_contrast(crop_vision_frame : VisionFrame) -> VisionFrame:
	"""Boost the contrast of dark crops (mean lightness below 30) via CLAHE on the L channel."""
	crop_vision_frame = cv2.cvtColor(crop_vision_frame, cv2.COLOR_RGB2Lab)
	is_dark = numpy.mean(crop_vision_frame[:, :, 0]) < 30 #type:ignore[arg-type]
	if is_dark:
		clahe = cv2.createCLAHE(clipLimit = 2)
		crop_vision_frame[:, :, 0] = clahe.apply(crop_vision_frame[:, :, 0])
	return cv2.cvtColor(crop_vision_frame, cv2.COLOR_Lab2RGB)
179
+
180
+
181
def estimate_face_landmark_68_5(face_landmark_5 : FaceLandmark5) -> FaceLandmark68:
	"""Approximate a 68 point landmark from a 5 point landmark via the fan_68_5 model."""
	affine_matrix = estimate_matrix_by_face_landmark_5(face_landmark_5, 'ffhq_512', (1, 1))
	inverse_matrix = cv2.invertAffineTransform(affine_matrix)
	normed_face_landmark_5 = cv2.transform(face_landmark_5.reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
	face_landmark_68_5 = forward_fan_68_5(normed_face_landmark_5)
	return cv2.transform(face_landmark_68_5.reshape(1, -1, 2), inverse_matrix).reshape(-1, 2)
187
+
188
+
189
def forward_with_2dfan4(crop_vision_frame : VisionFrame) -> Tuple[Prediction, Prediction]:
	"""Run the 2dfan4 session and return its raw (landmark, heatmap) outputs."""
	landmarker_session = get_inference_pool().get('2dfan4')

	with conditional_thread_semaphore():
		return landmarker_session.run(None,
		{
			'input': [ crop_vision_frame ]
		})
199
+
200
+
201
def forward_with_peppa_wutz(crop_vision_frame : VisionFrame) -> Prediction:
	"""Run the peppa_wutz session and return its first raw output."""
	landmarker_session = get_inference_pool().get('peppa_wutz')

	with conditional_thread_semaphore():
		return landmarker_session.run(None,
		{
			'input': crop_vision_frame
		})[0]
211
+
212
+
213
def forward_fan_68_5(face_landmark_5 : FaceLandmark5) -> FaceLandmark68:
	"""Run the fan_68_5 session on a normalized 5 point landmark and return the 68 point estimate."""
	landmarker_session = get_inference_pool().get('fan_68_5')

	with conditional_thread_semaphore():
		return landmarker_session.run(None,
		{
			'input': [ face_landmark_5 ]
		})[0][0]
facefusion/face_masker.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import List, Tuple
3
+
4
+ import cv2
5
+ import numpy
6
+
7
+ import facefusion.choices
8
+ from facefusion import inference_manager, state_manager
9
+ from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
10
+ from facefusion.filesystem import resolve_relative_path
11
+ from facefusion.thread_helper import conditional_thread_semaphore
12
+ from facefusion.types import DownloadScope, DownloadSet, FaceLandmark68, FaceMaskArea, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame
13
+
14
+
15
@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""Describe download locations, local paths and input sizes of the occluder and parser models."""
	model_set = {}
	# (model name, role key under hashes/sources, release tag, input size)
	model_specs =\
	[
		('xseg_1', 'face_occluder', 'models-3.1.0', (256, 256)),
		('xseg_2', 'face_occluder', 'models-3.1.0', (256, 256)),
		('xseg_3', 'face_occluder', 'models-3.2.0', (256, 256)),
		('bisenet_resnet_18', 'face_parser', 'models-3.1.0', (512, 512)),
		('bisenet_resnet_34', 'face_parser', 'models-3.0.0', (512, 512))
	]

	for model_name, model_role, release, model_size in model_specs:
		model_set[model_name] =\
		{
			'hashes':
			{
				model_role:
				{
					'url': resolve_download_url(release, model_name + '.hash'),
					'path': resolve_relative_path('../.assets/models/' + model_name + '.hash')
				}
			},
			'sources':
			{
				model_role:
				{
					'url': resolve_download_url(release, model_name + '.onnx'),
					'path': resolve_relative_path('../.assets/models/' + model_name + '.onnx')
				}
			},
			'size': model_size
		}
	return model_set
120
+
121
+
122
def get_inference_pool() -> InferencePool:
	"""Return ONNX sessions for the selected occluder and parser models."""
	_, model_source_set = collect_model_downloads()
	model_names = [ state_manager.get_item('face_occluder_model'), state_manager.get_item('face_parser_model') ]
	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)
127
+
128
+
129
def clear_inference_pool() -> None:
	# Release the sessions of the selected occluder and parser models.
	model_names = [ state_manager.get_item('face_occluder_model'), state_manager.get_item('face_parser_model') ]
	inference_manager.clear_inference_pool(__name__, model_names)
132
+
133
+
134
def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
	"""Collect hash and source download entries for the selected occluder and parser models."""
	model_set = create_static_model_set('full')
	model_hash_set = {}
	model_source_set = {}
	# (selected model from state, valid model names, role key under hashes/sources)
	selected_specs =\
	[
		(state_manager.get_item('face_occluder_model'), [ 'xseg_1', 'xseg_2', 'xseg_3' ], 'face_occluder'),
		(state_manager.get_item('face_parser_model'), [ 'bisenet_resnet_18', 'bisenet_resnet_34' ], 'face_parser')
	]

	for selected_model, model_names, model_role in selected_specs:
		if selected_model in model_names:
			model_hash_set[selected_model] = model_set.get(selected_model).get('hashes').get(model_role)
			model_source_set[selected_model] = model_set.get(selected_model).get('sources').get(model_role)
	return model_hash_set, model_source_set
150
+
151
+
152
def pre_check() -> bool:
	"""Download missing model hashes and sources; return False when either step failed."""
	model_hash_set, model_source_set = collect_model_downloads()
	if not conditional_download_hashes(model_hash_set):
		return False
	return conditional_download_sources(model_source_set)
156
+
157
+
158
def create_box_mask(crop_vision_frame : VisionFrame, face_mask_blur : float, face_mask_padding : Padding) -> Mask:
	"""Create a rectangular face mask with padded, optionally blurred borders.

	face_mask_padding holds percentages of the crop size in (top, right,
	bottom, left) order, as the slicing below shows.
	"""
	crop_size = crop_vision_frame.shape[:2][::-1]
	blur_amount = int(crop_size[0] * 0.5 * face_mask_blur)
	blur_area = max(blur_amount // 2, 1)
	box_mask : Mask = numpy.ones(crop_size).astype(numpy.float32)
	# Zero out at least blur_area pixels (or the requested padding) along each border.
	box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0
	box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0
	box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0
	box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0

	if blur_amount > 0:
		box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25)
	return box_mask
171
+
172
+
173
def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask:
	"""Estimate an occlusion mask for the given face crop with the selected occluder model.

	Returns a float32 mask in [0, 1] at the crop resolution with softened edges.
	"""
	model_name = state_manager.get_item('face_occluder_model')
	model_size = create_static_model_set('full').get(model_name).get('size')
	prepare_vision_frame = cv2.resize(crop_vision_frame, model_size)
	prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32) / 255.0
	# The former transpose(0, 1, 2, 3) / transpose(0, 1, 2) calls were identity
	# permutations and have been removed.
	occlusion_mask = forward_occlude_face(prepare_vision_frame)
	occlusion_mask = occlusion_mask.clip(0, 1).astype(numpy.float32)
	occlusion_mask = cv2.resize(occlusion_mask, crop_vision_frame.shape[:2][::-1])
	# Blur, then remap [0.5, 1] to [0, 1] to soften and tighten the mask edge.
	occlusion_mask = (cv2.GaussianBlur(occlusion_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2
	return occlusion_mask
184
+
185
+
186
def create_area_mask(crop_vision_frame : VisionFrame, face_landmark_68 : FaceLandmark68, face_mask_areas : List[FaceMaskArea]) -> Mask:
	"""Create a soft mask over the convex hull of the landmarks of the requested face areas."""
	crop_size = crop_vision_frame.shape[:2][::-1]
	landmark_points = []

	# Collect the 68 point indices belonging to each requested area.
	for face_mask_area in face_mask_areas:
		if face_mask_area in facefusion.choices.face_mask_area_set:
			landmark_points.extend(facefusion.choices.face_mask_area_set.get(face_mask_area))

	convex_hull = cv2.convexHull(face_landmark_68[landmark_points].astype(numpy.int32))
	area_mask = numpy.zeros(crop_size).astype(numpy.float32)
	cv2.fillConvexPoly(area_mask, convex_hull, 1.0) # type: ignore[call-overload]
	# Blur, then remap [0.5, 1] to [0, 1] to soften the mask edge.
	area_mask = (cv2.GaussianBlur(area_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2
	return area_mask
199
+
200
+
201
def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
	"""Create a soft mask covering the requested semantic face regions via the parser model."""
	model_name = state_manager.get_item('face_parser_model')
	model_size = create_static_model_set('full').get(model_name).get('size')
	prepare_vision_frame = cv2.resize(crop_vision_frame, model_size)
	# Reverse channel order, scale to [0, 1] and normalize per channel
	# (constants match the common ImageNet mean/std).
	prepare_vision_frame = prepare_vision_frame[:, :, ::-1].astype(numpy.float32) / 255.0
	prepare_vision_frame = numpy.subtract(prepare_vision_frame, numpy.array([ 0.485, 0.456, 0.406 ]).astype(numpy.float32))
	prepare_vision_frame = numpy.divide(prepare_vision_frame, numpy.array([ 0.229, 0.224, 0.225 ]).astype(numpy.float32))
	prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0)
	prepare_vision_frame = prepare_vision_frame.transpose(0, 3, 1, 2)
	region_mask = forward_parse_face(prepare_vision_frame)
	# Keep pixels whose argmax class belongs to one of the requested regions.
	region_mask = numpy.isin(region_mask.argmax(0), [ facefusion.choices.face_mask_region_set.get(face_mask_region) for face_mask_region in face_mask_regions ])
	region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_vision_frame.shape[:2][::-1])
	# Blur, then remap [0.5, 1] to [0, 1] to soften the mask edge.
	region_mask = (cv2.GaussianBlur(region_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2
	return region_mask
215
+
216
+
217
def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask:
	"""Run the selected occluder session and return its raw mask output."""
	model_name = state_manager.get_item('face_occluder_model')
	occluder_session = get_inference_pool().get(model_name)

	with conditional_thread_semaphore():
		return occluder_session.run(None,
		{
			'input': prepare_vision_frame
		})[0][0]
228
+
229
+
230
def forward_parse_face(prepare_vision_frame : VisionFrame) -> Mask:
	"""Run the selected parser session and return its raw per-class output."""
	model_name = state_manager.get_item('face_parser_model')
	parser_session = get_inference_pool().get(model_name)

	with conditional_thread_semaphore():
		return parser_session.run(None,
		{
			'input': prepare_vision_frame
		})[0][0]
facefusion/face_recognizer.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import Tuple
3
+
4
+ import numpy
5
+
6
+ from facefusion import inference_manager
7
+ from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
8
+ from facefusion.face_helper import warp_face_by_face_landmark_5
9
+ from facefusion.filesystem import resolve_relative_path
10
+ from facefusion.thread_helper import conditional_thread_semaphore
11
+ from facefusion.types import DownloadScope, Embedding, FaceLandmark5, InferencePool, ModelOptions, ModelSet, VisionFrame
12
+
13
+
14
@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	"""Describe the ArcFace recognizer model download, warp template and input size."""
	model_file_stem = 'arcface_w600k_r50'

	return\
	{
		'arcface':
		{
			'hashes':
			{
				'face_recognizer':
				{
					'url': resolve_download_url('models-3.0.0', model_file_stem + '.hash'),
					'path': resolve_relative_path('../.assets/models/' + model_file_stem + '.hash')
				}
			},
			'sources':
			{
				'face_recognizer':
				{
					'url': resolve_download_url('models-3.0.0', model_file_stem + '.onnx'),
					'path': resolve_relative_path('../.assets/models/' + model_file_stem + '.onnx')
				}
			},
			'template': 'arcface_112_v2',
			'size': (112, 112)
		}
	}
40
+
41
+
42
def get_inference_pool() -> InferencePool:
	"""Return the ONNX session for the ArcFace recognizer."""
	model_source_set = get_model_options().get('sources')
	return inference_manager.get_inference_pool(__name__, [ 'arcface' ], model_source_set)
47
+
48
+
49
def clear_inference_pool() -> None:
	# Release the ArcFace session.
	model_names = [ 'arcface' ]
	inference_manager.clear_inference_pool(__name__, model_names)
52
+
53
+
54
def get_model_options() -> ModelOptions:
	# There is only one recognizer model ('arcface'); return its options.
	return create_static_model_set('full').get('arcface')
56
+
57
+
58
def pre_check() -> bool:
	"""Download the missing recognizer hash and source; return False when either step failed."""
	model_hash_set = get_model_options().get('hashes')
	model_source_set = get_model_options().get('sources')
	if not conditional_download_hashes(model_hash_set):
		return False
	return conditional_download_sources(model_source_set)
63
+
64
+
65
def calc_embedding(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Embedding, Embedding]:
	"""Calculate the ArcFace embedding of a face.

	Warps the face to the model template, runs inference and returns both the
	raw embedding and its L2 normalized variant.
	"""
	model_template = get_model_options().get('template')
	model_size = get_model_options().get('size')
	crop_vision_frame, matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size)
	# Scale pixels to [-1, 1], reverse the channel order and move to NCHW.
	crop_vision_frame = crop_vision_frame / 127.5 - 1
	crop_vision_frame = crop_vision_frame[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32)
	crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0)
	embedding = forward(crop_vision_frame)
	embedding = embedding.ravel()
	normed_embedding = embedding / numpy.linalg.norm(embedding)
	return embedding, normed_embedding
76
+
77
+
78
def forward(crop_vision_frame : VisionFrame) -> Embedding:
	"""Run the ArcFace session on the prepared crop and return the raw embedding batch."""
	recognizer_session = get_inference_pool().get('face_recognizer')

	with conditional_thread_semaphore():
		return recognizer_session.run(None,
		{
			'input': crop_vision_frame
		})[0]
facefusion/face_selector.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ import numpy
4
+
5
+ from facefusion import state_manager
6
+ from facefusion.types import Face, FaceSelectorOrder, FaceSet, Gender, Race, Score
7
+
8
# changed: reference_faces is a plain list here instead of a keyed FaceSet
def find_similar_faces(target_faces: List[Face], reference_faces: List[Face], face_distance: float) -> List[Face]:
	"""Return every target face whose embedding lies within face_distance of any reference face."""
	similar_faces = []

	if not target_faces or not reference_faces:
		return similar_faces

	try:
		for target_face in target_faces:
			if not hasattr(target_face, 'embedding'):
				continue
			for reference_face in reference_faces:
				if not hasattr(reference_face, 'embedding'):
					continue
				# Euclidean distance between the raw embeddings.
				if numpy.linalg.norm(reference_face.embedding - target_face.embedding) < face_distance:
					similar_faces.append(target_face)
					break
	except Exception as e:
		print(f"[DEBUG] Error in find_similar_faces: {e}")

	return similar_faces
27
+
28
+
29
def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
	"""Check whether two faces are closer than the given normalized distance threshold."""
	raw_distance = calc_face_distance(face, reference_face)
	normed_distance = float(numpy.interp(raw_distance, [ 0, 2 ], [ 0, 1 ]))
	return normed_distance < face_distance
33
+
34
+
35
def calc_face_distance(face : Face, reference_face : Face) -> float:
	"""Cosine distance of the normalized embeddings; 0 when either embedding is missing."""
	if not hasattr(face, 'normed_embedding') or not hasattr(reference_face, 'normed_embedding'):
		return 0
	return 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
39
+
40
+
41
def sort_and_filter_faces(faces : List[Face]) -> List[Face]:
	"""Apply the configured selector order, gender, race and age filters to the faces."""
	if not faces:
		return faces

	face_selector_order = state_manager.get_item('face_selector_order')
	if face_selector_order:
		faces = sort_faces_by_order(faces, face_selector_order)
	face_selector_gender = state_manager.get_item('face_selector_gender')
	if face_selector_gender:
		faces = filter_faces_by_gender(faces, face_selector_gender)
	face_selector_race = state_manager.get_item('face_selector_race')
	if face_selector_race:
		faces = filter_faces_by_race(faces, face_selector_race)
	face_selector_age_start = state_manager.get_item('face_selector_age_start')
	face_selector_age_end = state_manager.get_item('face_selector_age_end')
	if face_selector_age_start or face_selector_age_end:
		faces = filter_faces_by_age(faces, face_selector_age_start, face_selector_age_end)
	return faces
52
+
53
+
54
def sort_faces_by_order(faces : List[Face], order : FaceSelectorOrder) -> List[Face]:
	"""Sort faces by the given selector order; unknown orders leave the list untouched."""
	# Map each order onto its sort key and direction.
	sort_spec_map =\
	{
		'left-right': (get_bounding_box_left, False),
		'right-left': (get_bounding_box_left, True),
		'top-bottom': (get_bounding_box_top, False),
		'bottom-top': (get_bounding_box_top, True),
		'small-large': (get_bounding_box_area, False),
		'large-small': (get_bounding_box_area, True),
		'best-worst': (get_face_detector_score, True),
		'worst-best': (get_face_detector_score, False)
	}

	if order in sort_spec_map:
		sort_key, sort_reverse = sort_spec_map[order]
		return sorted(faces, key = sort_key, reverse = sort_reverse)
	return faces
72
+
73
+
74
def get_bounding_box_left(face : Face) -> float:
	# Sort key: left edge (x1) of the bounding box.
	return face.bounding_box[0]
76
+
77
+
78
def get_bounding_box_top(face : Face) -> float:
	# Sort key: top edge (y1) of the bounding box.
	return face.bounding_box[1]
80
+
81
+
82
def get_bounding_box_area(face : Face) -> float:
	# Sort key: bounding box area (width * height).
	return (face.bounding_box[2] - face.bounding_box[0]) * (face.bounding_box[3] - face.bounding_box[1])
84
+
85
+
86
def get_face_detector_score(face : Face) -> Score:
	# Sort key: the detector confidence from the face's score set.
	return face.score_set.get('detector')
88
+
89
+
90
def filter_faces_by_gender(faces : List[Face], gender : Gender) -> List[Face]:
	"""Keep only the faces classified as the given gender."""
	return [ face for face in faces if face.gender == gender ]
97
+
98
+
99
def filter_faces_by_age(faces : List[Face], face_selector_age_start : int, face_selector_age_end : int) -> List[Face]:
	"""Keep faces whose age range overlaps the selector range (end exclusive)."""
	selector_ages = set(range(face_selector_age_start, face_selector_age_end))
	return [ face for face in faces if selector_ages & set(face.age) ]
107
+
108
+
109
def filter_faces_by_race(faces : List[Face], race : Race) -> List[Face]:
	"""Keep only the faces classified as the given race."""
	return [ face for face in faces if face.race == race ]
facefusion/face_store.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ from facefusion.hash_helper import create_hash
4
+ from facefusion.types import Face, FaceSet, FaceStore, VisionFrame
5
+
6
# Module level cache: 'static_faces' maps a frame hash to its detected faces.
# NOTE(review): the 'reference_faces' key looks unused here — this module keeps
# reference faces in a separate module level list; confirm before removing.
FACE_STORE : FaceStore =\
{
	'static_faces': {},
	'reference_faces': {}
}


def get_face_store() -> FaceStore:
	# Expose the shared, mutable store.
	return FACE_STORE
15
+
16
+
17
def get_static_faces(vision_frame : VisionFrame) -> Optional[List[Face]]:
	"""Look up previously detected faces for this frame, keyed by a hash of its center area."""
	vision_hash = create_hash(crop_vision_area(vision_frame).tobytes())
	return FACE_STORE['static_faces'].get(vision_hash)
23
+
24
+
25
def set_static_faces(vision_frame : VisionFrame, faces : List[Face]) -> None:
	"""Cache detected faces for this frame, keyed by a hash of its center area."""
	vision_hash = create_hash(crop_vision_area(vision_frame).tobytes())
	if vision_hash:
		FACE_STORE['static_faces'][vision_hash] = faces
30
+
31
+
32
def clear_static_faces() -> None:
	# Drop every cached frame-to-faces entry.
	FACE_STORE['static_faces'].clear()
34
+
35
# changed: reference faces are kept in a simple module level list
# (replaces the keyed FaceSet store). Defined before first use.
reference_faces : List[Face] = []


def get_reference_faces() -> List[Face]:
	"""Return all reference faces collected so far (the live list, not a copy)."""
	return reference_faces


def append_reference_face(face : Face) -> None:
	"""Append a reference face, ignoring None values."""
	# list.append mutates in place, so no 'global' declaration is needed.
	if face is not None:
		reference_faces.append(face)


def clear_reference_faces() -> None:
	"""Remove all reference faces."""
	# Clear in place so aliases obtained via get_reference_faces() stay in sync
	# (rebinding the name would leave them pointing at the stale list).
	reference_faces.clear()
52
+
53
+
54
def crop_vision_area(vision_frame : VisionFrame) -> VisionFrame:
	"""Return the 32x32 patch around the frame center, used as a cheap frame fingerprint."""
	half_size = 16
	height, width = vision_frame.shape[:2]
	center_y = height // 2
	center_x = width // 2
	return vision_frame[center_y - half_size : center_y + half_size, center_x - half_size : center_x + half_size]
facefusion/ffmpeg.py ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ import tempfile
4
+ from functools import partial
5
+ from typing import List, Optional, cast
6
+
7
+ from tqdm import tqdm
8
+
9
+ import facefusion.choices
10
+ from facefusion import ffmpeg_builder, logger, process_manager, state_manager, wording
11
+ from facefusion.filesystem import get_file_format, remove_file
12
+ from facefusion.types import AudioBuffer, AudioEncoder, Commands, EncoderSet, Fps, UpdateProgress, VideoEncoder, VideoFormat
13
+ from facefusion.vision import detect_video_duration, detect_video_fps, predict_video_frame_total
14
+
15
# added (fork): extra imports for PNG finalisation and temp-path helpers
16
+ from facefusion.common_helper import is_windows
17
+ import cv2
18
+ import numpy as np
19
+ from facefusion.temp_helper import get_temp_file_path, get_temp_frames_pattern
20
+ from facefusion import logger, state_manager
21
+
22
+
23
def run_ffmpeg_with_progress(commands : Commands, update_progress : UpdateProgress) -> subprocess.Popen[bytes]:
	"""
	Run ffmpeg with '-progress -' appended so the frame counter streams to
	stdout, forwarding each reported frame number to update_progress.
	Returns the finished (or terminated) Popen handle; callers check returncode.
	"""
	log_level = state_manager.get_item('log_level')
	commands.extend(ffmpeg_builder.set_progress())
	commands.extend(ffmpeg_builder.cast_stream())
	commands = ffmpeg_builder.run(commands)
	process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)

	while process_manager.is_processing():
		try:

			# Drain the '-progress' key=value stream.
			# NOTE(review): assumes each matching line contains exactly one
			# 'frame=' followed by a bare integer — confirm against ffmpeg output.
			while __line__ := process.stdout.readline().decode().lower():
				if 'frame=' in __line__:
					_, frame_number = __line__.split('frame=')
					update_progress(int(frame_number))

			if log_level == 'debug':
				log_debug(process)
			# Short timeout keeps the loop responsive to stop requests.
			process.wait(timeout = 0.5)
		except subprocess.TimeoutExpired:
			continue
		# wait() returned without timing out: ffmpeg has exited.
		return process

	if process_manager.is_stopping():
		process.terminate()
	return process
48
+
49
+
50
def update_progress(progress : tqdm, frame_number : int) -> None:
	# Advance the bar by the delta between the reported frame and its position.
	delta = frame_number - progress.n
	progress.update(delta)
52
+
53
+
54
def run_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
	"""
	Run an ffmpeg command to completion, polling so a stop request can
	interrupt it. Returns the finished (or terminated) Popen handle.
	"""
	log_level = state_manager.get_item('log_level')
	commands = ffmpeg_builder.run(commands)
	process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)

	while process_manager.is_processing():
		try:
			if log_level == 'debug':
				log_debug(process)
			# Short timeout keeps the loop responsive to stop requests.
			process.wait(timeout = 0.5)
		except subprocess.TimeoutExpired:
			continue
		# wait() returned without timing out: ffmpeg has exited.
		return process

	if process_manager.is_stopping():
		process.terminate()
	return process
71
+
72
+
73
def open_ffmpeg(commands : Commands) -> subprocess.Popen[bytes]:
	# Start ffmpeg for streaming use: the caller feeds stdin and/or reads stdout.
	commands = ffmpeg_builder.run(commands)
	return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
76
+
77
+
78
def log_debug(process : subprocess.Popen[bytes]) -> None:
	# Flush ffmpeg's stderr into the debug log, one trimmed non-empty line at a time.
	_, stderr = process.communicate()

	for raw_line in stderr.decode().split(os.linesep):
		message = raw_line.strip()
		if message:
			logger.debug(message, __name__)
85
+
86
+
87
def get_available_encoder_set() -> EncoderSet:
	"""
	Query 'ffmpeg -encoders' and collect the supported audio/video encoders,
	inserted at the preference index defined in facefusion.choices.
	"""
	available_encoder_set : EncoderSet =\
	{
		'audio': [],
		'video': []
	}
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.get_encoders()
	)
	process = run_ffmpeg(commands)

	# ffmpeg lists encoders in rows whose flag column starts with 'a' (audio)
	# or 'v' (video) after lower-casing; the second column is the encoder name.
	while line := process.stdout.readline().decode().lower():
		if line.startswith(' a'):
			audio_encoder = line.split()[1]

			if audio_encoder in facefusion.choices.output_audio_encoders:
				index = facefusion.choices.output_audio_encoders.index(audio_encoder) #type:ignore[arg-type]
				available_encoder_set['audio'].insert(index, audio_encoder) #type:ignore[arg-type]
		if line.startswith(' v'):
			video_encoder = line.split()[1]

			if video_encoder in facefusion.choices.output_video_encoders:
				index = facefusion.choices.output_video_encoders.index(video_encoder) #type:ignore[arg-type]
				available_encoder_set['video'].insert(index, video_encoder) #type:ignore[arg-type]

	return available_encoder_set
113
+
114
+
115
def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool:
	"""
	Dump the (trimmed, resampled, resized) target video into numbered temp
	frames ('%08d' pattern) with a progress bar. Returns True on success.
	"""
	extract_frame_total = predict_video_frame_total(target_path, temp_video_fps, trim_frame_start, trim_frame_end)
	temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.set_input(target_path),
		ffmpeg_builder.set_media_resolution(temp_video_resolution),
		ffmpeg_builder.set_frame_quality(0),
		ffmpeg_builder.select_frame_range(trim_frame_start, trim_frame_end, temp_video_fps),
		ffmpeg_builder.prevent_frame_drop(),
		ffmpeg_builder.set_output(temp_frames_pattern)
	)

	with tqdm(total = extract_frame_total, desc = wording.get('extracting'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
		process = run_ffmpeg_with_progress(commands, partial(update_progress, progress))
		return process.returncode == 0
130
+
131
+
132
+
133
def copy_image(target_path : str, temp_image_resolution : str) -> bool:
	"""
	Copy the target image into the temp location (temp.png), resized to the
	temp resolution at maximum quality. Returns True on success.
	"""
	temp_image_path = get_temp_file_path(target_path)
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.set_input(target_path),
		ffmpeg_builder.set_media_resolution(temp_image_resolution),

		# Fork fix: derive quality flags from the temp file (PNG), not the source path.
		ffmpeg_builder.set_image_quality(temp_image_path, 100),

		ffmpeg_builder.force_output(temp_image_path)
	)
	return run_ffmpeg(commands).returncode == 0
145
+
146
+
147
def finalize_image(target_path: str,
                   output_path: str,
                   output_image_resolution: str) -> str:
    """
    Persist the processed temp image (temp.png produced by the processors)
    as a lossless PNG, forcing the output extension to .png.

    Returns the final file path, or "" on any failure.
    NOTE(review): output_image_resolution is currently unused — the optional
    resize below is commented out.
    """
    try:
        # 1) The current temp.png after all processors have run
        temp_png = get_temp_file_path(target_path)
        if not os.path.isfile(temp_png):
            logger.error(f"[finalize_image] temp not found: {temp_png}", __name__)
            return ""

        # 2) Load temp.png (np.fromfile + imdecode avoids Windows path-encoding issues)
        if is_windows():
            buf = np.fromfile(temp_png, dtype=np.uint8)
            img = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)
        else:
            img = cv2.imread(temp_png, cv2.IMREAD_UNCHANGED)

        if img is None:
            logger.error(f"[finalize_image] failed to load {temp_png}", __name__)
            return ""

        # (optional resize — usually unnecessary)
        # w, h = map(int, output_image_resolution.split('x'))
        # img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)

        # 3) Build the final path with a .png extension
        base, _ = os.path.splitext(output_path)
        final_path = base + ".png"

        # 4) Make sure the target directory exists
        os.makedirs(os.path.dirname(final_path) or ".", exist_ok=True)

        # 5) Write lossless PNG (compression level 0 = fastest; PNG stays lossless)
        ok = cv2.imwrite(final_path, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
        if not ok:
            logger.error(f"[finalize_image] cv2.imwrite failed: {final_path}", __name__)
            return ""

        logger.info(f"[finalize_image] saved: {final_path}", __name__)
        return final_path

    except Exception as e:
        logger.error(f"[finalize_image] exception: {e}", __name__)
        return ""
198
+
199
+
200
def read_audio_buffer(target_path : str, audio_sample_rate : int, audio_sample_size : int, audio_channel_total : int) -> Optional[AudioBuffer]:
	"""
	Decode the target's audio track to raw PCM via ffmpeg's stdout.
	Returns the byte buffer, or None if ffmpeg failed.
	"""
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.set_input(target_path),
		ffmpeg_builder.ignore_video_stream(),
		ffmpeg_builder.set_audio_sample_rate(audio_sample_rate),
		ffmpeg_builder.set_audio_sample_size(audio_sample_size),
		ffmpeg_builder.set_audio_channel_total(audio_channel_total),
		ffmpeg_builder.cast_stream()
	)

	process = open_ffmpeg(commands)
	audio_buffer, _ = process.communicate()
	if process.returncode == 0:
		return audio_buffer
	return None
215
+
216
+
217
def restore_audio(target_path : str, output_path : str, trim_frame_start : int, trim_frame_end : int) -> bool:
	"""
	Mux the original (trimmed) audio from target_path back onto the rendered
	temp video, copying video and re-encoding audio only. Returns True on success.
	"""
	output_audio_encoder = state_manager.get_item('output_audio_encoder')
	output_audio_quality = state_manager.get_item('output_audio_quality')
	output_audio_volume = state_manager.get_item('output_audio_volume')
	target_video_fps = detect_video_fps(target_path)
	temp_video_path = get_temp_file_path(target_path)
	temp_video_format = cast(VideoFormat, get_file_format(temp_video_path))
	temp_video_duration = detect_video_duration(temp_video_path)

	# Swap in a container-compatible audio encoder when necessary.
	output_audio_encoder = fix_audio_encoder(temp_video_format, output_audio_encoder)
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.set_input(temp_video_path),
		ffmpeg_builder.select_media_range(trim_frame_start, trim_frame_end, target_video_fps),
		ffmpeg_builder.set_input(target_path),
		ffmpeg_builder.copy_video_encoder(),
		ffmpeg_builder.set_audio_encoder(output_audio_encoder),
		ffmpeg_builder.set_audio_quality(output_audio_encoder, output_audio_quality),
		ffmpeg_builder.set_audio_volume(output_audio_volume),
		# Video from input 0 (temp render), audio from input 1 (original target).
		ffmpeg_builder.select_media_stream('0:v:0'),
		ffmpeg_builder.select_media_stream('1:a:0'),
		ffmpeg_builder.set_video_duration(temp_video_duration),
		ffmpeg_builder.force_output(output_path)
	)
	return run_ffmpeg(commands).returncode == 0
241
+
242
+
243
def replace_audio(target_path : str, audio_path : str, output_path : str) -> bool:
	"""
	Mux an external audio file onto the rendered temp video, trimming to the
	video's duration. Returns True on success.
	"""
	output_audio_encoder = state_manager.get_item('output_audio_encoder')
	output_audio_quality = state_manager.get_item('output_audio_quality')
	output_audio_volume = state_manager.get_item('output_audio_volume')
	temp_video_path = get_temp_file_path(target_path)
	temp_video_format = cast(VideoFormat, get_file_format(temp_video_path))
	temp_video_duration = detect_video_duration(temp_video_path)

	# Swap in a container-compatible audio encoder when necessary.
	output_audio_encoder = fix_audio_encoder(temp_video_format, output_audio_encoder)
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.set_input(temp_video_path),
		ffmpeg_builder.set_input(audio_path),
		ffmpeg_builder.copy_video_encoder(),
		ffmpeg_builder.set_audio_encoder(output_audio_encoder),
		ffmpeg_builder.set_audio_quality(output_audio_encoder, output_audio_quality),
		ffmpeg_builder.set_audio_volume(output_audio_volume),
		ffmpeg_builder.set_video_duration(temp_video_duration),
		ffmpeg_builder.force_output(output_path)
	)
	return run_ffmpeg(commands).returncode == 0
263
+
264
+
265
def merge_video(target_path : str, temp_video_fps : Fps, output_video_resolution : str, output_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool:
	"""
	Encode the processed temp frames back into the temp video file with the
	configured encoder/quality/preset, showing a progress bar.
	Returns True on success.
	"""
	output_video_encoder = state_manager.get_item('output_video_encoder')
	output_video_quality = state_manager.get_item('output_video_quality')
	output_video_preset = state_manager.get_item('output_video_preset')
	merge_frame_total = predict_video_frame_total(target_path, output_video_fps, trim_frame_start, trim_frame_end)
	temp_video_path = get_temp_file_path(target_path)
	temp_video_format = cast(VideoFormat, get_file_format(temp_video_path))
	temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')

	# Swap in a container-compatible video encoder when necessary.
	output_video_encoder = fix_video_encoder(temp_video_format, output_video_encoder)
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.set_input_fps(temp_video_fps),
		ffmpeg_builder.set_input(temp_frames_pattern),
		ffmpeg_builder.set_media_resolution(output_video_resolution),
		ffmpeg_builder.set_video_encoder(output_video_encoder),
		ffmpeg_builder.set_video_quality(output_video_encoder, output_video_quality),
		ffmpeg_builder.set_video_preset(output_video_encoder, output_video_preset),
		ffmpeg_builder.set_video_fps(output_video_fps),
		ffmpeg_builder.set_pixel_format(output_video_encoder),
		ffmpeg_builder.set_video_colorspace('bt709'),
		ffmpeg_builder.force_output(temp_video_path)
	)

	with tqdm(total = merge_frame_total, desc = wording.get('merging'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
		process = run_ffmpeg_with_progress(commands, partial(update_progress, progress))
		return process.returncode == 0
291
+
292
+
293
def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
	"""
	Concatenate the temp output videos into output_path using ffmpeg's concat
	demuxer with stream copy (no re-encode). Returns True on success.
	"""
	# Fix: tempfile.mktemp() is deprecated and race-prone; mkstemp() creates
	# the list file atomically and hands back an open descriptor.
	concat_video_handle, concat_video_path = tempfile.mkstemp(suffix = '.txt')

	with os.fdopen(concat_video_handle, 'w') as concat_video_file:
		for temp_output_path in temp_output_paths:
			concat_video_file.write('file \'' + os.path.abspath(temp_output_path) + '\'' + os.linesep)

	output_path = os.path.abspath(output_path)
	commands = ffmpeg_builder.chain(
		ffmpeg_builder.unsafe_concat(),
		ffmpeg_builder.set_input(concat_video_path),
		ffmpeg_builder.copy_video_encoder(),
		ffmpeg_builder.copy_audio_encoder(),
		ffmpeg_builder.force_output(output_path)
	)
	process = run_ffmpeg(commands)
	process.communicate()
	remove_file(concat_video_path)
	return process.returncode == 0
314
+
315
+
316
def fix_audio_encoder(video_format : VideoFormat, audio_encoder : AudioEncoder) -> AudioEncoder:
	# Swap in a container-compatible audio encoder when the requested one
	# cannot be muxed into the output format.
	if video_format == 'webm':
		return 'libopus'
	if video_format == 'm4v':
		return 'aac'
	incompatible_encoders =\
	{
		'avi': [ 'libopus' ],
		'mov': [ 'flac', 'libopus' ]
	}
	if audio_encoder in incompatible_encoders.get(video_format, []):
		return 'aac'
	return audio_encoder
326
+
327
+
328
def fix_video_encoder(video_format : VideoFormat, video_encoder : VideoEncoder) -> VideoEncoder:
	# Force a container-compatible video encoder where the requested one
	# cannot be muxed into the output format.
	if video_format == 'webm':
		return 'libvpx-vp9'
	needs_h264 = video_format == 'm4v'
	needs_h264 = needs_h264 or (video_format in [ 'mkv', 'mp4' ] and video_encoder == 'rawvideo')
	needs_h264 = needs_h264 or (video_format == 'mov' and video_encoder == 'libvpx-vp9')
	if needs_h264:
		return 'libx264'
	return video_encoder
facefusion/ffmpeg_builder.py ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import shutil
3
+ from typing import Optional
4
+
5
+ import numpy
6
+
7
+ from facefusion.filesystem import get_file_format
8
+ from facefusion.types import AudioEncoder, Commands, Duration, Fps, StreamMode, VideoEncoder, VideoPreset
9
+
10
+
11
+ def run(commands : Commands) -> Commands:
12
+ return [ shutil.which('ffmpeg'), '-loglevel', 'error' ] + commands
13
+
14
+
15
+ def chain(*commands : Commands) -> Commands:
16
+ return list(itertools.chain(*commands))
17
+
18
+
19
+ def get_encoders() -> Commands:
20
+ return [ '-encoders' ]
21
+
22
+
23
+ def set_hardware_accelerator(value : str) -> Commands:
24
+ return [ '-hwaccel', value ]
25
+
26
+
27
+ def set_progress() -> Commands:
28
+ return [ '-progress' ]
29
+
30
+
31
+ def set_input(input_path : str) -> Commands:
32
+ return [ '-i', input_path ]
33
+
34
+
35
+ def set_input_fps(input_fps : Fps) -> Commands:
36
+ return [ '-r', str(input_fps)]
37
+
38
+
39
+ def set_output(output_path : str) -> Commands:
40
+ return [ output_path ]
41
+
42
+
43
+ def force_output(output_path : str) -> Commands:
44
+ return [ '-y', output_path ]
45
+
46
+
47
+ def cast_stream() -> Commands:
48
+ return [ '-' ]
49
+
50
+
51
+ def set_stream_mode(stream_mode : StreamMode) -> Commands:
52
+ if stream_mode == 'udp':
53
+ return [ '-f', 'mpegts' ]
54
+ if stream_mode == 'v4l2':
55
+ return [ '-f', 'v4l2' ]
56
+ return []
57
+
58
+
59
+ def set_stream_quality(stream_quality : int) -> Commands:
60
+ return [ '-b:v', str(stream_quality) + 'k' ]
61
+
62
+
63
+ def unsafe_concat() -> Commands:
64
+ return [ '-f', 'concat', '-safe', '0' ]
65
+
66
+
67
+ def set_pixel_format(video_encoder : VideoEncoder) -> Commands:
68
+ if video_encoder == 'rawvideo':
69
+ return [ '-pix_fmt', 'rgb24' ]
70
+ return [ '-pix_fmt', 'yuv420p' ]
71
+
72
+
73
+ def set_frame_quality(frame_quality : int) -> Commands:
74
+ return [ '-q:v', str(frame_quality) ]
75
+
76
+
77
def select_frame_range(frame_start : int, frame_end : int, video_fps : Fps) -> Commands:
	# Build the -vf chain: an optional trim filter followed by fps resampling.
	trim_args = []
	if isinstance(frame_start, int):
		trim_args.append('start_frame=' + str(frame_start))
	if isinstance(frame_end, int):
		trim_args.append('end_frame=' + str(frame_end))
	filters = []
	if trim_args:
		filters.append('trim=' + ':'.join(trim_args))
	filters.append('fps=' + str(video_fps))
	return [ '-vf', ','.join(filters) ]
85
+
86
+
87
+ def prevent_frame_drop() -> Commands:
88
+ return [ '-vsync', '0' ]
89
+
90
+
91
def select_media_range(frame_start : int, frame_end : int, media_fps : Fps) -> Commands:
	# Convert frame indices into seek ('-ss') and stop ('-to') timestamps in seconds.
	commands = []

	if isinstance(frame_start, int):
		commands += [ '-ss', str(frame_start / media_fps) ]
	if isinstance(frame_end, int):
		commands += [ '-to', str(frame_end / media_fps) ]
	return commands
99
+
100
+
101
+ def select_media_stream(media_stream : str) -> Commands:
102
+ return [ '-map', media_stream ]
103
+
104
+
105
+ def set_media_resolution(video_resolution : str) -> Commands:
106
+ return [ '-s', video_resolution ]
107
+
108
+
109
def set_image_quality(image_path : str, image_quality : int) -> Commands:
	# Choose quality flags by output format; image_quality 100 always means best.
	file_format = get_file_format(image_path)

	if file_format == 'png':
		# PNG is lossless: map quality 100 -> compression level 0 (fast),
		# quality 0 -> level 9 (smallest).
		png_compression = round(numpy.interp(image_quality, [ 0, 100 ], [ 9, 0 ]))
		return [ '-compression_level', str(png_compression) ]
	if file_format == 'webp':
		# WebP's -q:v already uses a 0..100 scale.
		return [ '-q:v', str(image_quality) ]
	# JPEG and friends: map 0..100 onto ffmpeg's inverted quantizer scale.
	jpeg_compression = round(31 - (image_quality * 0.31))
	return [ '-q:v', str(jpeg_compression) ]
127
+
128
+
129
+ def set_audio_encoder(audio_codec : str) -> Commands:
130
+ return [ '-c:a', audio_codec ]
131
+
132
+
133
+ def copy_audio_encoder() -> Commands:
134
+ return set_audio_encoder('copy')
135
+
136
+
137
+ def set_audio_sample_rate(audio_sample_rate : int) -> Commands:
138
+ return [ '-ar', str(audio_sample_rate) ]
139
+
140
+
141
def set_audio_sample_size(audio_sample_size : int) -> Commands:
	# Map bit depth to the matching raw little-endian PCM format;
	# unknown sizes contribute no flags.
	pcm_formats = { 16: 's16le', 32: 's32le' }
	if audio_sample_size in pcm_formats:
		return [ '-f', pcm_formats[audio_sample_size] ]
	return []
147
+
148
+
149
+ def set_audio_channel_total(audio_channel_total : int) -> Commands:
150
+ return [ '-ac', str(audio_channel_total) ]
151
+
152
+
153
def set_audio_quality(audio_encoder : AudioEncoder, audio_quality : int) -> Commands:
	# Translate the 0..100 quality slider into each encoder's native scale;
	# unsupported encoders contribute no flags.
	if audio_encoder == 'aac':
		return [ '-q:a', str(round(numpy.interp(audio_quality, [ 0, 100 ], [ 0.1, 2.0 ]), 1)) ]
	if audio_encoder == 'libmp3lame':
		return [ '-q:a', str(round(numpy.interp(audio_quality, [ 0, 100 ], [ 9, 0 ]))) ]
	if audio_encoder == 'libopus':
		return [ '-b:a', str(round(numpy.interp(audio_quality, [ 0, 100 ], [ 64, 256 ]))) + 'k' ]
	if audio_encoder == 'libvorbis':
		return [ '-q:a', str(round(numpy.interp(audio_quality, [ 0, 100 ], [ -1, 10 ]), 1)) ]
	return []
167
+
168
+
169
+ def set_audio_volume(audio_volume : int) -> Commands:
170
+ return [ '-filter:a', 'volume=' + str(audio_volume / 100) ]
171
+
172
+
173
+ def set_video_encoder(video_encoder : str) -> Commands:
174
+ return [ '-c:v', video_encoder ]
175
+
176
+
177
+ def copy_video_encoder() -> Commands:
178
+ return set_video_encoder('copy')
179
+
180
+
181
def set_video_quality(video_encoder : VideoEncoder, video_quality : int) -> Commands:
	# Translate the 0..100 quality slider into the encoder family's rate-control flags.
	def to_scale(low : float, high : float) -> int:
		return round(numpy.interp(video_quality, [ 0, 100 ], [ low, high ]))

	if video_encoder in [ 'libx264', 'libx265' ]:
		return [ '-crf', str(to_scale(51, 0)) ]
	if video_encoder == 'libvpx-vp9':
		return [ '-crf', str(to_scale(63, 0)) ]
	if video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
		return [ '-cq', str(to_scale(51, 0)) ]
	if video_encoder in [ 'h264_amf', 'hevc_amf' ]:
		quantizer = str(to_scale(51, 0))
		return [ '-qp_i', quantizer, '-qp_p', quantizer, '-qp_b', quantizer ]
	if video_encoder in [ 'h264_qsv', 'hevc_qsv' ]:
		return [ '-qp', str(to_scale(51, 0)) ]
	if video_encoder in [ 'h264_videotoolbox', 'hevc_videotoolbox' ]:
		# videotoolbox has no quantizer knob; steer quality via bitrate instead.
		return [ '-b:v', str(to_scale(1024, 50512)) + 'k' ]
	return []
201
+
202
+
203
def set_video_preset(video_encoder : VideoEncoder, video_preset : VideoPreset) -> Commands:
	# Forward the x264-style preset, translating it for hardware encoders;
	# encoders without a preset concept (e.g. libvpx-vp9, videotoolbox) get no flag.
	if video_encoder in [ 'libx264', 'libx265' ]:
		return [ '-preset', video_preset ]
	if video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
		return [ '-preset', map_nvenc_preset(video_preset) ]
	if video_encoder in [ 'h264_amf', 'hevc_amf' ]:
		return [ '-quality', map_amf_preset(video_preset) ]
	if video_encoder in [ 'h264_qsv', 'hevc_qsv' ]:
		return [ '-preset', map_qsv_preset(video_preset) ]
	return []
213
+
214
+
215
+ def set_video_colorspace(video_colorspace : str) -> Commands:
216
+ return [ '-colorspace', video_colorspace ]
217
+
218
+
219
+ def set_video_fps(video_fps : Fps) -> Commands:
220
+ return [ '-vf', 'framerate=fps=' + str(video_fps) ]
221
+
222
+
223
+ def set_video_duration(video_duration : Duration) -> Commands:
224
+ return [ '-t', str(video_duration) ]
225
+
226
+
227
+ def capture_video() -> Commands:
228
+ return [ '-f', 'rawvideo', '-pix_fmt', 'rgb24' ]
229
+
230
+
231
+ def ignore_video_stream() -> Commands:
232
+ return [ '-vn' ]
233
+
234
+
235
def map_nvenc_preset(video_preset : VideoPreset) -> Optional[str]:
	# Collapse the x264-style preset ladder onto NVENC's fast/medium/slow scale;
	# unknown presets map to None.
	nvenc_preset_map =\
	{
		'ultrafast': 'fast', 'superfast': 'fast', 'veryfast': 'fast', 'faster': 'fast', 'fast': 'fast',
		'medium': 'medium',
		'slow': 'slow', 'slower': 'slow', 'veryslow': 'slow'
	}
	return nvenc_preset_map.get(video_preset)
243
+
244
+
245
def map_amf_preset(video_preset : VideoPreset) -> Optional[str]:
	# Collapse the x264-style preset ladder onto AMF's speed/balanced/quality scale;
	# unknown presets map to None.
	amf_preset_map =\
	{
		'ultrafast': 'speed', 'superfast': 'speed', 'veryfast': 'speed',
		'faster': 'balanced', 'fast': 'balanced', 'medium': 'balanced',
		'slow': 'quality', 'slower': 'quality', 'veryslow': 'quality'
	}
	return amf_preset_map.get(video_preset)
253
+
254
+
255
def map_qsv_preset(video_preset : VideoPreset) -> Optional[str]:
	# QSV has nothing faster than veryfast; the remaining presets map one-to-one.
	fast_presets = ( 'ultrafast', 'superfast', 'veryfast' )
	known_presets = ( 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' )
	if video_preset in fast_presets:
		return 'veryfast'
	return video_preset if video_preset in known_presets else None
facefusion/filesystem.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import os
3
+ import shutil
4
+ from typing import List, Optional
5
+
6
+ import facefusion.choices
7
+
8
+
9
+ def get_file_size(file_path : str) -> int:
10
+ if is_file(file_path):
11
+ return os.path.getsize(file_path)
12
+ return 0
13
+
14
+
15
def get_file_name(file_path : str) -> Optional[str]:
	# Base name without its extension; None when the path yields an empty name.
	stem, _ = os.path.splitext(os.path.basename(file_path))
	return stem or None
21
+
22
+
23
def get_file_extension(file_path : str) -> Optional[str]:
	# Lower-cased extension including the leading dot; None when there is none.
	_, extension = os.path.splitext(file_path)
	return extension.lower() or None
29
+
30
+
31
def get_file_format(file_path : str) -> Optional[str]:
	# Normalise the extension into a canonical format name ('jpeg', 'tiff', ...).
	file_extension = get_file_extension(file_path)

	if not file_extension:
		return None
	format_aliases = { '.jpg': 'jpeg', '.tif': 'tiff' }
	return format_aliases.get(file_extension, file_extension.lstrip('.'))
41
+
42
+
43
def same_file_extension(first_file_path : str, second_file_path : str) -> bool:
	"""
	Fork change: the check that the target and output files share an extension
	is disabled — always returns True so modules do not abort on mismatched
	extensions (the fork forces a .png output regardless of the target format).
	"""
	return True
49
+
50
+
51
+ def is_file(file_path : str) -> bool:
52
+ if file_path:
53
+ return os.path.isfile(file_path)
54
+ return False
55
+
56
+
57
+ def is_audio(audio_path : str) -> bool:
58
+ return is_file(audio_path) and get_file_format(audio_path) in facefusion.choices.audio_formats
59
+
60
+
61
+ def has_audio(audio_paths : List[str]) -> bool:
62
+ if audio_paths:
63
+ return any(map(is_audio, audio_paths))
64
+ return False
65
+
66
+
67
+ def are_audios(audio_paths : List[str]) -> bool:
68
+ if audio_paths:
69
+ return all(map(is_audio, audio_paths))
70
+ return False
71
+
72
+
73
def is_image(image_path : str) -> bool:
	"""
	Return True when image_path points to an existing image file, accepting
	either a known extension or (fallback) a PNG magic-number signature for
	files saved without a proper extension.
	"""
	if image_path is None:
		return False
	if not os.path.isfile(image_path):
		return False

	# Fast path: trust a known image extension.
	extension = os.path.splitext(image_path)[1].lower()
	if extension in ( '.jpg', '.jpeg', '.png', '.webp' ):
		return True

	# Unknown extension: sniff the file header instead.
	try:
		with open(image_path, 'rb') as image_file:
			header = image_file.read(8)
		if header.startswith(b'\x89PNG\r\n\x1a\n'):
			return True
	except OSError:
		# Fix: narrow the bare 'except:' to I/O errors so programming errors
		# and KeyboardInterrupt are no longer silently swallowed.
		pass

	return False
98
+
99
+
100
+ def has_image(image_paths : List[str]) -> bool:
101
+ if image_paths:
102
+ return any(is_image(image_path) for image_path in image_paths)
103
+ return False
104
+
105
+
106
+ def are_images(image_paths : List[str]) -> bool:
107
+ if image_paths:
108
+ return all(map(is_image, image_paths))
109
+ return False
110
+
111
+
112
+ def is_video(video_path : str) -> bool:
113
+ return is_file(video_path) and get_file_format(video_path) in facefusion.choices.video_formats
114
+
115
+
116
+ def has_video(video_paths : List[str]) -> bool:
117
+ if video_paths:
118
+ return any(map(is_video, video_paths))
119
+ return False
120
+
121
+
122
def are_videos(video_paths : List[str]) -> bool:
	"""
	True only when every given path is a video; False for an empty list.

	Fix: this used any(), so a single valid video made a mixed list pass —
	now all(), consistent with are_audios and are_images.
	"""
	if video_paths:
		return all(map(is_video, video_paths))
	return False
126
+
127
+
128
+ def filter_audio_paths(paths : List[str]) -> List[str]:
129
+ if paths:
130
+ return [ path for path in paths if is_audio(path) ]
131
+ return []
132
+
133
+
134
+ def filter_image_paths(paths : List[str]) -> List[str]:
135
+ if paths:
136
+ return [ path for path in paths if is_image(path) ]
137
+ return []
138
+
139
+
140
+ def copy_file(file_path : str, move_path : str) -> bool:
141
+ if is_file(file_path):
142
+ shutil.copy(file_path, move_path)
143
+ return is_file(move_path)
144
+ return False
145
+
146
+
147
+ def move_file(file_path : str, move_path : str) -> bool:
148
+ if is_file(file_path):
149
+ shutil.move(file_path, move_path)
150
+ return not is_file(file_path) and is_file(move_path)
151
+ return False
152
+
153
+
154
+ def remove_file(file_path : str) -> bool:
155
+ if is_file(file_path):
156
+ os.remove(file_path)
157
+ return not is_file(file_path)
158
+ return False
159
+
160
+
161
+ def resolve_file_paths(directory_path : str) -> List[str]:
162
+ file_paths : List[str] = []
163
+
164
+ if is_directory(directory_path):
165
+ file_names_and_extensions = sorted(os.listdir(directory_path))
166
+
167
+ for file_name_and_extension in file_names_and_extensions:
168
+ if not file_name_and_extension.startswith(('.', '__')):
169
+ file_path = os.path.join(directory_path, file_name_and_extension)
170
+ file_paths.append(file_path)
171
+
172
+ return file_paths
173
+
174
+
175
+ def resolve_file_pattern(file_pattern : str) -> List[str]:
176
+ if in_directory(file_pattern):
177
+ return sorted(glob.glob(file_pattern))
178
+ return []
179
+
180
+
181
+ def is_directory(directory_path : str) -> bool:
182
+ if directory_path:
183
+ return os.path.isdir(directory_path)
184
+ return False
185
+
186
+
187
+ def in_directory(file_path : str) -> bool:
188
+ if file_path:
189
+ directory_path = os.path.dirname(file_path)
190
+ if directory_path:
191
+ return not is_directory(file_path) and is_directory(directory_path)
192
+ return False
193
+
194
+
195
+ def create_directory(directory_path : str) -> bool:
196
+ if directory_path and not is_file(directory_path):
197
+ os.makedirs(directory_path, exist_ok = True)
198
+ return is_directory(directory_path)
199
+ return False
200
+
201
+
202
+ def remove_directory(directory_path : str) -> bool:
203
+ if is_directory(directory_path):
204
+ shutil.rmtree(directory_path, ignore_errors = True)
205
+ return not is_directory(directory_path)
206
+ return False
207
+
208
+
209
def resolve_relative_path(path : str) -> str:
	# Resolve a path relative to this module's directory into an absolute path.
	base_directory = os.path.dirname(__file__)
	return os.path.abspath(os.path.join(base_directory, path))
facefusion/hash_helper.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import zlib
3
+ from typing import Optional
4
+
5
+ from facefusion.filesystem import get_file_name, is_file
6
+
7
+
8
def create_hash(content : bytes) -> str:
	# CRC32 of the payload rendered as a fixed-width, zero-padded 8-digit hex string.
	checksum = zlib.crc32(content)
	return '{:08x}'.format(checksum)
10
+
11
+
12
def validate_hash(validate_path : str) -> bool:
	"""
	Compare the CRC32 of validate_path against its '<name>.hash' side file.
	Returns False when the side file is missing.
	"""
	hash_path = get_hash_path(validate_path)

	if is_file(hash_path):
		with open(hash_path) as hash_file:
			hash_content = hash_file.read()

		with open(validate_path, 'rb') as validate_file:
			validate_content = validate_file.read()

		return create_hash(validate_content) == hash_content
	return False
24
+
25
+
26
def get_hash_path(validate_path : str) -> Optional[str]:
	"""
	Derive the side-car '<name>.hash' path for an existing file.

	Returns None for missing files or for paths without a usable file name
	(fix: a None name previously raised TypeError on the '+' concatenation).
	"""
	if is_file(validate_path):
		validate_directory_path, file_name_and_extension = os.path.split(validate_path)
		validate_file_name = get_file_name(file_name_and_extension)

		if validate_file_name:
			return os.path.join(validate_directory_path, validate_file_name + '.hash')
	return None
facefusion/inference_manager.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ from time import sleep
3
+ from typing import List
4
+
5
+ from onnxruntime import InferenceSession
6
+
7
+ from facefusion import process_manager, state_manager
8
+ from facefusion.app_context import detect_app_context
9
+ from facefusion.execution import create_inference_session_providers
10
+ from facefusion.filesystem import is_file
11
+ from facefusion.types import DownloadSet, ExecutionProvider, InferencePool, InferencePoolSet
12
+
13
# Process wide registry of inference pools, partitioned by application context
# ('cli' vs 'ui') so both front ends can reuse already created sessions.
INFERENCE_POOL_SET : InferencePoolSet =\
{
	'cli': {},
	'ui': {}
}


def get_inference_pool(module_name : str, model_names : List[str], model_source_set : DownloadSet) -> InferencePool:
	# Return (and lazily create) the inference pool for the current context and configuration.
	# Block while checks are in flight so sessions are not created against partial model files.
	while process_manager.is_checking():
		sleep(0.5)
	execution_device_id = state_manager.get_item('execution_device_id')
	execution_providers = resolve_execution_providers(module_name)
	app_context = detect_app_context()
	# Cache key combining module, models, device and providers.
	inference_context = get_inference_context(module_name, model_names, execution_device_id, execution_providers)

	# Share a pool that the other app context already created for the same configuration.
	if app_context == 'cli' and INFERENCE_POOL_SET.get('ui').get(inference_context):
		INFERENCE_POOL_SET['cli'][inference_context] = INFERENCE_POOL_SET.get('ui').get(inference_context)
	if app_context == 'ui' and INFERENCE_POOL_SET.get('cli').get(inference_context):
		INFERENCE_POOL_SET['ui'][inference_context] = INFERENCE_POOL_SET.get('cli').get(inference_context)
	# Create the pool on first use for this context.
	if not INFERENCE_POOL_SET.get(app_context).get(inference_context):
		INFERENCE_POOL_SET[app_context][inference_context] = create_inference_pool(model_source_set, execution_device_id, execution_providers)

	return INFERENCE_POOL_SET.get(app_context).get(inference_context)
36
+
37
+
38
def create_inference_pool(model_source_set : DownloadSet, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferencePool:
	"""Build one inference session per downloaded model file in model_source_set."""
	inference_pool : InferencePool = {}

	for model_name, model_source in model_source_set.items():
		model_path = model_source.get('path')
		if is_file(model_path):
			inference_pool[model_name] = create_inference_session(model_path, execution_device_id, execution_providers)
	return inference_pool
47
+
48
+
49
def clear_inference_pool(module_name : str, model_names : List[str]) -> None:
	"""Drop the pooled inference sessions for the current app context and configuration."""
	execution_device_id = state_manager.get_item('execution_device_id')
	execution_providers = resolve_execution_providers(module_name)
	app_context = detect_app_context()
	inference_context = get_inference_context(module_name, model_names, execution_device_id, execution_providers)
	context_pool_set = INFERENCE_POOL_SET.get(app_context)

	if context_pool_set.get(inference_context):
		del context_pool_set[inference_context]
57
+
58
+
59
def create_inference_session(model_path : str, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferenceSession:
	"""Open an ONNX Runtime session for model_path bound to the resolved providers."""
	providers = create_inference_session_providers(execution_device_id, execution_providers)
	return InferenceSession(model_path, providers = providers)
62
+
63
+
64
def get_inference_context(module_name : str, model_names : List[str], execution_device_id : str, execution_providers : List[ExecutionProvider]) -> str:
	"""Compose a dot separated cache key from module, models, device and providers."""
	context_parts = [ module_name, *model_names, execution_device_id, *execution_providers ]
	return '.'.join(context_parts)
67
+
68
+
69
def resolve_execution_providers(module_name : str) -> List[ExecutionProvider]:
	"""Let the module override its execution providers, falling back to the global state."""
	module = importlib.import_module(module_name)

	if hasattr(module, 'resolve_execution_providers'):
		return module.resolve_execution_providers()
	return state_manager.get_item('execution_providers')
facefusion/installer.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import signal
4
+ import subprocess
5
+ import sys
6
+ from argparse import ArgumentParser, HelpFormatter
7
+ from functools import partial
8
+ from types import FrameType
9
+
10
+ from facefusion import metadata, wording
11
+ from facefusion.common_helper import is_linux, is_windows
12
+
13
# Mapping of installer choice to the (package name, version) of the matching
# onnxruntime distribution. Platform specific variants are only offered where available.
ONNXRUNTIME_SET =\
{
	'default': ('onnxruntime', '1.22.0')
}
if is_windows() or is_linux():
	ONNXRUNTIME_SET['cuda'] = ('onnxruntime-gpu', '1.22.0')
	ONNXRUNTIME_SET['openvino'] = ('onnxruntime-openvino', '1.22.0')
if is_windows():
	# NOTE(review): directml is pinned to an older release than the other variants - verify this is intentional upstream.
	ONNXRUNTIME_SET['directml'] = ('onnxruntime-directml', '1.17.3')
if is_linux():
	ONNXRUNTIME_SET['rocm'] = ('onnxruntime-rocm', '1.21.0')
24
+
25
+
26
def cli() -> None:
	"""Entry point of the installer: wire the SIGINT handler, parse arguments and run."""
	signal.signal(signal.SIGINT, signal_exit)
	formatter = partial(HelpFormatter, max_help_position = 50)
	program = ArgumentParser(formatter_class = formatter)
	program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIME_SET.keys(), required = True)
	program.add_argument('--skip-conda', help = wording.get('help.skip_conda'), action = 'store_true')
	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
	run(program)
33
+
34
+
35
def signal_exit(signum : int, frame : FrameType) -> None:
	"""Signal handler that terminates the process with exit code zero."""
	sys.exit(0)
37
+
38
+
39
def run(program : ArgumentParser) -> None:
	"""
	Install the project requirements and the selected onnxruntime distribution.

	Requires an activated conda environment unless --skip-conda is passed. For the
	cuda variant the conda environment variables are extended with library paths.

	:param program: configured argument parser providing --onnxruntime and --skip-conda
	"""
	args = program.parse_args()
	has_conda = 'CONDA_PREFIX' in os.environ
	onnxruntime_name, onnxruntime_version = ONNXRUNTIME_SET.get(args.onnxruntime)

	if not args.skip_conda and not has_conda:
		sys.stdout.write(wording.get('conda_not_activated') + os.linesep)
		sys.exit(1)

	with open('requirements.txt') as file:
		for line in file.readlines():
			# Fix: pass the stripped requirement (not the raw line with its trailing newline)
			# to pip, and skip blank lines instead of invoking "pip install ''".
			requirement = line.strip()
			# Any onnxruntime pin is skipped here - the chosen variant is installed explicitly below.
			if requirement and not requirement.startswith('onnxruntime'):
				subprocess.call([ shutil.which('pip'), 'install', requirement, '--force-reinstall' ])

	if args.onnxruntime == 'rocm':
		python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)

		if python_id in [ 'cp310', 'cp312' ]:
			# rocm wheels are fetched from the AMD repository for the supported python versions
			wheel_name = 'onnxruntime_rocm-' + onnxruntime_version + '-' + python_id + '-' + python_id + '-linux_x86_64.whl'
			wheel_url = 'https://repo.radeon.com/rocm/manylinux/rocm-rel-6.4/' + wheel_name
			subprocess.call([ shutil.which('pip'), 'install', wheel_url, '--force-reinstall' ])
	else:
		subprocess.call([ shutil.which('pip'), 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ])

	if args.onnxruntime == 'cuda' and has_conda:
		library_paths = []

		if is_linux():
			if os.getenv('LD_LIBRARY_PATH'):
				library_paths = os.getenv('LD_LIBRARY_PATH').split(os.pathsep)

			python_id = 'python' + str(sys.version_info.major) + '.' + str(sys.version_info.minor)
			library_paths.extend(
			[
				os.path.join(os.getenv('CONDA_PREFIX'), 'lib'),
				os.path.join(os.getenv('CONDA_PREFIX'), 'lib', python_id, 'site-packages', 'tensorrt_libs')
			])
			# deduplicate while preserving order and drop paths that do not exist
			library_paths = list(dict.fromkeys([ library_path for library_path in library_paths if os.path.exists(library_path) ]))

			subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'LD_LIBRARY_PATH=' + os.pathsep.join(library_paths) ])

		if is_windows():
			if os.getenv('PATH'):
				library_paths = os.getenv('PATH').split(os.pathsep)

			library_paths.extend(
			[
				os.path.join(os.getenv('CONDA_PREFIX'), 'Lib'),
				os.path.join(os.getenv('CONDA_PREFIX'), 'Lib', 'site-packages', 'tensorrt_libs')
			])
			# deduplicate while preserving order and drop paths that do not exist
			library_paths = list(dict.fromkeys([ library_path for library_path in library_paths if os.path.exists(library_path) ]))

			subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'PATH=' + os.pathsep.join(library_paths) ])

	if args.onnxruntime == 'directml':
		# force a numpy 1.x pin for the directml variant
		# NOTE(review): presumably required by onnxruntime-directml 1.17.3 - confirm upstream.
		subprocess.call([ shutil.which('pip'), 'install', 'numpy==1.26.4', '--force-reinstall' ])
facefusion/jobs/__init__.py ADDED
File without changes
facefusion/jobs/job_helper.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from datetime import datetime
3
+ from typing import Optional
4
+
5
+ from facefusion.filesystem import get_file_extension, get_file_name
6
+
7
+
8
def get_step_output_path(job_id : str, step_index : int, output_path : str) -> Optional[str]:
	"""
	Derive the per-step output path by injecting the job id and step index before
	the file extension, e.g. out.mp4 -> out-<job_id>-<step_index>.mp4.

	Fix: the original bound the split file name to "_" (the throwaway-name
	convention) and then read it back - renamed to an explicit variable.

	:param job_id: identifier of the owning job
	:param step_index: zero based index of the step
	:param output_path: final output path of the step, may be empty
	:return: the derived path, or None when output_path is empty
	"""
	if not output_path:
		return None
	output_directory_path, output_file_name_and_extension = os.path.split(output_path)
	output_file_name = get_file_name(output_file_name_and_extension)
	output_file_extension = get_file_extension(output_file_name_and_extension)
	return os.path.join(output_directory_path, output_file_name + '-' + job_id + '-' + str(step_index) + output_file_extension)
15
+
16
+
17
def suggest_job_id(job_prefix : str = 'job') -> str:
	"""Generate a job id from the prefix and the current timestamp."""
	timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
	return job_prefix + '-' + timestamp
facefusion/jobs/job_list.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from typing import Optional, Tuple
3
+
4
+ from facefusion.date_helper import describe_time_ago
5
+ from facefusion.jobs import job_manager
6
+ from facefusion.types import JobStatus, TableContents, TableHeaders
7
+
8
+
9
def compose_job_list(job_status : JobStatus) -> Tuple[TableHeaders, TableContents]:
	"""
	Build table headers and rows for every job with the given status.

	Fix: the original iterated with enumerate() but never used the index, and
	re-fetched each job from the set - iterate the (job_id, job) pairs directly.
	Jobs that fail validation are skipped.

	:param job_status: status whose jobs should be listed
	:return: the table headers and one row per valid job
	"""
	jobs = job_manager.find_jobs(job_status)
	job_headers : TableHeaders = [ 'job id', 'steps', 'date created', 'date updated', 'job status' ]
	job_contents : TableContents = []

	for job_id, job in jobs.items():
		if job_manager.validate_job(job_id):
			step_total = job_manager.count_step_total(job_id)
			date_created = prepare_describe_datetime(job.get('date_created'))
			date_updated = prepare_describe_datetime(job.get('date_updated'))
			job_contents.append(
			[
				job_id,
				step_total,
				date_created,
				date_updated,
				job_status
			])
	return job_headers, job_contents
29
+
30
+
31
def prepare_describe_datetime(date_time : Optional[str]) -> Optional[str]:
	"""Convert an ISO formatted timestamp into a human readable time-ago phrase."""
	if not date_time:
		return None
	return describe_time_ago(datetime.fromisoformat(date_time))
facefusion/jobs/job_manager.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from copy import copy
3
+ from typing import List, Optional
4
+
5
+ import facefusion.choices
6
+ from facefusion.date_helper import get_current_date_time
7
+ from facefusion.filesystem import create_directory, get_file_name, is_directory, is_file, move_file, remove_directory, remove_file, resolve_file_pattern
8
+ from facefusion.jobs.job_helper import get_step_output_path
9
+ from facefusion.json import read_json, write_json
10
+ from facefusion.types import Args, Job, JobSet, JobStatus, JobStep, JobStepStatus
11
+
12
+ JOBS_PATH : Optional[str] = None
13
+
14
+
15
def init_jobs(jobs_path : str) -> bool:
	"""Set the global jobs path and create one subdirectory per job status."""
	global JOBS_PATH

	JOBS_PATH = jobs_path
	job_status_paths = [ os.path.join(JOBS_PATH, job_status) for job_status in facefusion.choices.job_statuses ]

	for job_status_path in job_status_paths:
		create_directory(job_status_path)
	return all(map(is_directory, job_status_paths))


def clear_jobs(jobs_path : str) -> bool:
	"""Remove the whole jobs directory tree."""
	return remove_directory(jobs_path)


def create_job(job_id : str) -> bool:
	"""Create an empty drafted job file for job_id."""
	job : Job =\
	{
		'version': '1',
		'date_created': get_current_date_time().isoformat(),
		'date_updated': None,
		'steps': []
	}
	return create_job_file(job_id, job)
40
+
41
+
42
def submit_job(job_id : str) -> bool:
	"""Queue a drafted job that has at least one step."""
	drafted_job_ids = find_job_ids('drafted')

	if job_id not in drafted_job_ids or not get_steps(job_id):
		return False
	return set_steps_status(job_id, 'queued') and move_job_file(job_id, 'queued')


def submit_jobs(halt_on_error : bool) -> bool:
	"""Queue every drafted job; False when none exist or any submission fails."""
	drafted_job_ids = find_job_ids('drafted')

	if not drafted_job_ids:
		return False
	has_error = False
	for job_id in drafted_job_ids:
		if not submit_job(job_id):
			if halt_on_error:
				return False
			has_error = True
	return not has_error


def delete_job(job_id : str) -> bool:
	"""Delete the job file of job_id."""
	return delete_job_file(job_id)


def delete_jobs(halt_on_error : bool) -> bool:
	"""Delete every job regardless of status; False when none exist or any deletion fails."""
	job_ids = find_job_ids('drafted') + find_job_ids('queued') + find_job_ids('failed') + find_job_ids('completed')

	if not job_ids:
		return False
	has_error = False
	for job_id in job_ids:
		if not delete_job(job_id):
			if halt_on_error:
				return False
			has_error = True
	return not has_error
81
+
82
+
83
def find_jobs(job_status : JobStatus) -> JobSet:
	"""Map every job id with the given status to its parsed job file."""
	return { job_id: read_job_file(job_id) for job_id in find_job_ids(job_status) }


def find_job_ids(job_status : JobStatus) -> List[str]:
	"""List job ids for a status, ordered by file modification time."""
	job_pattern = os.path.join(JOBS_PATH, job_status, '*.json')
	job_paths = sorted(resolve_file_pattern(job_pattern), key = os.path.getmtime)
	return [ get_file_name(job_path) for job_path in job_paths ]


def validate_job(job_id : str) -> bool:
	"""Check that the job file exists and carries every mandatory field."""
	job = read_job_file(job_id)
	required_keys = ('version', 'date_created', 'date_updated', 'steps')
	return bool(job) and all(key in job for key in required_keys)


def has_step(job_id : str, step_index : int) -> bool:
	"""Check that step_index addresses an existing step of the job."""
	return 0 <= step_index < count_step_total(job_id)
112
+
113
+
114
def add_step(job_id : str, step_args : Args) -> bool:
	"""Append a drafted step with the given arguments to the job."""
	job = read_job_file(job_id)

	if not job:
		return False
	job.get('steps').append(
	{
		'args': step_args,
		'status': 'drafted'
	})
	return update_job_file(job_id, job)


def remix_step(job_id : str, step_index : int, step_args : Args) -> bool:
	"""Append a step that consumes the output of the step at step_index (negative index selects the last step)."""
	steps = get_steps(job_id)
	step_args = copy(step_args)

	if step_index and step_index < 0:
		step_index = count_step_total(job_id) - 1

	if not has_step(job_id, step_index):
		return False
	output_path = steps[step_index].get('args').get('output_path')
	step_args['target_path'] = get_step_output_path(job_id, step_index, output_path)
	return add_step(job_id, step_args)


def insert_step(job_id : str, step_index : int, step_args : Args) -> bool:
	"""Insert a drafted step before step_index (negative index selects the last step)."""
	job = read_job_file(job_id)
	step_args = copy(step_args)

	if step_index and step_index < 0:
		step_index = count_step_total(job_id) - 1

	if not job or not has_step(job_id, step_index):
		return False
	job.get('steps').insert(step_index,
	{
		'args': step_args,
		'status': 'drafted'
	})
	return update_job_file(job_id, job)


def remove_step(job_id : str, step_index : int) -> bool:
	"""Remove the step at step_index (negative index selects the last step)."""
	job = read_job_file(job_id)

	if step_index and step_index < 0:
		step_index = count_step_total(job_id) - 1

	if not job or not has_step(job_id, step_index):
		return False
	job.get('steps').pop(step_index)
	return update_job_file(job_id, job)


def get_steps(job_id : str) -> List[JobStep]:
	"""Return the job's steps, or an empty list when the job cannot be read."""
	job = read_job_file(job_id)
	return job.get('steps') if job else []


def count_step_total(job_id : str) -> int:
	"""Return the number of steps of the job."""
	steps = get_steps(job_id)
	return len(steps) if steps else 0
184
+
185
+
186
def set_step_status(job_id : str, step_index : int, step_status : JobStepStatus) -> bool:
	"""Update the status of a single step and persist the job file."""
	job = read_job_file(job_id)

	if not job:
		return False
	if not has_step(job_id, step_index):
		return False
	job.get('steps')[step_index]['status'] = step_status
	return update_job_file(job_id, job)


def set_steps_status(job_id : str, step_status : JobStepStatus) -> bool:
	"""Update the status of every step and persist the job file."""
	job = read_job_file(job_id)

	if not job:
		return False
	for step in job.get('steps'):
		step['status'] = step_status
	return update_job_file(job_id, job)


def read_job_file(job_id : str) -> Optional[Job]:
	"""Load and parse the job file of job_id, or None when unavailable."""
	return read_json(find_job_path(job_id)) #type:ignore[return-value]


def create_job_file(job_id : str, job : Job) -> bool:
	"""Write a new drafted job file unless one already exists for job_id."""
	if is_file(find_job_path(job_id)):
		return False
	job_create_path = suggest_job_path(job_id, 'drafted')
	return write_json(job_create_path, job) #type:ignore[arg-type]


def update_job_file(job_id : str, job : Job) -> bool:
	"""Persist job to its existing file, refreshing the date_updated stamp."""
	job_path = find_job_path(job_id)

	if not is_file(job_path):
		return False
	job['date_updated'] = get_current_date_time().isoformat()
	return write_json(job_path, job) #type:ignore[arg-type]


def move_job_file(job_id : str, job_status : JobStatus) -> bool:
	"""Move the job file into the directory of the given status."""
	job_path = find_job_path(job_id)
	return move_file(job_path, suggest_job_path(job_id, job_status))


def delete_job_file(job_id : str) -> bool:
	"""Remove the job file of job_id."""
	return remove_file(find_job_path(job_id))
239
+
240
+
241
def suggest_job_path(job_id : str, job_status : JobStatus) -> Optional[str]:
	"""Build the path a job file would have under the given status directory."""
	job_file_name = get_job_file_name(job_id)

	if not job_file_name:
		return None
	return os.path.join(JOBS_PATH, job_status, job_file_name)


def find_job_path(job_id : str) -> Optional[str]:
	"""Locate the existing job file of job_id across all status directories."""
	job_file_name = get_job_file_name(job_id)

	if not job_file_name:
		return None
	for job_status in facefusion.choices.job_statuses:
		job_pattern = os.path.join(JOBS_PATH, job_status, job_file_name)
		for job_path in resolve_file_pattern(job_pattern):
			return job_path
	return None
260
+
261
+
262
def get_job_file_name(job_id : str) -> Optional[str]:
	"""Return the JSON file name for job_id, or None for an empty id."""
	return job_id + '.json' if job_id else None
facefusion/jobs/job_runner.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from facefusion.ffmpeg import concat_video
2
+ from facefusion.filesystem import are_images, are_videos, move_file, remove_file
3
+ from facefusion.jobs import job_helper, job_manager
4
+ from facefusion.types import JobOutputSet, JobStep, ProcessStep
5
+
6
+
7
def run_job(job_id : str, process_step : ProcessStep) -> bool:
	"""Execute a queued job end to end and move it to completed or failed."""
	if job_id not in job_manager.find_job_ids('queued'):
		return False
	if run_steps(job_id, process_step) and finalize_steps(job_id):
		clean_steps(job_id)
		return job_manager.move_job_file(job_id, 'completed')
	clean_steps(job_id)
	job_manager.move_job_file(job_id, 'failed')
	return False


def run_jobs(process_step : ProcessStep, halt_on_error : bool) -> bool:
	"""Run every queued job; False when none exist or any job fails."""
	queued_job_ids = job_manager.find_job_ids('queued')

	if not queued_job_ids:
		return False
	has_error = False
	for job_id in queued_job_ids:
		if not run_job(job_id, process_step):
			if halt_on_error:
				return False
			has_error = True
	return not has_error
31
+
32
+
33
def retry_job(job_id : str, process_step : ProcessStep) -> bool:
	"""Requeue a failed job and run it again."""
	if job_id not in job_manager.find_job_ids('failed'):
		return False
	return job_manager.set_steps_status(job_id, 'queued') and job_manager.move_job_file(job_id, 'queued') and run_job(job_id, process_step)


def retry_jobs(process_step : ProcessStep, halt_on_error : bool) -> bool:
	"""Retry every failed job; False when none exist or any retry fails."""
	failed_job_ids = job_manager.find_job_ids('failed')

	if not failed_job_ids:
		return False
	has_error = False
	for job_id in failed_job_ids:
		if not retry_job(job_id, process_step):
			if halt_on_error:
				return False
			has_error = True
	return not has_error
53
+
54
+
55
def run_step(job_id : str, step_index : int, step : JobStep, process_step : ProcessStep) -> bool:
	"""Run one step, moving its output to the per-step path on success."""
	step_args = step.get('args')

	if not job_manager.set_step_status(job_id, step_index, 'started') or not process_step(job_id, step_index, step_args):
		job_manager.set_step_status(job_id, step_index, 'failed')
		return False
	output_path = step_args.get('output_path')
	step_output_path = job_helper.get_step_output_path(job_id, step_index, output_path)
	return move_file(output_path, step_output_path) and job_manager.set_step_status(job_id, step_index, 'completed')


def run_steps(job_id : str, process_step : ProcessStep) -> bool:
	"""Run all steps in order; False when the job has no steps or any step fails."""
	steps = job_manager.get_steps(job_id)

	if not steps:
		return False
	return all(run_step(job_id, step_index, step, process_step) for step_index, step in enumerate(steps))
76
+
77
+
78
def finalize_steps(job_id : str) -> bool:
	"""Merge per-step outputs into the final outputs: concat for videos, move for images."""
	output_set = collect_output_set(job_id)

	for output_path, temp_output_paths in output_set.items():
		if are_videos(temp_output_paths) and not concat_video(output_path, temp_output_paths):
			return False
		if are_images(temp_output_paths):
			for temp_output_path in temp_output_paths:
				if not move_file(temp_output_path, output_path):
					return False
	return True


def clean_steps(job_id : str) -> bool:
	"""Delete every temporary per-step output of the job."""
	for temp_output_paths in collect_output_set(job_id).values():
		for temp_output_path in temp_output_paths:
			if not remove_file(temp_output_path):
				return False
	return True
100
+
101
+
102
def collect_output_set(job_id : str) -> JobOutputSet:
	"""Group the per-step output paths of the job by their final output path."""
	job_output_set : JobOutputSet = {}

	for step_index, step in enumerate(job_manager.get_steps(job_id)):
		output_path = step.get('args').get('output_path')

		if output_path:
			step_output_path = job_manager.get_step_output_path(job_id, step_index, output_path)
			job_output_set.setdefault(output_path, []).append(step_output_path)
	return job_output_set
facefusion/jobs/job_store.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ from facefusion.types import JobStore
4
+
5
# Registry of state keys that apply to a whole job vs a single step.
# (Annotation quoted so the module does not depend on evaluating JobStore at import time.)
JOB_STORE : 'JobStore' =\
{
	'job_keys': [],
	'step_keys': []
}


def get_job_keys() -> List[str]:
	"""Return the registered job level keys."""
	return JOB_STORE.get('job_keys')


def get_step_keys() -> List[str]:
	"""Return the registered step level keys."""
	return JOB_STORE.get('step_keys')


def register_job_keys(job_keys : List[str]) -> None:
	"""
	Register keys that apply to a whole job.

	Fix: the parameter was misleadingly named step_keys (copy-paste) although it
	feeds the job_keys registry - renamed to match its role.
	"""
	for job_key in job_keys:
		JOB_STORE['job_keys'].append(job_key)


def register_step_keys(step_keys : List[str]) -> None:
	"""
	Register keys that apply to a single step.

	Fix: the parameter was misleadingly named job_keys (copy-paste) although it
	feeds the step_keys registry - renamed to match its role.
	"""
	for step_key in step_keys:
		JOB_STORE['step_keys'].append(step_key)
facefusion/json.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from json import JSONDecodeError
3
+ from typing import Optional
4
+
5
+ from facefusion.filesystem import is_file
6
+ from facefusion.types import Content
7
+
8
+
9
def read_json(json_path : str) -> Optional[Content]:
	"""Parse json_path and return its content, or None when missing or malformed."""
	if not is_file(json_path):
		return None
	try:
		with open(json_path) as json_file:
			return json.load(json_file)
	except JSONDecodeError:
		return None
17
+
18
+
19
def write_json(json_path : str, content : Content) -> bool:
	"""Serialize content to json_path with indentation and report whether the file exists."""
	serialized_content = json.dumps(content, indent = 4)
	with open(json_path, 'w') as json_file:
		json_file.write(serialized_content)
	return is_file(json_path)
facefusion/logger.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from logging import Logger, basicConfig, getLogger
2
+
3
+ import facefusion.choices
4
+ from facefusion.common_helper import get_first, get_last
5
+ from facefusion.types import LogLevel
6
+
7
+
8
def init(log_level : LogLevel) -> None:
	"""Configure the message-only root format and apply the level to the package logger."""
	basicConfig(format = '%(message)s')
	package_log_level = facefusion.choices.log_level_set.get(log_level)
	get_package_logger().setLevel(package_log_level)
11
+
12
+
13
def get_package_logger() -> Logger:
	"""Return the shared logger of the facefusion package."""
	package_logger = getLogger('facefusion')
	return package_logger
15
+
16
+
17
def debug(message : str, module_name : str) -> None:
	"""Log message at debug level, prefixed with the module tag."""
	get_package_logger().debug(create_message(message, module_name))


def info(message : str, module_name : str) -> None:
	"""Log message at info level, prefixed with the module tag."""
	get_package_logger().info(create_message(message, module_name))


def warn(message : str, module_name : str) -> None:
	"""Log message at warning level, prefixed with the module tag."""
	get_package_logger().warning(create_message(message, module_name))


def error(message : str, module_name : str) -> None:
	"""Log message at error level, prefixed with the module tag."""
	get_package_logger().error(create_message(message, module_name))
31
+
32
+
33
def create_message(message : str, module_name : str) -> str:
	"""Prefix message with an upper cased [FIRST.LAST] tag built from the dotted module name."""
	module_names = module_name.split('.')
	first_module_name = get_first(module_names)
	last_module_name = get_last(module_names)

	if not first_module_name or not last_module_name:
		return message
	module_tag = first_module_name.upper() + '.' + last_module_name.upper()
	return '[' + module_tag + '] ' + message
41
+
42
+
43
def enable() -> None:
	"""Turn package logging back on."""
	get_package_logger().disabled = False


def disable() -> None:
	"""Suppress all output of the package logger."""
	get_package_logger().disabled = True
facefusion/memory.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from facefusion.common_helper import is_macos, is_windows
2
+
3
+ if is_windows():
4
+ import ctypes
5
+ else:
6
+ import resource
7
+
8
+
9
def limit_system_memory(system_memory_limit : int = 1) -> bool:
	"""
	Cap the memory usage of the current process.

	:param system_memory_limit: limit scaled by 1024 ** 3 (gigabytes) on most platforms;
		on macOS the value is scaled by 1024 ** 6 - NOTE(review): this exponent looks
		inconsistent with the gigabyte scaling used elsewhere, confirm upstream intent.
	:return: True when the limit was applied, False when the platform call failed.
	"""
	if is_macos():
		system_memory_limit = system_memory_limit * (1024 ** 6)
	else:
		system_memory_limit = system_memory_limit * (1024 ** 3)
	try:
		if is_windows():
			# Windows: clamp the process working set via the kernel32 API.
			ctypes.windll.kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(system_memory_limit), ctypes.c_size_t(system_memory_limit)) #type:ignore[attr-defined]
		else:
			# POSIX: cap the data segment size of this process.
			resource.setrlimit(resource.RLIMIT_DATA, (system_memory_limit, system_memory_limit))
		return True
	except Exception:
		# Best effort - report failure instead of raising on unsupported platforms.
		return False
facefusion/metadata.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
# Static package metadata exposed through the get() accessor.
METADATA =\
{
	'name': 'FaceFusion',
	'description': 'Industry leading face manipulation platform',
	'version': '3.3.2-Mod',
	'license': 'OpenRAIL-AS',
	'author': 'Henry Ruhs',
	'url': 'https://facefusion.io'
}


def get(key : str) -> Optional[str]:
	"""Look up a metadata value by key, or None when the key is unknown."""
	return METADATA.get(key)