Update app.py
Browse files
app.py
CHANGED
|
@@ -46,19 +46,14 @@ hf_hub_download(repo_id="vladmandic/insightface-faceanalysis", filename="buffalo
|
|
| 46 |
|
| 47 |
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
|
| 48 |
"""Returns the value at the given index of a sequence or mapping.
|
| 49 |
-
|
| 50 |
If the object is a sequence (like list or string), returns the value at the given index.
|
| 51 |
If the object is a mapping (like a dictionary), returns the value at the index-th key.
|
| 52 |
-
|
| 53 |
Some return a dictionary, in these cases, we look for the "results" key
|
| 54 |
-
|
| 55 |
Args:
|
| 56 |
obj (Union[Sequence, Mapping]): The object to retrieve the value from.
|
| 57 |
index (int): The index of the value to retrieve.
|
| 58 |
-
|
| 59 |
Returns:
|
| 60 |
Any: The value at the given index.
|
| 61 |
-
|
| 62 |
Raises:
|
| 63 |
IndexError: If the index is out of bounds for the object and the object is not a mapping.
|
| 64 |
"""
|
|
@@ -130,7 +125,6 @@ add_extra_model_paths()
|
|
| 130 |
|
| 131 |
def import_custom_nodes() -> None:
|
| 132 |
"""Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS
|
| 133 |
-
|
| 134 |
This function sets up a new asyncio event loop, initializes the PromptServer,
|
| 135 |
creates a PromptQueue, and initializes the custom nodes.
|
| 136 |
"""
|
|
@@ -258,7 +252,7 @@ if __name__ == "__main__":
|
|
| 258 |
|
| 259 |
# Вторая группа (обратите внимание — она должна быть на том же уровне, что и первая)
|
| 260 |
with gr.Group():
|
| 261 |
-
input_video = gr.File(label="Target Video (Body)",type="filepath")
|
| 262 |
select_every_nth = gr.Dropdown(choices=[1, 2], value=1, label='"1" = choose every frame, "2" - every second frame')
|
| 263 |
loop_count = gr.Dropdown(choices=[0, 1, 2, 3, 4], value=0, label='"Loop_Count" = repeat loop append to your video')
|
| 264 |
pingpong_checkbox = gr.Checkbox(label='"Pingpong" = reverse append to your video', value=False)
|
|
@@ -279,11 +273,8 @@ if __name__ == "__main__":
|
|
| 279 |
***Hyperswap_1b_256.onnx is the best (in most cases) - but the model has an internal bug - sometimes it produces a "FAIL" swap (it runs, but does not do any swapping on SOME faces; so, for stability, do a test image-to-image face swap on one screenshot from your video first). The most stable version is Hyperswap_1a_256.onnx.
|
| 280 |
|
| 281 |
***Target_Face_Index: Index_0 = First Face. To select another target face, switch to Index_1, Index_2, etc.
|
| 282 |
-
|
| 283 |
***Note: "1" or "2" - 'every frame' or 'every second frame' - if your video is 60 fps or 48 fps, you can choose "2" to select every 2nd frame, halving the total number of frames in the video (giving a 30 fps or 24 fps video, respectively).
|
| 284 |
-
|
| 285 |
***Video 05 sec * 24 fps = 120 frames/720p = takes 275 sec (4.5 min) to generate. Overall: SECONDS --> MINUTES.
|
| 286 |
-
|
| 287 |
***To cancel job - just close your browser's page.
|
| 288 |
|
| 289 |
***If needed, use AdvancedLivePortrait to correct faces on video before swapping. Here is [workflow](https://openart.ai/workflows/ocelot_vibrant_0/advanced-liveportrait-for-video-as-source/hV07PExjpK3JEd6kNnkr) for ComfyUI.
|
|
@@ -306,4 +297,4 @@ if __name__ == "__main__":
|
|
| 306 |
outputs=[output_video]
|
| 307 |
)
|
| 308 |
|
| 309 |
-
app.launch(server_name="0.0.0.0",server_port=7860,ssr_mode=False)
|
|
|
|
| 46 |
|
| 47 |
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
|
| 48 |
"""Returns the value at the given index of a sequence or mapping.
|
|
|
|
| 49 |
If the object is a sequence (like list or string), returns the value at the given index.
|
| 50 |
If the object is a mapping (like a dictionary), returns the value at the index-th key.
|
|
|
|
| 51 |
Some return a dictionary, in these cases, we look for the "results" key
|
|
|
|
| 52 |
Args:
|
| 53 |
obj (Union[Sequence, Mapping]): The object to retrieve the value from.
|
| 54 |
index (int): The index of the value to retrieve.
|
|
|
|
| 55 |
Returns:
|
| 56 |
Any: The value at the given index.
|
|
|
|
| 57 |
Raises:
|
| 58 |
IndexError: If the index is out of bounds for the object and the object is not a mapping.
|
| 59 |
"""
|
|
|
|
| 125 |
|
| 126 |
def import_custom_nodes() -> None:
|
| 127 |
"""Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS
|
|
|
|
| 128 |
This function sets up a new asyncio event loop, initializes the PromptServer,
|
| 129 |
creates a PromptQueue, and initializes the custom nodes.
|
| 130 |
"""
|
|
|
|
| 252 |
|
| 253 |
# Вторая группа (обратите внимание — она должна быть на том же уровне, что и первая)
|
| 254 |
with gr.Group():
|
| 255 |
+
input_video = gr.File(label="Target Video (Body)",file_types=[".mp4", ".gif"],type="filepath")
|
| 256 |
select_every_nth = gr.Dropdown(choices=[1, 2], value=1, label='"1" = choose every frame, "2" - every second frame')
|
| 257 |
loop_count = gr.Dropdown(choices=[0, 1, 2, 3, 4], value=0, label='"Loop_Count" = repeat loop append to your video')
|
| 258 |
pingpong_checkbox = gr.Checkbox(label='"Pingpong" = reverse append to your video', value=False)
|
|
|
|
| 273 |
***Hyperswap_1b_256.onnx is the best (in most cases) - but the model has an internal bug - sometimes it produces a "FAIL" swap (it runs, but does not do any swapping on SOME faces; so, for stability, do a test image-to-image face swap on one screenshot from your video first). The most stable version is Hyperswap_1a_256.onnx.
|
| 274 |
|
| 275 |
***Target_Face_Index: Index_0 = First Face. To select another target face, switch to Index_1, Index_2, etc.
|
|
|
|
| 276 |
***Note: "1" or "2" - 'every frame' or 'every second frame' - if your video is 60 fps or 48 fps, you can choose "2" to select every 2nd frame, halving the total number of frames in the video (giving a 30 fps or 24 fps video, respectively).
|
|
|
|
| 277 |
***Video 05 sec * 24 fps = 120 frames/720p = takes 275 sec (4.5 min) to generate. Overall: SECONDS --> MINUTES.
|
|
|
|
| 278 |
***To cancel job - just close your browser's page.
|
| 279 |
|
| 280 |
***If needed, use AdvancedLivePortrait to correct faces on video before swapping. Here is [workflow](https://openart.ai/workflows/ocelot_vibrant_0/advanced-liveportrait-for-video-as-source/hV07PExjpK3JEd6kNnkr) for ComfyUI.
|
|
|
|
| 297 |
outputs=[output_video]
|
| 298 |
)
|
| 299 |
|
| 300 |
+
app.launch(server_name="0.0.0.0",server_port=7860,ssr_mode=False)
|