Spaces:
Sleeping
Sleeping
Add actual GPU time tracking logs
Browse files
Added timing logs to track actual GPU time spent vs requested duration:
Logs at start:
[GPU TIMING] Started generation: pipeline=X, image_size=Y, animation=Z, upscale=W
Logs at end:
[GPU TIMING] Completed generation in X.XXs (pipeline=Y, image_size=Z)
Combined with duration debug logs, this provides complete visibility:
- Requested duration (from get_dynamic_duration)
- Actual time spent
- All parameters used
This allows monitoring and tuning duration calculations based on real usage.
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
app.py
CHANGED
|
@@ -671,6 +671,10 @@ def generate_qr_code_unified(
|
|
| 671 |
enable_animation: bool = True,
|
| 672 |
progress=gr.Progress(),
|
| 673 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 674 |
# Only manipulate the text if it's a URL input type
|
| 675 |
qr_text = text_input
|
| 676 |
if input_type == "URL":
|
|
@@ -684,7 +688,7 @@ def generate_qr_code_unified(
|
|
| 684 |
|
| 685 |
with torch.no_grad():
|
| 686 |
if pipeline == "standard":
|
| 687 |
-
|
| 688 |
prompt=prompt,
|
| 689 |
negative_prompt=negative_prompt,
|
| 690 |
qr_text=qr_text,
|
|
@@ -709,9 +713,10 @@ def generate_qr_code_unified(
|
|
| 709 |
variation_steps=variation_steps,
|
| 710 |
enable_animation=enable_animation,
|
| 711 |
gr_progress=progress,
|
| 712 |
-
)
|
|
|
|
| 713 |
else: # artistic
|
| 714 |
-
|
| 715 |
prompt=prompt,
|
| 716 |
negative_prompt=negative_prompt,
|
| 717 |
qr_text=qr_text,
|
|
@@ -743,7 +748,12 @@ def generate_qr_code_unified(
|
|
| 743 |
variation_steps=variation_steps,
|
| 744 |
enable_animation=enable_animation,
|
| 745 |
gr_progress=progress,
|
| 746 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 747 |
|
| 748 |
|
| 749 |
class AnimationHandler:
|
|
|
|
| 671 |
enable_animation: bool = True,
|
| 672 |
progress=gr.Progress(),
|
| 673 |
):
|
| 674 |
+
# Track actual GPU time spent
|
| 675 |
+
start_time = time.time()
|
| 676 |
+
print(f"[GPU TIMING] Started generation: pipeline={pipeline}, image_size={image_size}, animation={enable_animation}, upscale={enable_upscale}")
|
| 677 |
+
|
| 678 |
# Only manipulate the text if it's a URL input type
|
| 679 |
qr_text = text_input
|
| 680 |
if input_type == "URL":
|
|
|
|
| 688 |
|
| 689 |
with torch.no_grad():
|
| 690 |
if pipeline == "standard":
|
| 691 |
+
for result in _pipeline_standard(
|
| 692 |
prompt=prompt,
|
| 693 |
negative_prompt=negative_prompt,
|
| 694 |
qr_text=qr_text,
|
|
|
|
| 713 |
variation_steps=variation_steps,
|
| 714 |
enable_animation=enable_animation,
|
| 715 |
gr_progress=progress,
|
| 716 |
+
):
|
| 717 |
+
yield result
|
| 718 |
else: # artistic
|
| 719 |
+
for result in _pipeline_artistic(
|
| 720 |
prompt=prompt,
|
| 721 |
negative_prompt=negative_prompt,
|
| 722 |
qr_text=qr_text,
|
|
|
|
| 748 |
variation_steps=variation_steps,
|
| 749 |
enable_animation=enable_animation,
|
| 750 |
gr_progress=progress,
|
| 751 |
+
):
|
| 752 |
+
yield result
|
| 753 |
+
|
| 754 |
+
# Log actual time spent after generation completes
|
| 755 |
+
elapsed_time = time.time() - start_time
|
| 756 |
+
print(f"[GPU TIMING] Completed generation in {elapsed_time:.2f}s (pipeline={pipeline}, image_size={image_size})")
|
| 757 |
|
| 758 |
|
| 759 |
class AnimationHandler:
|