Spaces:
Sleeping
Sleeping
Commit
·
a1d1dab
1
Parent(s):
4368f78
[Update]: Enhanced app.py for improved model handling and metrics visualization 📊
Browse files- Updated: `init_stable_diffusion` to explicitly request the `fp32` variant for consistent float32 usage.
- Improved: Metrics formatting in `process_memory_operation` to convert shape to a list for better readability.
- Enhanced: GPU image generation in `generate_art_with_gpu` to ensure the random generator is on CUDA.
- Pro Tip of the Commit: Clarity in metrics and consistency in models make for a smoother ride on the waves of innovation! 🌊✨
Aye, Aye! 🚢
app.py
CHANGED
|
@@ -67,8 +67,9 @@ def init_stable_diffusion():
|
|
| 67 |
|
| 68 |
pipe = DiffusionPipeline.from_pretrained(
|
| 69 |
model_id,
|
| 70 |
-
torch_dtype=torch.float32, #
|
| 71 |
-
use_safetensors=True
|
|
|
|
| 72 |
)
|
| 73 |
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
| 74 |
print("✨ Stable Diffusion loaded on CPU")
|
|
@@ -790,7 +791,10 @@ def process_memory_operation(
|
|
| 790 |
metrics = result["metrics"]
|
| 791 |
metrics_str = "📊 Analysis Results:\n\n"
|
| 792 |
for key, value in metrics.items():
|
| 793 |
-
|
|
|
|
|
|
|
|
|
|
| 794 |
|
| 795 |
metrics_str += f"\nπ Emotional Context:\n"
|
| 796 |
metrics_str += f"• Valence: {result['emotion']['valence']:.2f}\n"
|
|
@@ -819,6 +823,8 @@ def process_memory_operation(
|
|
| 819 |
print(f"❌ Error during processing: {e}")
|
| 820 |
# Ensure we're back on CPU
|
| 821 |
memory_wave.to("cpu")
|
|
|
|
|
|
|
| 822 |
return None, None, None, None
|
| 823 |
|
| 824 |
@spaces.GPU
|
|
@@ -829,14 +835,14 @@ def generate_art_with_gpu(prompt: str, seed: int = 42) -> Optional[np.ndarray]:
|
|
| 829 |
|
| 830 |
try:
|
| 831 |
# Move to GPU and optimize
|
| 832 |
-
pipe.to("cuda")
|
| 833 |
pipe.enable_model_cpu_offload()
|
| 834 |
pipe.enable_vae_slicing()
|
| 835 |
pipe.enable_vae_tiling()
|
| 836 |
pipe.enable_attention_slicing(slice_size="max")
|
| 837 |
|
| 838 |
# Generate image
|
| 839 |
-
generator = torch.Generator().manual_seed(seed)
|
| 840 |
image = pipe(
|
| 841 |
prompt=prompt,
|
| 842 |
negative_prompt="text, watermark, signature, blurry, distorted",
|
|
|
|
| 67 |
|
| 68 |
pipe = DiffusionPipeline.from_pretrained(
|
| 69 |
model_id,
|
| 70 |
+
torch_dtype=torch.float32, # Use float32 consistently
|
| 71 |
+
use_safetensors=True,
|
| 72 |
+
variant="fp32" # Explicitly request fp32 variant
|
| 73 |
)
|
| 74 |
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
| 75 |
print("✨ Stable Diffusion loaded on CPU")
|
|
|
|
| 791 |
metrics = result["metrics"]
|
| 792 |
metrics_str = "📊 Analysis Results:\n\n"
|
| 793 |
for key, value in metrics.items():
|
| 794 |
+
if key == "shape":
|
| 795 |
+
metrics_str += f"• {key.replace('_', ' ').title()}: {list(value)}\n" # Convert shape to list
|
| 796 |
+
else:
|
| 797 |
+
metrics_str += f"• {key.replace('_', ' ').title()}: {value:.4f}\n"
|
| 798 |
|
| 799 |
metrics_str += f"\nπ Emotional Context:\n"
|
| 800 |
metrics_str += f"• Valence: {result['emotion']['valence']:.2f}\n"
|
|
|
|
| 823 |
print(f"❌ Error during processing: {e}")
|
| 824 |
# Ensure we're back on CPU
|
| 825 |
memory_wave.to("cpu")
|
| 826 |
+
if pipe is not None:
|
| 827 |
+
pipe.to("cpu")
|
| 828 |
return None, None, None, None
|
| 829 |
|
| 830 |
@spaces.GPU
|
|
|
|
| 835 |
|
| 836 |
try:
|
| 837 |
# Move to GPU and optimize
|
| 838 |
+
pipe.to("cuda", torch_dtype=torch.float32) # Ensure float32 on GPU
|
| 839 |
pipe.enable_model_cpu_offload()
|
| 840 |
pipe.enable_vae_slicing()
|
| 841 |
pipe.enable_vae_tiling()
|
| 842 |
pipe.enable_attention_slicing(slice_size="max")
|
| 843 |
|
| 844 |
# Generate image
|
| 845 |
+
generator = torch.Generator("cuda").manual_seed(seed) # Ensure generator is on CUDA
|
| 846 |
image = pipe(
|
| 847 |
prompt=prompt,
|
| 848 |
negative_prompt="text, watermark, signature, blurry, distorted",
|