Add files using upload-large-folder tool
Browse files- README.md +2 -0
- __pycache__/pipeline_hsigene.cpython-312.pyc +0 -0
- pipeline_hsigene.py +25 -2
README.md
CHANGED
|
@@ -10,6 +10,8 @@ tags:
|
|
| 10 |
pipeline_tag: image-to-image
|
| 11 |
---
|
| 12 |
|
|
|
|
|
|
|
| 13 |
# BiliSakura/HSIGene
|
| 14 |
|
| 15 |
**Hyperspectral image generation** — HSIGene converted to diffusers format. Supports task-specific conditioning with local controls (HED, MLSD, sketch, segmentation), global controls (content or text), or metadata embeddings. Outputs 48-band hyperspectral images (256×256 pixels).
|
|
|
|
| 10 |
pipeline_tag: image-to-image
|
| 11 |
---
|
| 12 |
|
| 13 |
+
> [!WARNING] We have not fully validated the checkpoint conversion. If you encounter a pipeline loading failure or unexpected output, please contact me at bili_sakura@zju.edu.cn
|
| 14 |
+
|
| 15 |
# BiliSakura/HSIGene
|
| 16 |
|
| 17 |
**Hyperspectral image generation** — HSIGene converted to diffusers format. Supports task-specific conditioning with local controls (HED, MLSD, sketch, segmentation), global controls (content or text), or metadata embeddings. Outputs 48-band hyperspectral images (256×256 pixels).
|
__pycache__/pipeline_hsigene.cpython-312.pyc
CHANGED
|
Binary files a/__pycache__/pipeline_hsigene.cpython-312.pyc and b/__pycache__/pipeline_hsigene.cpython-312.pyc differ
|
|
|
pipeline_hsigene.py
CHANGED
|
@@ -139,7 +139,10 @@ class _CRSModelWrapper(torch.nn.Module):
|
|
| 139 |
local_control_scales=None,
|
| 140 |
):
|
| 141 |
super().__init__()
|
| 142 |
-
|
|
|
|
|
|
|
|
|
|
| 143 |
self.first_stage_model = vae
|
| 144 |
self.cond_stage_model = text_encoder
|
| 145 |
self.local_adapter = local_adapter
|
|
@@ -408,7 +411,18 @@ class HSIGenePipeline(DiffusionPipeline):
|
|
| 408 |
return_dict: bool = True,
|
| 409 |
save_memory: bool = False,
|
| 410 |
):
|
| 411 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 412 |
if text_strength is None:
|
| 413 |
text_strength = global_strength
|
| 414 |
|
|
@@ -468,6 +482,15 @@ class HSIGenePipeline(DiffusionPipeline):
|
|
| 468 |
|
| 469 |
latent_shape = (num_samples, 4, height // 4, width // 4)
|
| 470 |
if latents is None:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 471 |
latents = torch.randn(
|
| 472 |
latent_shape, device=device, generator=generator, dtype=torch.float32,
|
| 473 |
)
|
|
|
|
| 139 |
local_control_scales=None,
|
| 140 |
):
|
| 141 |
super().__init__()
|
| 142 |
+
# Keep diffusion_model as a properly registered submodule so
|
| 143 |
+
# wrapper/device transfers (e.g., `.to("cuda")`) move UNet weights.
|
| 144 |
+
self.model = torch.nn.Module()
|
| 145 |
+
self.model.add_module("diffusion_model", unet)
|
| 146 |
self.first_stage_model = vae
|
| 147 |
self.cond_stage_model = text_encoder
|
| 148 |
self.local_adapter = local_adapter
|
|
|
|
| 411 |
return_dict: bool = True,
|
| 412 |
save_memory: bool = False,
|
| 413 |
):
|
| 414 |
+
target_device = next(self.crs_model.parameters()).device
|
| 415 |
+
if hasattr(self, "unet") and isinstance(self.unet, torch.nn.Module):
|
| 416 |
+
target_device = next(self.unet.parameters()).device
|
| 417 |
+
if latents is not None:
|
| 418 |
+
target_device = latents.device
|
| 419 |
+
elif generator is not None and hasattr(generator, "device"):
|
| 420 |
+
target_device = torch.device(generator.device)
|
| 421 |
+
|
| 422 |
+
# Keep wrapper submodules on the same device used for sampling.
|
| 423 |
+
if next(self.crs_model.parameters()).device != target_device:
|
| 424 |
+
self.crs_model = self.crs_model.to(target_device)
|
| 425 |
+
device = target_device
|
| 426 |
if text_strength is None:
|
| 427 |
text_strength = global_strength
|
| 428 |
|
|
|
|
| 482 |
|
| 483 |
latent_shape = (num_samples, 4, height // 4, width // 4)
|
| 484 |
if latents is None:
|
| 485 |
+
if generator is not None and hasattr(generator, "device"):
|
| 486 |
+
gen_device = torch.device(generator.device)
|
| 487 |
+
if gen_device.type != device.type:
|
| 488 |
+
# Recreate generator on target device while preserving seed
|
| 489 |
+
# so CPU/CUDA mismatch does not crash torch.randn.
|
| 490 |
+
if hasattr(generator, "initial_seed"):
|
| 491 |
+
generator = torch.Generator(device=device).manual_seed(generator.initial_seed())
|
| 492 |
+
else:
|
| 493 |
+
generator = torch.Generator(device=device)
|
| 494 |
latents = torch.randn(
|
| 495 |
latent_shape, device=device, generator=generator, dtype=torch.float32,
|
| 496 |
)
|