Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- Code/Baselines/CraftsMan3D/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/DoraVAE-dinov2reglarge518-pixart-rectified-flow-dit32.yaml +136 -0
- Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32-finetune.yaml +153 -0
- Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32-mv.yaml +144 -0
- Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32.yaml +140 -0
- Code/Baselines/CraftsMan3D/configs/shape-autoencoder/michelangelo-l768-e64-ne8-nd16.yaml +94 -0
- Code/Baselines/CraftsMan3D/craftsman/__init__.py +55 -0
- Code/Baselines/CraftsMan3D/craftsman/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/__pycache__/pipeline.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/data/Objaverse.py +65 -0
- Code/Baselines/CraftsMan3D/craftsman/data/__init__.py +3 -0
- Code/Baselines/CraftsMan3D/craftsman/data/__pycache__/Objaverse.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/data/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/data/__pycache__/base.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/data/base.py +231 -0
- Code/Baselines/CraftsMan3D/craftsman/models/__init__.py +5 -0
- Code/Baselines/CraftsMan3D/craftsman/models/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__init__.py +4 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/dora_autoencoder.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/michelangelo_autoencoder.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/volume_decoders.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/dora_autoencoder.py +776 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/michelangelo_autoencoder.py +699 -0
- Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/volume_decoders.py +277 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__init__.py +4 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/base.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/clip_dinov2_encoder.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/dinov2_encoder.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/base.py +122 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/__pycache__/modeling_clip.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/__pycache__/modeling_conditional_clip.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/modeling_clip.py +1419 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/modeling_conditional_clip.py +385 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip_dinov2_encoder.py +286 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/__pycache__/modeling_conditional_dinov2.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/modeling_conditional_dinov2.py +228 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/modeling_dinov2.py +859 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2_encoder.py +190 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2_with_registers/__pycache__/modeling_dinov2_with_registers.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2_with_registers/modeling_dinov2_with_registers.py +946 -0
- Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__init__.py +3 -0
- Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__pycache__/__init__.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__pycache__/pixart_denoiser.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__pycache__/utils.cpython-310.pyc +0 -0
- Code/Baselines/CraftsMan3D/craftsman/models/denoisers/pixart_denoiser.py +160 -0
- Code/Baselines/CraftsMan3D/craftsman/models/denoisers/utils.py +156 -0
- Code/Baselines/CraftsMan3D/craftsman/models/geometry/__init__.py +3 -0
Code/Baselines/CraftsMan3D/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (152 Bytes). View file
|
|
|
Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/DoraVAE-dinov2reglarge518-pixart-rectified-flow-dit32.yaml
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_root_dir: "outputs"
|
| 2 |
+
name: "image-to-shape-diffusion/DoraVAE-dinov2reglarge518-pixart-rectified-flow-dit32"
|
| 3 |
+
tag: "${rmspace:${system.shape_model_type}+n${data.n_samples}+pfeat${system.shape_model.point_feats}+lr${system.optimizer.args.lr},_}"
|
| 4 |
+
seed: 0
|
| 5 |
+
|
| 6 |
+
data_type: "Objaverse-datamodule"
|
| 7 |
+
data:
|
| 8 |
+
local_dir: ./data/Objaverse_100k
|
| 9 |
+
load_geometry: true
|
| 10 |
+
geo_data_type: "sdf"
|
| 11 |
+
geo_data_path: data/Objaverse_100k/surfaces
|
| 12 |
+
sampling_strategy: null
|
| 13 |
+
n_samples: 16384
|
| 14 |
+
|
| 15 |
+
load_supervision: False
|
| 16 |
+
supervision_type: ""
|
| 17 |
+
n_supervision: 0
|
| 18 |
+
|
| 19 |
+
load_image: True # whether to load images
|
| 20 |
+
image_data_path: data/Objaverse_100k/images
|
| 21 |
+
image_type: "rgb" # rgb, normal
|
| 22 |
+
idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] # front view
|
| 23 |
+
n_views: 1
|
| 24 |
+
background_color: [0.5, 0.5, 0.5]
|
| 25 |
+
marign_pix_dis: 30
|
| 26 |
+
|
| 27 |
+
batch_size: 40
|
| 28 |
+
num_workers: 16
|
| 29 |
+
|
| 30 |
+
system_type: "pixart-diffusion-system"
|
| 31 |
+
system:
|
| 32 |
+
val_samples_json: "val_data/images/val_samples_rgb_image.json"
|
| 33 |
+
z_scale_factor: 1.0
|
| 34 |
+
guidance_scale: 7.5
|
| 35 |
+
num_inference_steps: 50
|
| 36 |
+
eta: 0.0
|
| 37 |
+
extract_mesh_func: mc
|
| 38 |
+
|
| 39 |
+
shape_model_type: dora-autoencoder
|
| 40 |
+
shape_model:
|
| 41 |
+
pretrained_dino_name_or_path: craftsman-DoraVAE/model.ckpt
|
| 42 |
+
n_samples: 16384
|
| 43 |
+
with_sharp_data: true
|
| 44 |
+
use_downsample: true
|
| 45 |
+
num_latents: 2048
|
| 46 |
+
embed_dim: 64
|
| 47 |
+
point_feats: 3
|
| 48 |
+
out_dim: 1
|
| 49 |
+
num_freqs: 8
|
| 50 |
+
include_pi: false
|
| 51 |
+
heads: 12
|
| 52 |
+
width: 768
|
| 53 |
+
num_encoder_layers: 8
|
| 54 |
+
num_decoder_layers: 16
|
| 55 |
+
use_ln_post: true
|
| 56 |
+
init_scale: 0.25
|
| 57 |
+
qkv_bias: false
|
| 58 |
+
use_flash: true
|
| 59 |
+
use_checkpoint: true
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
condition_model_type: "cond-embedder"
|
| 63 |
+
condition_model:
|
| 64 |
+
pretrained_clip_name_or_path: openai/clip-vit-large-patch14
|
| 65 |
+
pretrained_dino_name_or_path: facebook/dinov2-base
|
| 66 |
+
freeze_modulation_clip: true
|
| 67 |
+
freeze_modulation_dino: true
|
| 68 |
+
encode_camera: false
|
| 69 |
+
camera_embeds_dim: 0
|
| 70 |
+
n_views: ${data.n_views}
|
| 71 |
+
empty_embeds_ratio: 0.1
|
| 72 |
+
normalize_embeds: false
|
| 73 |
+
zero_uncond_embeds: true
|
| 74 |
+
linear_proj_init: constant
|
| 75 |
+
image_size_dino: 224
|
| 76 |
+
image_size_clip: 224
|
| 77 |
+
|
| 78 |
+
denoiser_model_type: "pixart-denoiser"
|
| 79 |
+
denoiser_model:
|
| 80 |
+
input_channels: ${system.shape_model.embed_dim}
|
| 81 |
+
output_channels: ${system.shape_model.embed_dim}
|
| 82 |
+
n_ctx: ${system.shape_model.num_latents}
|
| 83 |
+
width: 1024
|
| 84 |
+
layers: 32
|
| 85 |
+
heads: 16
|
| 86 |
+
context_dim: 1024
|
| 87 |
+
init_scale: 1.0
|
| 88 |
+
use_checkpoint: true
|
| 89 |
+
|
| 90 |
+
noise_scheduler_type: diffusers.schedulers.FlowMatchEulerDiscreteScheduler
|
| 91 |
+
noise_scheduler:
|
| 92 |
+
num_train_timesteps: 1000
|
| 93 |
+
shift: 3.0
|
| 94 |
+
|
| 95 |
+
denoise_scheduler_type: diffusers.schedulers.FlowMatchEulerDiscreteScheduler
|
| 96 |
+
denoise_scheduler:
|
| 97 |
+
num_train_timesteps: 1000
|
| 98 |
+
shift: 3.0
|
| 99 |
+
|
| 100 |
+
loggers:
|
| 101 |
+
wandb:
|
| 102 |
+
enable: false
|
| 103 |
+
project: "CraftsMan"
|
| 104 |
+
name: image-to-shape-diffusion+${name}+${tag}
|
| 105 |
+
|
| 106 |
+
loss:
|
| 107 |
+
loss_type: "mse"
|
| 108 |
+
lambda_diffusion: 1.
|
| 109 |
+
|
| 110 |
+
optimizer:
|
| 111 |
+
name: AdamW
|
| 112 |
+
args:
|
| 113 |
+
lr: 2.e-4
|
| 114 |
+
betas: [0.9, 0.99]
|
| 115 |
+
eps: 1.e-6
|
| 116 |
+
|
| 117 |
+
scheduler:
|
| 118 |
+
name: CosineAnnealingLR
|
| 119 |
+
args:
|
| 120 |
+
T_max: 5000
|
| 121 |
+
eta_min: 1e-6
|
| 122 |
+
|
| 123 |
+
trainer:
|
| 124 |
+
num_nodes: 1
|
| 125 |
+
max_epochs: 100000
|
| 126 |
+
log_every_n_steps: 5
|
| 127 |
+
num_sanity_val_steps: 1
|
| 128 |
+
check_val_every_n_epoch: 25
|
| 129 |
+
enable_progress_bar: true
|
| 130 |
+
precision: 16-mixed
|
| 131 |
+
strategy: 'ddp_find_unused_parameters_true'
|
| 132 |
+
|
| 133 |
+
checkpoint:
|
| 134 |
+
save_last: true
|
| 135 |
+
save_top_k: -1
|
| 136 |
+
every_n_train_steps: 5000
|
Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32-finetune.yaml
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
####### [INFO] LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
|
| 2 |
+
####### [INFO]
|
| 3 |
+
####### | Name | Type | Params
|
| 4 |
+
####### -----------------------------------------------------------
|
| 5 |
+
####### 0 | shape_model | MichelangeloAutoencoder | 184 M
|
| 6 |
+
####### 1 | condition | ClipDinoEmbedder | 514 M
|
| 7 |
+
####### 2 | denoiser_model | PixArtDinoDenoiser | 547 M
|
| 8 |
+
####### -----------------------------------------------------------
|
| 9 |
+
####### 548 M Trainable params
|
| 10 |
+
####### 698 M Non-trainable params
|
| 11 |
+
####### 1.2 B Total params
|
| 12 |
+
####### 4,986.894 Total estimated model params size (MB)
|
| 13 |
+
|
| 14 |
+
exp_root_dir: "outputs"
|
| 15 |
+
name: "image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32-finetune"
|
| 16 |
+
tag: "${rmspace:${system.shape_model_type}+n${data.n_samples}+pfeat${system.shape_model.point_feats}+lr${system.optimizer.args.lr},_}"
|
| 17 |
+
seed: 0
|
| 18 |
+
|
| 19 |
+
data_type: "Objaverse-datamodule"
|
| 20 |
+
data:
|
| 21 |
+
local_dir: ./data/Objaverse_100k
|
| 22 |
+
load_geometry: true
|
| 23 |
+
geo_data_type: "sdf"
|
| 24 |
+
geo_data_path: data/Objaverse_100k/surfaces
|
| 25 |
+
sampling_strategy: null
|
| 26 |
+
n_samples: 16384
|
| 27 |
+
|
| 28 |
+
load_supervision: False
|
| 29 |
+
supervision_type: ""
|
| 30 |
+
n_supervision: 0
|
| 31 |
+
|
| 32 |
+
load_image: True # whether to load images
|
| 33 |
+
image_data_path: data/Objaverse_100k/images
|
| 34 |
+
image_type: "rgb" # rgb, normal
|
| 35 |
+
idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] # front view
|
| 36 |
+
n_views: 1
|
| 37 |
+
background_color: [0.5, 0.5, 0.5]
|
| 38 |
+
marign_pix_dis: 30
|
| 39 |
+
|
| 40 |
+
batch_size: 40
|
| 41 |
+
num_workers: 16
|
| 42 |
+
|
| 43 |
+
system_type: "shape-diffusion-system"
|
| 44 |
+
system:
|
| 45 |
+
val_samples_json: "val_data/images/val_samples_rgb_image.json"
|
| 46 |
+
z_scale_factor: 1.0
|
| 47 |
+
guidance_scale: 7.5
|
| 48 |
+
num_inference_steps: 50
|
| 49 |
+
eta: 0.0
|
| 50 |
+
extract_mesh_func: mc
|
| 51 |
+
|
| 52 |
+
shape_model_type: michelangelo-autoencoder
|
| 53 |
+
shape_model:
|
| 54 |
+
pretrained_model_name_or_path: ckpts/craftsman-v1-5/model.ckpt
|
| 55 |
+
use_downsample: true
|
| 56 |
+
num_latents: 768
|
| 57 |
+
use_multi_reso: false
|
| 58 |
+
embed_dim: 64
|
| 59 |
+
point_feats: 3
|
| 60 |
+
out_dim: 1
|
| 61 |
+
num_freqs: 8
|
| 62 |
+
include_pi: false
|
| 63 |
+
heads: 12
|
| 64 |
+
width: 768
|
| 65 |
+
num_encoder_layers: 8
|
| 66 |
+
num_decoder_layers: 16
|
| 67 |
+
use_ln_post: true
|
| 68 |
+
init_scale: 0.25
|
| 69 |
+
qkv_bias: false
|
| 70 |
+
use_flash: true
|
| 71 |
+
use_checkpoint: true
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
condition_model_type: "clip-dinov2-embedder"
|
| 75 |
+
condition_model:
|
| 76 |
+
pretrained_model_name_or_path: ckpts/craftsman-v1-5/model.ckpt
|
| 77 |
+
pretrained_clip_name_or_path: openai/clip-vit-large-patch14
|
| 78 |
+
pretrained_dino_name_or_path: facebook/dinov2-base
|
| 79 |
+
freeze_modulation_clip: true
|
| 80 |
+
freeze_modulation_dino: true
|
| 81 |
+
encode_camera: false
|
| 82 |
+
camera_embeds_dim: 0
|
| 83 |
+
n_views: ${data.n_views}
|
| 84 |
+
empty_embeds_ratio: 0.1
|
| 85 |
+
normalize_embeds: false
|
| 86 |
+
zero_uncond_embeds: true
|
| 87 |
+
linear_proj_init: constant
|
| 88 |
+
image_size_dino: 224
|
| 89 |
+
image_size_clip: 224
|
| 90 |
+
|
| 91 |
+
denoiser_model_type: "pixart-denoiser"
|
| 92 |
+
denoiser_model:
|
| 93 |
+
pretrained_model_name_or_path: ckpts/craftsman-v1-5/model.ckpt
|
| 94 |
+
input_channels: ${system.shape_model.embed_dim}
|
| 95 |
+
output_channels: ${system.shape_model.embed_dim}
|
| 96 |
+
n_ctx: ${system.shape_model.num_latents}
|
| 97 |
+
width: 1024
|
| 98 |
+
layers: 32
|
| 99 |
+
heads: 16
|
| 100 |
+
context_dim: 1024
|
| 101 |
+
init_scale: 1.0
|
| 102 |
+
use_checkpoint: true
|
| 103 |
+
|
| 104 |
+
noise_scheduler_type: "diffusers.schedulers.DDPMScheduler"
|
| 105 |
+
noise_scheduler:
|
| 106 |
+
num_train_timesteps: 1000
|
| 107 |
+
beta_start: 0.00085
|
| 108 |
+
beta_end: 0.012
|
| 109 |
+
beta_schedule: "scaled_linear"
|
| 110 |
+
variance_type: "fixed_small"
|
| 111 |
+
clip_sample: false
|
| 112 |
+
|
| 113 |
+
denoise_scheduler_type: "diffusers.schedulers.DDIMScheduler"
|
| 114 |
+
denoise_scheduler:
|
| 115 |
+
num_train_timesteps: 1000
|
| 116 |
+
beta_start: 0.00085
|
| 117 |
+
beta_end: 0.012
|
| 118 |
+
beta_schedule: "scaled_linear"
|
| 119 |
+
clip_sample: false # clip sample to -1~1
|
| 120 |
+
set_alpha_to_one: false
|
| 121 |
+
steps_offset: 1
|
| 122 |
+
|
| 123 |
+
loggers:
|
| 124 |
+
wandb:
|
| 125 |
+
enable: false
|
| 126 |
+
project: "CraftsMan"
|
| 127 |
+
name: image-to-shape-diffusion+${name}+${tag}
|
| 128 |
+
|
| 129 |
+
loss:
|
| 130 |
+
loss_type: "mse"
|
| 131 |
+
lambda_diffusion: 1.
|
| 132 |
+
|
| 133 |
+
optimizer:
|
| 134 |
+
name: AdamW
|
| 135 |
+
args:
|
| 136 |
+
lr: 1.e-4
|
| 137 |
+
betas: [0.9, 0.99]
|
| 138 |
+
eps: 1.e-6
|
| 139 |
+
|
| 140 |
+
trainer:
|
| 141 |
+
num_nodes: 1
|
| 142 |
+
max_epochs: 500
|
| 143 |
+
log_every_n_steps: 5
|
| 144 |
+
num_sanity_val_steps: 1
|
| 145 |
+
check_val_every_n_epoch: 25
|
| 146 |
+
enable_progress_bar: true
|
| 147 |
+
precision: 16-mixed
|
| 148 |
+
strategy: 'ddp_find_unused_parameters_true'
|
| 149 |
+
|
| 150 |
+
checkpoint:
|
| 151 |
+
save_last: true
|
| 152 |
+
save_top_k: -1
|
| 153 |
+
every_n_train_steps: 5000
|
Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32-mv.yaml
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_root_dir: "outputs"
|
| 2 |
+
name: "image-to-shape-diffusion/clip-dino-rgb-pixart-lr2e4-ddim"
|
| 3 |
+
tag: "${rmspace:${system.shape_model_type}+n${data.n_samples}+pfeat${system.shape_model.point_feats}+lr${system.optimizer.args.lr},_}"
|
| 4 |
+
seed: 0
|
| 5 |
+
|
| 6 |
+
data_type: "Objaverse-datamodule"
|
| 7 |
+
data:
|
| 8 |
+
local_dir: ./data/Objaverse_100k
|
| 9 |
+
load_geometry: true
|
| 10 |
+
geo_data_type: "sdf"
|
| 11 |
+
geo_data_path: data/Objaverse_100k/surfaces
|
| 12 |
+
sampling_strategy: null
|
| 13 |
+
n_samples: 16384
|
| 14 |
+
|
| 15 |
+
load_supervision: False
|
| 16 |
+
supervision_type: ""
|
| 17 |
+
n_supervision: 0
|
| 18 |
+
|
| 19 |
+
load_image: True # whether to load images
|
| 20 |
+
image_data_path: data/Objaverse_100k/images
|
| 21 |
+
image_type: "rgb" # rgb, normal
|
| 22 |
+
idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] # front view
|
| 23 |
+
n_views: 1
|
| 24 |
+
background_color: [0.5, 0.5, 0.5]
|
| 25 |
+
marign_pix_dis: 30
|
| 26 |
+
|
| 27 |
+
batch_size: 40
|
| 28 |
+
num_workers: 16
|
| 29 |
+
|
| 30 |
+
system_type: "pixart-diffusion-system"
|
| 31 |
+
system:
|
| 32 |
+
val_samples_json: "val_data/images/val_samples_rgb_image.json"
|
| 33 |
+
z_scale_factor: 1.0
|
| 34 |
+
guidance_scale: 7.5
|
| 35 |
+
num_inference_steps: 50
|
| 36 |
+
eta: 0.0
|
| 37 |
+
extract_mesh_func: mc
|
| 38 |
+
|
| 39 |
+
shape_model_type: michelangelo-autoencoder
|
| 40 |
+
shape_model:
|
| 41 |
+
pretrained_model_name_or_path: ckpts/model.ckpt
|
| 42 |
+
use_downsample: true
|
| 43 |
+
num_latents: 768
|
| 44 |
+
use_multi_reso: false
|
| 45 |
+
embed_dim: 64
|
| 46 |
+
point_feats: 3
|
| 47 |
+
out_dim: 1
|
| 48 |
+
num_freqs: 8
|
| 49 |
+
include_pi: false
|
| 50 |
+
heads: 12
|
| 51 |
+
width: 768
|
| 52 |
+
num_encoder_layers: 8
|
| 53 |
+
num_decoder_layers: 16
|
| 54 |
+
use_ln_post: true
|
| 55 |
+
init_scale: 0.25
|
| 56 |
+
qkv_bias: false
|
| 57 |
+
use_flash: true
|
| 58 |
+
use_checkpoint: true
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
condition_model_type: "cond-embedder"
|
| 62 |
+
condition_model:
|
| 63 |
+
pretrained_clip_name_or_path: openai/clip-vit-large-patch14
|
| 64 |
+
pretrained_dino_name_or_path: facebook/dinov2-base
|
| 65 |
+
freeze_modulation_clip: true
|
| 66 |
+
freeze_modulation_dino: true
|
| 67 |
+
encode_camera: false
|
| 68 |
+
camera_embeds_dim: 0
|
| 69 |
+
n_views: ${data.n_views}
|
| 70 |
+
empty_embeds_ratio: 0.1
|
| 71 |
+
normalize_embeds: false
|
| 72 |
+
zero_uncond_embeds: true
|
| 73 |
+
linear_proj_init: constant
|
| 74 |
+
image_size_dino: 224
|
| 75 |
+
image_size_clip: 224
|
| 76 |
+
|
| 77 |
+
denoiser_model_type: "pixart-denoiser"
|
| 78 |
+
denoiser_model:
|
| 79 |
+
input_channels: ${system.shape_model.embed_dim}
|
| 80 |
+
output_channels: ${system.shape_model.embed_dim}
|
| 81 |
+
n_ctx: ${system.shape_model.num_latents}
|
| 82 |
+
width: 1024
|
| 83 |
+
layers: 32
|
| 84 |
+
heads: 16
|
| 85 |
+
context_dim: 1024
|
| 86 |
+
init_scale: 1.0
|
| 87 |
+
use_checkpoint: true
|
| 88 |
+
|
| 89 |
+
noise_scheduler_type: "diffusers.schedulers.DDPMScheduler"
|
| 90 |
+
noise_scheduler:
|
| 91 |
+
num_train_timesteps: 1000
|
| 92 |
+
beta_start: 0.00085
|
| 93 |
+
beta_end: 0.012
|
| 94 |
+
beta_schedule: "scaled_linear"
|
| 95 |
+
variance_type: "fixed_small"
|
| 96 |
+
clip_sample: false
|
| 97 |
+
|
| 98 |
+
denoise_scheduler_type: "diffusers.schedulers.DDIMScheduler"
|
| 99 |
+
denoise_scheduler:
|
| 100 |
+
num_train_timesteps: 1000
|
| 101 |
+
beta_start: 0.00085
|
| 102 |
+
beta_end: 0.012
|
| 103 |
+
beta_schedule: "scaled_linear"
|
| 104 |
+
clip_sample: false # clip sample to -1~1
|
| 105 |
+
set_alpha_to_one: false
|
| 106 |
+
steps_offset: 1
|
| 107 |
+
|
| 108 |
+
loggers:
|
| 109 |
+
wandb:
|
| 110 |
+
enable: false
|
| 111 |
+
project: "CraftsMan"
|
| 112 |
+
name: image-to-shape-diffusion+${name}+${tag}
|
| 113 |
+
|
| 114 |
+
loss:
|
| 115 |
+
loss_type: "mse"
|
| 116 |
+
lambda_diffusion: 1.
|
| 117 |
+
|
| 118 |
+
optimizer:
|
| 119 |
+
name: AdamW
|
| 120 |
+
args:
|
| 121 |
+
lr: 2.e-4
|
| 122 |
+
betas: [0.9, 0.99]
|
| 123 |
+
eps: 1.e-6
|
| 124 |
+
|
| 125 |
+
scheduler:
|
| 126 |
+
name: CosineAnnealingLR
|
| 127 |
+
args:
|
| 128 |
+
T_max: 5000
|
| 129 |
+
eta_min: 1e-6
|
| 130 |
+
|
| 131 |
+
trainer:
|
| 132 |
+
num_nodes: 1
|
| 133 |
+
max_epochs: 100000
|
| 134 |
+
log_every_n_steps: 5
|
| 135 |
+
num_sanity_val_steps: 1
|
| 136 |
+
check_val_every_n_epoch: 25
|
| 137 |
+
enable_progress_bar: true
|
| 138 |
+
precision: 16-mixed
|
| 139 |
+
strategy: 'ddp_find_unused_parameters_true'
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_last: true
|
| 143 |
+
save_top_k: -1
|
| 144 |
+
every_n_train_steps: 5000
|
Code/Baselines/CraftsMan3D/configs/image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32.yaml
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
####### [INFO] LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
|
| 2 |
+
####### [INFO]
|
| 3 |
+
####### | Name | Type | Params
|
| 4 |
+
####### -----------------------------------------------------------
|
| 5 |
+
####### 0 | shape_model | MichelangeloAutoencoder | 184 M
|
| 6 |
+
####### 1 | condition | ClipDinoEmbedder | 514 M
|
| 7 |
+
####### 2 | denoiser_model | PixArtDinoDenoiser | 547 M
|
| 8 |
+
####### -----------------------------------------------------------
|
| 9 |
+
####### 548 M Trainable params
|
| 10 |
+
####### 698 M Non-trainable params
|
| 11 |
+
####### 1.2 B Total params
|
| 12 |
+
####### 4,986.894 Total estimated model params size (MB)
|
| 13 |
+
|
| 14 |
+
exp_root_dir: "outputs"
|
| 15 |
+
name: "image-to-shape-diffusion/clip-dinov2-pixart-diffusion-dit32"
|
| 16 |
+
tag: "${rmspace:${system.shape_model_type}+n${data.n_samples}+pfeat${system.shape_model.point_feats}+lr${system.optimizer.args.lr},_}"
|
| 17 |
+
seed: 0
|
| 18 |
+
|
| 19 |
+
data_type: "Objaverse-datamodule"
|
| 20 |
+
data:
|
| 21 |
+
local_dir: ./data/Objaverse_100k
|
| 22 |
+
load_geometry: true
|
| 23 |
+
geo_data_type: "sdf"
|
| 24 |
+
geo_data_path: data/Objaverse_100k/surfaces
|
| 25 |
+
sampling_strategy: null
|
| 26 |
+
n_samples: 16384
|
| 27 |
+
|
| 28 |
+
load_supervision: False
|
| 29 |
+
supervision_type: ""
|
| 30 |
+
n_supervision: 0
|
| 31 |
+
|
| 32 |
+
load_image: True # whether to load images
|
| 33 |
+
image_data_path: data/Objaverse_100k/images
|
| 34 |
+
image_type: "rgb" # rgb, normal
|
| 35 |
+
idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] # front view
|
| 36 |
+
n_views: 1
|
| 37 |
+
background_color: [0.5, 0.5, 0.5]
|
| 38 |
+
marign_pix_dis: 30
|
| 39 |
+
|
| 40 |
+
batch_size: 40
|
| 41 |
+
num_workers: 16
|
| 42 |
+
|
| 43 |
+
system_type: "shape-diffusion-system"
|
| 44 |
+
system:
|
| 45 |
+
val_samples_json: "val_data/images/val_samples_rgb_image.json"
|
| 46 |
+
z_scale_factor: 1.0
|
| 47 |
+
guidance_scale: 7.5
|
| 48 |
+
num_inference_steps: 50
|
| 49 |
+
eta: 0.0
|
| 50 |
+
extract_mesh_func: mc
|
| 51 |
+
|
| 52 |
+
shape_model_type: michelangelo-autoencoder
|
| 53 |
+
shape_model:
|
| 54 |
+
pretrained_model_name_or_path: ckpts/craftsman-v1-5/model.ckpt
|
| 55 |
+
use_downsample: true
|
| 56 |
+
num_latents: 768
|
| 57 |
+
use_multi_reso: false
|
| 58 |
+
embed_dim: 64
|
| 59 |
+
point_feats: 3
|
| 60 |
+
out_dim: 1
|
| 61 |
+
num_freqs: 8
|
| 62 |
+
include_pi: false
|
| 63 |
+
heads: 12
|
| 64 |
+
width: 768
|
| 65 |
+
num_encoder_layers: 8
|
| 66 |
+
num_decoder_layers: 16
|
| 67 |
+
use_ln_post: true
|
| 68 |
+
init_scale: 0.25
|
| 69 |
+
qkv_bias: false
|
| 70 |
+
use_flash: true
|
| 71 |
+
use_checkpoint: true
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
condition_model_type: "clip-dinov2-embedder"
|
| 75 |
+
condition_model:
|
| 76 |
+
pretrained_clip_name_or_path: openai/clip-vit-large-patch14
|
| 77 |
+
pretrained_dino_name_or_path: facebook/dinov2-base
|
| 78 |
+
freeze_modulation_clip: true
|
| 79 |
+
freeze_modulation_dino: true
|
| 80 |
+
encode_camera: false
|
| 81 |
+
camera_embeds_dim: 0
|
| 82 |
+
n_views: ${data.n_views}
|
| 83 |
+
empty_embeds_ratio: 0.1
|
| 84 |
+
normalize_embeds: false
|
| 85 |
+
zero_uncond_embeds: true
|
| 86 |
+
linear_proj_init: constant
|
| 87 |
+
image_size_dino: 224
|
| 88 |
+
image_size_clip: 224
|
| 89 |
+
|
| 90 |
+
denoiser_model_type: "pixart-denoiser"
|
| 91 |
+
denoiser_model:
|
| 92 |
+
input_channels: ${system.shape_model.embed_dim}
|
| 93 |
+
output_channels: ${system.shape_model.embed_dim}
|
| 94 |
+
n_ctx: ${system.shape_model.num_latents}
|
| 95 |
+
width: 1024
|
| 96 |
+
layers: 32
|
| 97 |
+
heads: 16
|
| 98 |
+
context_dim: 1024
|
| 99 |
+
init_scale: 1.0
|
| 100 |
+
use_checkpoint: true
|
| 101 |
+
|
| 102 |
+
noise_scheduler_type: "diffusers.schedulers.DDPMScheduler"
|
| 103 |
+
noise_scheduler:
|
| 104 |
+
num_train_timesteps: 1000
|
| 105 |
+
|
| 106 |
+
denoise_scheduler_type: "diffusers.schedulers.DDIMScheduler"
|
| 107 |
+
denoise_scheduler:
|
| 108 |
+
num_train_timesteps: 1000
|
| 109 |
+
|
| 110 |
+
loggers:
|
| 111 |
+
wandb:
|
| 112 |
+
enable: false
|
| 113 |
+
project: "CraftsMan"
|
| 114 |
+
name: image-to-shape-diffusion+${name}+${tag}
|
| 115 |
+
|
| 116 |
+
loss:
|
| 117 |
+
loss_type: "mse"
|
| 118 |
+
lambda_diffusion: 1.
|
| 119 |
+
|
| 120 |
+
optimizer:
|
| 121 |
+
name: AdamW
|
| 122 |
+
args:
|
| 123 |
+
lr: 1.e-4
|
| 124 |
+
betas: [0.9, 0.99]
|
| 125 |
+
eps: 1.e-6
|
| 126 |
+
|
| 127 |
+
trainer:
|
| 128 |
+
num_nodes: 1
|
| 129 |
+
max_epochs: 500
|
| 130 |
+
log_every_n_steps: 5
|
| 131 |
+
num_sanity_val_steps: 1
|
| 132 |
+
check_val_every_n_epoch: 25
|
| 133 |
+
enable_progress_bar: true
|
| 134 |
+
precision: 16-mixed
|
| 135 |
+
strategy: 'ddp_find_unused_parameters_true'
|
| 136 |
+
|
| 137 |
+
checkpoint:
|
| 138 |
+
save_last: true
|
| 139 |
+
save_top_k: -1
|
| 140 |
+
every_n_train_steps: 5000
|
Code/Baselines/CraftsMan3D/configs/shape-autoencoder/michelangelo-l768-e64-ne8-nd16.yaml
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_root_dir: "outputs"
|
| 2 |
+
name: "michelangelo-autoencoder/michelangelo-l768-e64-ne8-nd16"
|
| 3 |
+
tag: "${rmspace:n${data.n_samples}+${data.supervision_type}+${system.shape_model.embed_type}+dsample${system.shape_model.use_downsample}+pfeat${system.shape_model.point_feats}+logits${system.loss.lambda_logits}+kl${system.loss.lambda_kl}+lr${system.optimizer.args.lr},_}"
|
| 4 |
+
seed: 0
|
| 5 |
+
|
| 6 |
+
data_type: "Objaverse-datamodule"
|
| 7 |
+
data:
|
| 8 |
+
root_dir: ./data/objaverse
|
| 9 |
+
|
| 10 |
+
load_geometry: True # whether to load geometry
|
| 11 |
+
geo_data_type: "sdf"
|
| 12 |
+
n_samples: 16384
|
| 13 |
+
load_supervision: True
|
| 14 |
+
supervision_type: "tsdf"
|
| 15 |
+
n_supervision: 16384
|
| 16 |
+
tsdf_threshold: 0.0078125 # threshold for truncating sdf values, used when input is sdf
|
| 17 |
+
|
| 18 |
+
load_image: False # whether to load images
|
| 19 |
+
load_caption: False # whether to load captions
|
| 20 |
+
|
| 21 |
+
batch_size: 8
|
| 22 |
+
num_workers: 0
|
| 23 |
+
|
| 24 |
+
system_type: "shape-autoencoder-system"
|
| 25 |
+
system:
|
| 26 |
+
sample_posterior: true
|
| 27 |
+
|
| 28 |
+
shape_model_type: "michelangelo-autoencoder"
|
| 29 |
+
shape_model:
|
| 30 |
+
num_latents: 1024 # 1024
|
| 31 |
+
embed_dim: 64
|
| 32 |
+
point_feats: 3 # xyz + normal
|
| 33 |
+
out_dim: 1 # only occupancy
|
| 34 |
+
embed_type: "fourier"
|
| 35 |
+
num_freqs: 8
|
| 36 |
+
include_pi: false
|
| 37 |
+
heads: 12
|
| 38 |
+
width: 768
|
| 39 |
+
num_encoder_layers: 8
|
| 40 |
+
num_decoder_layers: 16
|
| 41 |
+
use_ln_post: true
|
| 42 |
+
init_scale: 0.25
|
| 43 |
+
qkv_bias: true
|
| 44 |
+
use_flash: true
|
| 45 |
+
use_checkpoint: true
|
| 46 |
+
use_downsample: true
|
| 47 |
+
|
| 48 |
+
loggers:
|
| 49 |
+
wandb:
|
| 50 |
+
enable: false
|
| 51 |
+
project: "CraftsMan"
|
| 52 |
+
name: shape-autoencoder+${name}+${tag}
|
| 53 |
+
|
| 54 |
+
loss:
|
| 55 |
+
lambda_logits: 1.
|
| 56 |
+
lambda_kl: 0.001
|
| 57 |
+
|
| 58 |
+
optimizer:
|
| 59 |
+
name: AdamW
|
| 60 |
+
args:
|
| 61 |
+
lr: 1.e-4
|
| 62 |
+
betas: [0.9, 0.99]
|
| 63 |
+
eps: 1.e-6
|
| 64 |
+
|
| 65 |
+
scheduler:
|
| 66 |
+
name: SequentialLR
|
| 67 |
+
interval: step
|
| 68 |
+
schedulers:
|
| 69 |
+
- name: LinearLR
|
| 70 |
+
interval: step
|
| 71 |
+
args:
|
| 72 |
+
start_factor: 1e-6
|
| 73 |
+
end_factor: 1.0
|
| 74 |
+
total_iters: 5000
|
| 75 |
+
- name: CosineAnnealingLR
|
| 76 |
+
interval: step
|
| 77 |
+
args:
|
| 78 |
+
T_max: 5000
|
| 79 |
+
eta_min: 0.
|
| 80 |
+
milestones: [5000]
|
| 81 |
+
|
| 82 |
+
trainer:
|
| 83 |
+
num_nodes: 1
|
| 84 |
+
max_epochs: 100000
|
| 85 |
+
log_every_n_steps: 5
|
| 86 |
+
num_sanity_val_steps: 1
|
| 87 |
+
check_val_every_n_epoch: 600
|
| 88 |
+
enable_progress_bar: true
|
| 89 |
+
precision: bf16-mixed
|
| 90 |
+
|
| 91 |
+
checkpoint:
|
| 92 |
+
save_last: true
|
| 93 |
+
save_top_k: -1
|
| 94 |
+
every_n_train_steps: 5000
|
Code/Baselines/CraftsMan3D/craftsman/__init__.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
from .pipeline import (
|
| 3 |
+
CraftsManPipeline
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
__modules__ = {}
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def register(name):
    """Class-decorator factory: record the decorated class in the module
    registry under *name* so it can later be retrieved via ``find``.

    Raises ValueError when *name* is already taken, keeping extension
    names unique across the package.
    """
    def decorator(cls):
        if name not in __modules__:
            __modules__[name] = cls
            return cls
        raise ValueError(
            f"Module {name} already exists! Names of extensions conflict!"
        )

    return decorator
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def find(name):
    """Resolve *name* to a class.

    First consults the ``@register`` registry; otherwise treats *name* as a
    dotted path ``package.module.ClassName`` and imports it dynamically.

    Raises:
        ValueError: if the name is neither registered nor importable.
    """
    if name in __modules__:
        return __modules__[name]
    try:
        module_string = ".".join(name.split(".")[:-1])
        cls_name = name.split(".")[-1]
        module = importlib.import_module(module_string, package=None)
        return getattr(module, cls_name)
    except Exception as e:
        # Chain the original exception so import/attribute failures stay
        # diagnosable instead of being swallowed by the generic message.
        raise ValueError(f"Module {name} not found!") from e
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
### grammar sugar for logging utilities ###
|
| 36 |
+
import logging
|
| 37 |
+
|
| 38 |
+
logger = logging.getLogger("pytorch_lightning")
|
| 39 |
+
|
| 40 |
+
from pytorch_lightning.utilities.rank_zero import (
|
| 41 |
+
rank_zero_debug,
|
| 42 |
+
rank_zero_info,
|
| 43 |
+
rank_zero_only,
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
debug = rank_zero_debug
|
| 47 |
+
info = rank_zero_info
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@rank_zero_only
def warn(*args, **kwargs):
    """Log a warning only on the rank-zero process (avoids duplicate messages in DDP runs)."""
    # Logger.warn is a deprecated alias of Logger.warning; use the real method.
    logger.warning(*args, **kwargs)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
from . import data, models, systems
|
Code/Baselines/CraftsMan3D/craftsman/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/__pycache__/pipeline.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/data/Objaverse.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
import cv2
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
|
| 8 |
+
import pytorch_lightning as pl
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
from torch.utils.data import DataLoader
|
| 12 |
+
from craftsman import register
|
| 13 |
+
from craftsman.utils.typing import *
|
| 14 |
+
from craftsman.utils.config import parse_structured
|
| 15 |
+
|
| 16 |
+
from .base import BaseDataModuleConfig, BaseDataset
|
| 17 |
+
|
| 18 |
+
@dataclass
class ObjaverseDataModuleConfig(BaseDataModuleConfig):
    """Config for the Objaverse data module.

    No Objaverse-specific options yet; every field is inherited from
    BaseDataModuleConfig.
    """
    pass
|
| 21 |
+
|
| 22 |
+
class ObjaverseDataset(BaseDataset):
    """Objaverse split dataset.

    All loading logic lives in BaseDataset; this subclass only names the
    dataset for the data module below.
    """
    pass
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@register("Objaverse-datamodule")
|
| 27 |
+
class ObjaverseDataModule(pl.LightningDataModule):
|
| 28 |
+
cfg: ObjaverseDataModuleConfig
|
| 29 |
+
|
| 30 |
+
def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
|
| 31 |
+
super().__init__()
|
| 32 |
+
self.cfg = parse_structured(ObjaverseDataModuleConfig, cfg)
|
| 33 |
+
|
| 34 |
+
def setup(self, stage=None) -> None:
|
| 35 |
+
if stage in [None, "fit"]:
|
| 36 |
+
self.train_dataset = ObjaverseDataset(self.cfg, "train")
|
| 37 |
+
if stage in [None, "fit", "validate"]:
|
| 38 |
+
self.val_dataset = ObjaverseDataset(self.cfg, "val")
|
| 39 |
+
if stage in [None, "test", "predict"]:
|
| 40 |
+
self.test_dataset = ObjaverseDataset(self.cfg, "test")
|
| 41 |
+
|
| 42 |
+
def prepare_data(self):
|
| 43 |
+
pass
|
| 44 |
+
|
| 45 |
+
def general_loader(self, dataset, batch_size, collate_fn=None, num_workers=0) -> DataLoader:
|
| 46 |
+
return DataLoader(
|
| 47 |
+
dataset, batch_size=batch_size, collate_fn=collate_fn, num_workers=num_workers
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
def train_dataloader(self) -> DataLoader:
|
| 51 |
+
return self.general_loader(
|
| 52 |
+
self.train_dataset,
|
| 53 |
+
batch_size=self.cfg.batch_size,
|
| 54 |
+
collate_fn=self.train_dataset.collate,
|
| 55 |
+
num_workers=self.cfg.num_workers
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
def val_dataloader(self) -> DataLoader:
|
| 59 |
+
return self.general_loader(self.val_dataset, batch_size=1)
|
| 60 |
+
|
| 61 |
+
def test_dataloader(self) -> DataLoader:
|
| 62 |
+
return self.general_loader(self.test_dataset, batch_size=1)
|
| 63 |
+
|
| 64 |
+
def predict_dataloader(self) -> DataLoader:
|
| 65 |
+
return self.general_loader(self.test_dataset, batch_size=1)
|
Code/Baselines/CraftsMan3D/craftsman/data/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import (
|
| 2 |
+
Objaverse
|
| 3 |
+
)
|
Code/Baselines/CraftsMan3D/craftsman/data/__pycache__/Objaverse.cpython-310.pyc
ADDED
|
Binary file (2.94 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/data/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (201 Bytes). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/data/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (7.7 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/data/base.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
import cv2
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
|
| 8 |
+
import random
|
| 9 |
+
import imageio
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn.functional as F
|
| 13 |
+
from torch.utils.data import DataLoader, Dataset
|
| 14 |
+
from PIL import Image
|
| 15 |
+
|
| 16 |
+
from craftsman.utils.typing import *
|
| 17 |
+
|
| 18 |
+
def fit_bounding_box(img, mask, marign_pix_dis, background_color, canvas_size=512):
    """Crop *img* to the tight bounding box of *mask* and paste it, resized
    and centered, onto a fresh square canvas filled with *background_color*.

    The foreground keeps its aspect ratio; its longer side fills the canvas
    minus a margin of *marign_pix_dis* pixels on each end.

    Args:
        img: H x W x 3 float tensor (image already composited over background).
        mask: H x W x 1 tensor whose nonzero entries mark the foreground.
        marign_pix_dis: margin in pixels around the resized foreground.
        background_color: per-channel fill value for the canvas.
        canvas_size: side length of the square output canvas. Defaults to 512
            (the previously hard-coded value), so existing callers are unaffected.

    Returns:
        canvas_size x canvas_size x 3 torch tensor.
    """
    alpha = mask.numpy().squeeze()
    rows = np.any(alpha, axis=1)
    cols = np.any(alpha, axis=0)
    h_min, h_max = np.where(rows)[0][[0, -1]]
    w_min, w_max = np.where(cols)[0][[0, -1]]
    box_height = h_max - h_min
    box_width = w_max - w_min
    cropped = img[h_min:h_max, w_min:w_max]

    # Scale so the longer box side spans the canvas minus both margins;
    # +1 on the shorter side guards against rounding to zero pixels.
    inner = canvas_size - 2 * marign_pix_dis
    if box_height > box_width:
        new_height = inner
        new_width = int(inner / box_height * box_width) + 1
    else:
        new_height = int(inner / box_width * box_height) + 1
        new_width = inner

    # Top-left corner that centers the resized crop on the canvas.
    h_start = int((canvas_size - new_height) / 2 + 1)
    w_start = int((canvas_size - new_width) / 2 + 1)

    canvas = np.full((canvas_size, canvas_size, 3), background_color)
    canvas[h_start:h_start + new_height, w_start:w_start + new_width, :] = cv2.resize(
        cropped.numpy(), (new_width, new_height)
    )

    return torch.from_numpy(canvas)
|
| 44 |
+
|
| 45 |
+
@dataclass
class BaseDataModuleConfig:
    """Options shared by all shape datasets: geometry sampling, supervision
    targets, and optional image conditioning."""

    local_dir: Optional[str] = None  # dataset root; must contain a {split}.json uid list per split

    ################################# Geometry part #################################
    load_geometry: bool = True # whether to load geometry data
    geo_data_type: str = "occupancy" # occupancy, sdf
    geo_data_path: str = "" # path to the geometry data
    # for occupancy and sdf data
    n_samples: int = 4096 # number of points in input point cloud
    upsample_ratio: int = 1 # upsample ratio for input point cloud
    sampling_strategy: Optional[str] = None # sampling strategy for input point cloud: "random", "fps", or None (keep all)
    scale: float = 1.0 # scale of the input point cloud and target supervision
    load_supervision: bool = True # whether to load supervision
    supervision_type: str = "occupancy" # occupancy, sdf, tsdf
    tsdf_threshold: float = 0.05 # threshold for truncating sdf values, used when input is sdf
    n_supervision: int = 10000 # number of points in supervision

    ################################# Image part #################################
    load_image: bool = False # whether to load images
    image_data_path: str = "" # path to the image data
    image_type: str = "rgb" # rgb, normal
    background_color: Tuple[float, float, float] = field(
        default_factory=lambda: (0.5, 0.5, 0.5)
    ) # RGB fill in [0, 1]; (-1, -1, -1) requests a random background per sample
    idx: Optional[List[int]] = None # index of the image to load
    n_views: int = 1 # number of views
    marign_pix_dis: int = 30 # margin of the bounding box
    batch_size: int = 32 # training batch size
    num_workers: int = 8 # dataloader worker processes
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class BaseDataset(Dataset):
    """Loads surface point clouds, occupancy/SDF supervision, and optional
    conditioning images for one split of a shape dataset.

    The uid list for the split is read from ``{cfg.local_dir}/{split}.json``.
    """

    def __init__(self, cfg: Any, split: str) -> None:
        super().__init__()
        self.cfg: BaseDataModuleConfig = cfg
        self.split = split

        self.uids = json.load(open(f'{cfg.local_dir}/{split}.json'))
        print(f"Loaded {len(self.uids)} {split} uids")

    def __len__(self):
        return len(self.uids)

    def _load_shape_from_occupancy_or_sdf(self, index: int) -> Dict[str, Any]:
        """Load the input surface samples (xyz [+ normals]) for one shape."""
        if self.cfg.geo_data_type == "occupancy":
            # for input point cloud, using Objaverse-MIX data
            pointcloud = np.load(f'{self.cfg.geo_data_path}/{self.uids[index]}/pointcloud.npz')
            surface = np.asarray(pointcloud['points']) * 2  # range from -1 to 1
            normal = np.asarray(pointcloud['normals'])
            surface = np.concatenate([surface, normal], axis=1)
        elif self.cfg.geo_data_type == "sdf":
            # for sdf data with our own format
            data = np.load(f'{self.cfg.geo_data_path}/{self.uids[index]}.npz')
            surface = data["surface"]
        else:
            raise NotImplementedError(f"Data type {self.cfg.geo_data_type} not implemented")

        # subsample the surface down to n_samples points
        if self.cfg.sampling_strategy == "random":
            rng = np.random.default_rng()
            ind = rng.choice(surface.shape[0], self.cfg.upsample_ratio * self.cfg.n_samples, replace=False)
            surface = surface[ind]
        elif self.cfg.sampling_strategy == "fps":
            import fpsample
            kdline_fps_samples_idx = fpsample.bucket_fps_kdline_sampling(surface[:, :3], self.cfg.n_samples, h=5)
            surface = surface[kdline_fps_samples_idx]
        elif self.cfg.sampling_strategy is None:
            pass
        else:
            raise NotImplementedError(f"sampling strategy {self.cfg.sampling_strategy} not implemented")

        # rescale coordinates to the configured target scale
        surface[:, :3] = surface[:, :3] * self.cfg.scale
        ret = {
            "uid": self.uids[index].split('/')[-1],
            "surface": surface.astype(np.float32),
        }

        return ret

    def _load_shape_supervision_occupancy_or_sdf(self, index: int) -> Dict[str, Any]:
        """Load query points plus occupancy/sdf supervision targets for one shape."""
        ret = {}
        # BUGFIX: this method originally read ``self.cfg.data_type``, an
        # attribute that does not exist on BaseDataModuleConfig (the field is
        # ``geo_data_type``); every call raised AttributeError, silently
        # masked by the retry loop in __getitem__.
        if self.cfg.geo_data_type == "occupancy":
            points = np.load(f'{self.cfg.geo_data_path}/{self.uids[index]}/points.npz')
            rand_points = np.asarray(points['points']) * 2  # range from -1.1 to 1.1
            occupancies = np.asarray(points['occupancies'])
            occupancies = np.unpackbits(occupancies)
        elif self.cfg.geo_data_type == "sdf":
            data = np.load(f'{self.cfg.geo_data_path}/{self.uids[index]}.npz')
            rand_points = data['rand_points']
            sdfs = data['sdfs']
        else:
            raise NotImplementedError(f"Data type {self.cfg.geo_data_type} not implemented")

        # random subsample of the supervision set
        rng = np.random.default_rng()
        ind = rng.choice(rand_points.shape[0], self.cfg.n_supervision, replace=False)
        rand_points = rand_points[ind]
        rand_points = rand_points * self.cfg.scale
        ret["rand_points"] = rand_points.astype(np.float32)

        if self.cfg.geo_data_type == "occupancy":
            assert self.cfg.supervision_type == "occupancy", "Only occupancy supervision is supported for occupancy data"
            occupancies = occupancies[ind]
            ret["occupancies"] = occupancies.astype(np.float32)
        elif self.cfg.geo_data_type == "sdf":
            if self.cfg.supervision_type == "sdf":
                ret["sdf"] = sdfs[ind].flatten().astype(np.float32)
            elif self.cfg.supervision_type == "occupancy":
                # sdf below the small threshold counts as inside (label 0), else outside (1)
                ret["occupancies"] = np.where(sdfs[ind].flatten() < 1e-3, 0, 1).astype(np.float32)
            elif self.cfg.supervision_type == "tsdf":
                ret["sdf"] = sdfs[ind].flatten().astype(np.float32).clip(-self.cfg.tsdf_threshold, self.cfg.tsdf_threshold) / self.cfg.tsdf_threshold
            else:
                raise NotImplementedError(f"Supervision type {self.cfg.supervision_type} not implemented")

        return ret

    def _load_image(self, index: int) -> Dict[str, Any]:
        """Load the conditioning image (and its alpha mask) for one shape."""

        def _load_single_image(img_path, background_color, marign_pix_dis=None):
            img = torch.from_numpy(
                np.asarray(
                    Image.fromarray(imageio.v2.imread(img_path))
                    .convert("RGBA")
                )
                / 255.0
            ).float()
            mask: Float[Tensor, "H W 1"] = img[:, :, -1:]
            # composite RGB over the chosen background using the alpha mask
            image: Float[Tensor, "H W 3"] = img[:, :, :3] * mask + background_color[
                None, None, :
            ] * (1 - mask)
            if marign_pix_dis is not None:
                image = fit_bounding_box(image, mask, marign_pix_dis, background_color)
            return image, mask

        # Sentinel (-1, -1, -1) requests a random background per sample.
        # BUGFIX: the original compared the (tuple-typed) config value against
        # a *list*, which is always False for a plain tuple; normalizing both
        # sides to tuple makes the sentinel work regardless of sequence type.
        if tuple(self.cfg.background_color) == (-1, -1, -1):
            background_color = torch.randint(0, 256, (3,))
        else:
            background_color = torch.as_tensor(self.cfg.background_color)
        ret = {}
        if self.cfg.image_type == "rgb" or self.cfg.image_type == "normal":
            assert self.cfg.n_views == 1, "Only single view is supported for single image"
            sel_idx = random.choice(self.cfg.idx)
            ret["sel_image_idx"] = sel_idx
            if self.cfg.image_type == "rgb":
                img_path = f'{self.cfg.image_data_path}/' + "/".join(self.uids[index].split('/')[-2:]) + f"/{'{:04d}'.format(sel_idx)}_rgb.jpeg"
            elif self.cfg.image_type == "normal":
                img_path = f'{self.cfg.image_data_path}/' + "/".join(self.uids[index].split('/')[-2:]) + f"/{'{:04d}'.format(sel_idx)}_normal.jpeg"
            ret["image"], ret["mask"] = _load_single_image(img_path, background_color, self.cfg.marign_pix_dis)

        else:
            raise NotImplementedError(f"Image type {self.cfg.image_type} not implemented")

        return ret

    def _get_data(self, index):
        ret = {"uid": self.uids[index]}
        # load geometry
        if self.cfg.load_geometry:
            if self.cfg.geo_data_type == "occupancy" or self.cfg.geo_data_type == "sdf":
                # load shape (replaces ret, including a shortened "uid")
                ret = self._load_shape_from_occupancy_or_sdf(index)
                # load supervision for shape
                if self.cfg.load_supervision:
                    ret.update(self._load_shape_supervision_occupancy_or_sdf(index))
            else:
                raise NotImplementedError(f"Geo data type {self.cfg.geo_data_type} not implemented")

        # load image
        if self.cfg.load_image:
            ret.update(self._load_image(index))

        return ret

    def __getitem__(self, index):
        # On a corrupt sample, retry with random indices. A bounded loop
        # replaces the original unbounded recursion, which could exhaust the
        # interpreter's recursion limit when many samples fail in a row.
        for _ in range(100):
            try:
                return self._get_data(index)
            except Exception as e:
                print(f"Error in {self.uids[index]}: {e}")
                index = np.random.randint(len(self))
        raise RuntimeError("Too many consecutive corrupt samples; giving up")

    def collate(self, batch):
        """Stack a list of per-sample dicts into batched tensors."""
        return torch.utils.data.default_collate(batch)
|
Code/Baselines/CraftsMan3D/craftsman/models/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import (
|
| 2 |
+
autoencoders,
|
| 3 |
+
conditional_encoders,
|
| 4 |
+
denoisers
|
| 5 |
+
)
|
Code/Baselines/CraftsMan3D/craftsman/models/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (257 Bytes). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import (
|
| 2 |
+
dora_autoencoder,
|
| 3 |
+
michelangelo_autoencoder,
|
| 4 |
+
)
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (258 Bytes). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/dora_autoencoder.cpython-310.pyc
ADDED
|
Binary file (21.5 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/michelangelo_autoencoder.cpython-310.pyc
ADDED
|
Binary file (20 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/__pycache__/volume_decoders.cpython-310.pyc
ADDED
|
Binary file (6.55 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/dora_autoencoder.py
ADDED
|
@@ -0,0 +1,776 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import numpy as np
|
| 6 |
+
import random
|
| 7 |
+
import time
|
| 8 |
+
import trimesh
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from einops import repeat, rearrange
|
| 11 |
+
from tqdm import trange
|
| 12 |
+
from itertools import product
|
| 13 |
+
|
| 14 |
+
import craftsman
|
| 15 |
+
from craftsman.models.transformers.perceiver_1d import Perceiver
|
| 16 |
+
from craftsman.models.transformers.attention import ResidualCrossAttentionBlock
|
| 17 |
+
from craftsman.utils.checkpoint import checkpoint
|
| 18 |
+
from craftsman.utils.base import BaseModule
|
| 19 |
+
from craftsman.utils.typing import *
|
| 20 |
+
from craftsman.utils.misc import get_world_size, get_device
|
| 21 |
+
from craftsman.utils.ops import generate_dense_grid_points
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
VALID_EMBED_TYPES = ["identity", "fourier", "learned_fourier", "siren"]
|
| 25 |
+
|
| 26 |
+
class FourierEmbedder(nn.Module):
    """Fixed Fourier positional encoding.

    Maps each input coordinate x to ``[x?, sin(f_k x), cos(f_k x)]`` for a
    bank of frequencies f_k (optionally scaled by pi); the raw input is
    prepended when ``include_input`` is set.
    """

    def __init__(self,
                 num_freqs: int = 6,
                 logspace: bool = True,
                 input_dim: int = 3,
                 include_input: bool = True,
                 include_pi: bool = True) -> None:
        super().__init__()

        # Frequency bank: powers of two, or a linear ramp up to 2^(n-1).
        if logspace:
            freqs = torch.pow(2.0, torch.arange(num_freqs, dtype=torch.float32))
        else:
            freqs = torch.linspace(1.0, 2.0 ** (num_freqs - 1), num_freqs, dtype=torch.float32)

        if include_pi:
            freqs = freqs * torch.pi

        # Non-persistent: the bank is rebuilt from config, not checkpointed.
        self.register_buffer("frequencies", freqs, persistent=False)
        self.include_input = include_input
        self.num_freqs = num_freqs

        self.out_dim = self.get_dims(input_dim)

    def get_dims(self, input_dim):
        """Output feature size for an *input_dim*-dimensional input."""
        # The raw input contributes one copy when included, or when there is
        # nothing else to emit (num_freqs == 0 passes x through unchanged).
        extra = 1 if self.include_input or self.num_freqs == 0 else 0
        return input_dim * (2 * self.num_freqs + extra)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.num_freqs <= 0:
            return x
        scaled = (x[..., None].contiguous() * self.frequencies).view(*x.shape[:-1], -1)
        parts = [scaled.sin(), scaled.cos()]
        if self.include_input:
            parts.insert(0, x)
        return torch.cat(parts, dim=-1)
|
| 72 |
+
|
| 73 |
+
class LearnedFourierEmbedder(nn.Module):
    """Fourier features with learnable, randomly initialized frequencies.

    Output is ``[x, sin(2*pi*w x), cos(2*pi*w x)]`` with one shared weight
    vector applied per input channel.
    """

    def __init__(self, input_dim, dim):
        super().__init__()
        assert (dim % 2) == 0
        # Half the budget goes to sin, half to cos, split across channels.
        per_channel_dim = (dim // 2) // input_dim
        self.weights = nn.Parameter(torch.randn(per_channel_dim))

        self.out_dim = self.get_dims(input_dim)

    def forward(self, x):
        # [b, t, c, 1] * [1, d] = [b, t, c, d] -> [b, t, c * d]
        angles = (x[..., None] * self.weights[None] * 2 * np.pi).view(*x.shape[:-1], -1)
        return torch.cat((x, angles.sin(), angles.cos()), dim=-1)

    def get_dims(self, input_dim):
        """Output feature size: raw input plus sin/cos per learned frequency."""
        return input_dim * (self.weights.shape[0] * 2 + 1)
|
| 91 |
+
|
| 92 |
+
class Sine(nn.Module):
    """Sine activation with a frequency scale ``w0`` (SIREN-style)."""

    def __init__(self, w0 = 1.):
        super().__init__()
        self.w0 = w0

    def forward(self, x):
        return (self.w0 * x).sin()
|
| 98 |
+
|
| 99 |
+
class Siren(nn.Module):
    """One SIREN layer: a linear map followed by a Sine activation.

    Weights use the SIREN initialization (Sitzmann et al., 2020): uniform in
    +-1/in_dim for the first layer, +-sqrt(c/in_dim)/w0 otherwise.
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        w0 = 1.,
        c = 6.,
        is_first = False,
        use_bias = True,
        activation = None,
        dropout = 0.
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.is_first = is_first

        weight = torch.zeros(out_dim, in_dim)
        bias = torch.zeros(out_dim) if use_bias else None
        self.init_(weight, bias, c = c, w0 = w0)

        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias) if use_bias else None
        self.activation = Sine(w0) if activation is None else activation
        self.dropout = nn.Dropout(dropout)

    def init_(self, weight, bias, c, w0):
        """In-place SIREN initialization of *weight* (and *bias* if present)."""
        dim = self.in_dim

        w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
        weight.uniform_(-w_std, w_std)

        if bias is not None:
            bias.uniform_(-w_std, w_std)

    def forward(self, x):
        # BUGFIX: was ``F.linear``, but this module never imports
        # torch.nn.functional as F, so calling forward raised NameError.
        out = nn.functional.linear(x, self.weight, self.bias)
        out = self.activation(out)
        out = self.dropout(out)
        return out
|
| 139 |
+
|
| 140 |
+
def get_embedder(embed_type="fourier", num_freqs=-1, input_dim=3, include_pi=True):
|
| 141 |
+
if embed_type == "identity" or (embed_type == "fourier" and num_freqs == -1):
|
| 142 |
+
return nn.Identity(), input_dim
|
| 143 |
+
|
| 144 |
+
elif embed_type == "fourier":
|
| 145 |
+
embedder_obj = FourierEmbedder(num_freqs=num_freqs, include_pi=include_pi)
|
| 146 |
+
|
| 147 |
+
elif embed_type == "learned_fourier":
|
| 148 |
+
embedder_obj = LearnedFourierEmbedder(in_channels=input_dim, dim=num_freqs)
|
| 149 |
+
|
| 150 |
+
elif embed_type == "siren":
|
| 151 |
+
embedder_obj = Siren(in_dim=input_dim, out_dim=num_freqs * input_dim * 2 + input_dim)
|
| 152 |
+
|
| 153 |
+
else:
|
| 154 |
+
raise ValueError(f"{embed_type} is not valid. Currently only supprts {VALID_EMBED_TYPES}")
|
| 155 |
+
return embedder_obj
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
###################### AutoEncoder
class AutoEncoder(BaseModule):
    """Base class for shape auto-encoders.

    Subclasses provide ``encode`` / ``decode`` / ``query``; this base class
    implements the shared variational (KL) bottleneck, the end-to-end forward
    pass, and triangle-mesh extraction from the decoded implicit field.
    """

    @dataclass
    class Config(BaseModule.Config):
        pretrained_model_name_or_path: str = ""  # optional checkpoint to load
        num_latents: int = 256                   # number of latent tokens
        embed_dim: int = 64                      # KL bottleneck dim; 0 disables the bottleneck
        width: int = 768                         # transformer channel width

    cfg: Config

    def configure(self) -> None:
        super().configure()

    def encode(self, x: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        raise NotImplementedError

    def decode(self, z: torch.FloatTensor) -> torch.FloatTensor:
        raise NotImplementedError

    def encode_kl_embed(self, latents: torch.FloatTensor, sample_posterior: bool = True):
        """Map encoder latents through the (optional) variational bottleneck.

        Returns:
            (kl_embed, posterior): bottleneck embedding and its posterior;
            ``posterior`` is None when ``cfg.embed_dim == 0``.
        """
        posterior = None
        if self.cfg.embed_dim > 0:
            # ``pre_kl`` is created by the concrete subclass (see DoraAutoencoder).
            moments = self.pre_kl(latents)
            posterior = DiagonalGaussianDistribution(moments, feat_dim=-1)
            if sample_posterior:
                kl_embed = posterior.sample()
            else:
                kl_embed = posterior.mode()
        else:
            kl_embed = latents
        return kl_embed, posterior

    def forward(self,
                surface: torch.FloatTensor,
                queries: torch.FloatTensor,
                sample_posterior: bool = True,
                sharp_surface: torch.FloatTensor = None):
        """Encode ``surface``, decode the latents, and query the field at ``queries``."""
        shape_latents, kl_embed, posterior = self.encode(
            surface, sample_posterior=sample_posterior, sharp_surface=sharp_surface)

        latents = self.decode(kl_embed)  # [B, num_latents, width]

        logits = self.query(queries, latents)  # [B,]

        return shape_latents, latents, posterior, logits

    def query(self, queries: torch.FloatTensor, latents: torch.FloatTensor) -> torch.FloatTensor:
        raise NotImplementedError

    @torch.no_grad()
    def extract_geometry(self,
                         latents: torch.FloatTensor,
                         extract_mesh_func: str = "mc",
                         bounds: Union[Tuple[float], List[float], float] = (-1.05, -1.05, -1.05, 1.05, 1.05, 1.05),
                         octree_depth: int = 8,
                         num_chunks: int = 100000,
                         use_flashVDM: bool = True,
                         ):
        """Extract one triangle mesh per batch sample from the decoded field.

        Args:
            latents: decoded latent set, [B, num_latents, width].
            extract_mesh_func: "mc" (skimage marching cubes), "diffmc" or "diffdmc" (diso).
            bounds: axis-aligned box to sample; a single float f means [-f, f]^3.
            octree_depth: grid resolution is 2**octree_depth per axis.
            num_chunks: max query points per field evaluation (bounds peak memory).
            use_flashVDM: use hierarchical coarse-to-fine decoding instead of a dense grid.

        Returns:
            mesh_v_f: list of (vertices, faces) pairs, (None, None) on failure.
            has_surface: bool array marking which samples produced a mesh.
        """
        batch_size = latents.shape[0]
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]

        bbox_min = np.array(bounds[0:3])
        bbox_max = np.array(bounds[3:6])
        bbox_size = bbox_max - bbox_min

        if use_flashVDM:
            # Hierarchical decoding avoids querying the full dense grid.
            from .volume_decoders import HierarchicalVolumeDecoding
            volume_decoder = HierarchicalVolumeDecoding()
            grid_logits = volume_decoder(latents, self.query, \
                **{'bounds': bounds, 'octree_resolution': 2**octree_depth, 'num_chunks': num_chunks})
            grid_logits = grid_logits.cpu().float().numpy()
            grid_size = grid_logits.shape[1:4]

        else:
            start_time_generate_dense_grid_points = time.time()
            xyz_samples, grid_size, length = generate_dense_grid_points(
                bbox_min=bbox_min,
                bbox_max=bbox_max,
                octree_depth=octree_depth,
                indexing="ij"
            )
            xyz_samples = torch.FloatTensor(xyz_samples)
            print(f"generate_dense_grid_points time: {time.time()-start_time_generate_dense_grid_points}")

            # Query the field in chunks so peak memory stays bounded.
            start_time_query_sdf = time.time()
            batch_logits = []
            for start in range(0, xyz_samples.shape[0], num_chunks):
                queries = xyz_samples[start: start + num_chunks, :].to(latents)
                batch_queries = repeat(queries, "p c -> b p c", b=batch_size)

                logits = self.query(batch_queries, latents)
                batch_logits.append(logits.cpu())
            print(f"query_sdf time: {time.time()-start_time_query_sdf}")

            grid_logits = torch.cat(batch_logits, dim=1).view((batch_size, grid_size[0], grid_size[1], grid_size[2])).float().numpy()

        start_time_extract_mesh = time.time()
        mesh_v_f = []
        has_surface = np.zeros((batch_size,), dtype=np.bool_)
        for i in range(batch_size):
            try:
                if extract_mesh_func == "mc":
                    from skimage import measure
                    vertices, faces, normals, _ = measure.marching_cubes(grid_logits[i], 0)
                    # Map grid-index coordinates back into the bounding box.
                    vertices = vertices / grid_size * bbox_size + bbox_min
                    faces = faces[:, [2, 1, 0]]  # flip winding order
                elif extract_mesh_func == "diffmc":
                    from diso import DiffMC
                    diffmc = DiffMC(dtype=torch.float32).to(latents.device)
                    diffmc = DiffMC(dtype=torch.float32).to(latents.device) if False else diffmc
                    vertices, faces = diffmc(-torch.tensor(grid_logits[i]).float().to(latents.device), isovalue=0)
                    vertices = vertices * 2 - 1  # [0, 1] grid coords -> [-1, 1]
                    vertices = vertices.cpu().numpy()
                    faces = faces.cpu().numpy()
                    faces = faces[:, [2, 1, 0]]
                elif extract_mesh_func == "diffdmc":
                    from diso import DiffDMC
                    diffmc = DiffDMC(dtype=torch.float32).to(latents.device)
                    vertices, faces = diffmc(-torch.tensor(grid_logits[i]).float().to(latents.device), isovalue=0)
                    vertices = vertices * 2 - 1  # [0, 1] grid coords -> [-1, 1]
                    vertices = vertices.cpu().numpy()
                    faces = faces.cpu().numpy()
                    faces = faces[:, [2, 1, 0]]
                else:
                    raise NotImplementedError(f"{extract_mesh_func} not implement")
                mesh_v_f.append((vertices.astype(np.float32), np.ascontiguousarray(faces.astype(np.int64))))
                has_surface[i] = True
            except Exception:
                # BUGFIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit. Extraction legitimately fails
                # when a sample's grid has no zero crossing.
                mesh_v_f.append((None, None))
                has_surface[i] = False

        print(f"extract_mesh time: {time.time()-start_time_extract_mesh}")
        return mesh_v_f, has_surface
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian over latent features.

    ``parameters`` is either a single tensor with mean and log-variance
    concatenated along ``feat_dim``, or an explicit ``[mean, logvar]`` pair.
    With ``deterministic=True`` the distribution degenerates to its mean.
    """

    def __init__(self, parameters: Union[torch.Tensor, List[torch.Tensor]], deterministic=False, feat_dim=1):
        self.feat_dim = feat_dim
        self.parameters = parameters

        if isinstance(parameters, list):
            self.mean, self.logvar = parameters[0], parameters[1]
        else:
            self.mean, self.logvar = torch.chunk(parameters, 2, dim=feat_dim)

        # Clamp before exponentiating for numerical stability.
        self.logvar = self.logvar.clamp(-30.0, 20.0)
        self.deterministic = deterministic
        self.std = (0.5 * self.logvar).exp()
        self.var = self.logvar.exp()
        if self.deterministic:
            # Zero spread: sampling collapses onto the mean.
            self.var = self.std = torch.zeros_like(self.mean)

    def sample(self):
        """Draw one reparameterized sample."""
        return self.mean + self.std * torch.randn_like(self.mean)

    def kl(self, other=None, dims=(1, 2)):
        """KL divergence to ``other`` (standard normal when None), averaged over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.])
        if other is None:
            return 0.5 * torch.mean(self.mean.pow(2) + self.var - 1.0 - self.logvar,
                                    dim=dims)
        return 0.5 * torch.mean(
            (self.mean - other.mean).pow(2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=dims)

    def nll(self, sample, dims=(1, 2)):
        """Negative log-likelihood of ``sample``, summed over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + (sample - self.mean).pow(2) / self.var,
            dim=dims)

    def mode(self):
        """Distribution mode (== mean for a Gaussian)."""
        return self.mean
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class PerceiverCrossAttentionEncoder(nn.Module):
    """Encode a point cloud into a fixed set of latent tokens.

    Latent queries either come from a learned parameter or, when
    ``use_downsample`` is on, from farthest-point-sampled input points so the
    latents stay anchored to the surface. Optionally a second "sharp" point
    cloud (salient samples) contributes through its own cross-attention branch.
    """

    def __init__(self,
                 use_downsample: bool,
                 num_latents: int,
                 embedder: FourierEmbedder,
                 point_feats: int,
                 embed_point_feats: bool,
                 width: int,
                 heads: int,
                 layers: int,
                 init_scale: float = 0.25,
                 qkv_bias: bool = True,
                 qk_norm: bool = True,
                 use_ln_post: bool = False,
                 use_flash: bool = False,
                 use_checkpoint: bool = False,
                 use_multi_reso: bool = False,
                 resolutions: list = [],
                 sampling_prob: list = [],
                 with_sharp_data: bool = False):

        super().__init__()

        self.use_checkpoint = use_checkpoint
        self.num_latents = num_latents
        self.use_downsample = use_downsample
        self.embed_point_feats = embed_point_feats
        self.use_multi_reso = use_multi_reso
        self.resolutions = resolutions
        self.sampling_prob = sampling_prob

        if not self.use_downsample:
            # Learned latent queries (only needed when not FPS-derived).
            self.query = nn.Parameter(torch.randn((num_latents, width)) * 0.02)

        self.embedder = embedder
        if self.embed_point_feats:
            self.input_proj = nn.Linear(self.embedder.out_dim * 2, width)
        else:
            self.input_proj = nn.Linear(self.embedder.out_dim + point_feats, width)

        self.cross_attn = ResidualCrossAttentionBlock(
            width=width,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            use_flash=use_flash,
        )

        self.with_sharp_data = with_sharp_data
        if with_sharp_data:
            # Half the latent budget goes to each of the two point clouds.
            # (attribute name keeps the original "downsmaple" spelling for compat)
            self.downsmaple_num_latents = num_latents // 2
            self.input_proj_sharp = nn.Linear(self.embedder.out_dim + point_feats, width)
            self.cross_attn_sharp = ResidualCrossAttentionBlock(  # cross-attention for the sharp data
                width=width,
                heads=heads,
                init_scale=init_scale,
                qkv_bias=qkv_bias,
                qk_norm=qk_norm,
                use_flash=use_flash
            )
        else:
            self.downsmaple_num_latents = num_latents

        self.self_attn = Perceiver(
            n_ctx=num_latents,
            width=width,
            layers=layers,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            use_flash=use_flash,
            use_checkpoint=False
        )

        if use_ln_post:
            self.ln_post = nn.LayerNorm(width)
        else:
            self.ln_post = None

    def _forward(self, pc, feats, sharp_pc=None, sharp_feat=None):
        """
        Args:
            pc (torch.FloatTensor): [B, N, 3]
            feats (torch.FloatTensor or None): [B, N, C]
            sharp_pc / sharp_feat: optional salient samples, same layout.

        Returns:
            latents (torch.FloatTensor): [B, num_latents, width]
        """
        bs, N, D = pc.shape

        data = self.embedder(pc)
        if feats is not None:
            if self.embed_point_feats:
                feats = self.embedder(feats)
            data = torch.cat([data, feats], dim=-1)
        data = self.input_proj(data)

        if self.with_sharp_data:
            sharp_data = self.embedder(sharp_pc)
            if sharp_feat is not None:
                if self.embed_point_feats:
                    sharp_feat = self.embedder(sharp_feat)
                sharp_data = torch.cat([sharp_data, sharp_feat], dim=-1)
            sharp_data = self.input_proj_sharp(sharp_data)

        if self.use_multi_reso:
            # BUGFIX: the stdlib ``random.choice`` accepts no ``size``/``p``
            # arguments; this call only works as ``np.random.choice``.
            resolution = np.random.choice(self.resolutions, size=1, p=self.sampling_prob)[0]

            if resolution != N:
                # BUGFIX: ``fps`` was only imported in the use_downsample
                # branch below; import it here too.
                from torch_cluster import fps
                flattened = pc.view(bs * N, D)
                batch = torch.arange(bs).to(pc.device)
                batch = torch.repeat_interleave(batch, N)  # bs*N
                pos = flattened
                ratio = 1.0 * resolution / N
                idx = fps(pos, batch, ratio=ratio)
                pc = pc.view(bs * N, -1)[idx].view(bs, -1, D)
                # NOTE(review): assumes ``feats`` is not None on this path — confirm callers.
                bs, N, D = feats.shape
                flattened1 = feats.view(bs * N, D)
                feats = flattened1.view(bs * N, -1)[idx].view(bs, -1, D)
                bs, N, D = pc.shape

        if self.use_downsample:
            ###### fps: anchor the latent queries to sampled surface points
            from torch_cluster import fps
            flattened = pc.view(bs * N, D)

            batch = torch.arange(bs).to(pc.device)
            batch = torch.repeat_interleave(batch, N)  # bs*N

            pos = flattened.to(torch.float16)

            ratio = 1.0 * self.downsmaple_num_latents / N

            idx = fps(pos, batch, ratio=ratio).detach()

            query = data.view(bs * N, -1)[idx].view(bs, -1, data.shape[-1])

            if self.with_sharp_data:
                bs, N, D = sharp_pc.shape
                flattened = sharp_pc.view(bs * N, D)

                pos = flattened.to(torch.float16)

                ratio = 1.0 * self.downsmaple_num_latents / N

                idx = fps(pos, batch, ratio=ratio).detach()

                sharp_query = sharp_data.view(bs * N, -1)[idx].view(bs, -1, sharp_data.shape[-1])

                query = torch.cat([query, sharp_query], dim=1)
        else:
            query = self.query
            query = repeat(query, "m c -> b m c", b=bs)

        latents = self.cross_attn(query, data)
        if self.with_sharp_data:
            latents = latents + self.cross_attn_sharp(query, sharp_data)
        latents = self.self_attn(latents)

        if self.ln_post is not None:
            latents = self.ln_post(latents)

        return latents

    def forward(self, pc: torch.FloatTensor,
                feats: Optional[torch.FloatTensor] = None,
                sharp_pc: Optional[torch.FloatTensor] = None,
                sharp_feats: Optional[torch.FloatTensor] = None):
        """
        Args:
            pc (torch.FloatTensor): [B, N, 3]
            feats (torch.FloatTensor or None): [B, N, C]

        Returns:
            latents (torch.FloatTensor): [B, num_latents, width]
        """
        # Optionally trade compute for memory via activation checkpointing.
        return checkpoint(self._forward, (pc, feats, sharp_pc, sharp_feats), self.parameters(), self.use_checkpoint)
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
class PerceiverCrossAttentionDecoder(nn.Module):
    """Decode an implicit field: query points cross-attend to the latent set."""

    def __init__(self,
                 num_latents: int,
                 out_dim: int,
                 embedder: FourierEmbedder,
                 width: int,
                 heads: int,
                 init_scale: float = 0.25,
                 qkv_bias: bool = True,
                 qk_norm: bool = True,
                 use_flash: bool = False,
                 use_checkpoint: bool = False):

        super().__init__()

        self.use_checkpoint = use_checkpoint
        self.embedder = embedder

        # Lift embedded query coordinates into the transformer width.
        self.query_proj = nn.Linear(self.embedder.out_dim, width)

        self.cross_attn_decoder = ResidualCrossAttentionBlock(
            n_data=num_latents,
            width=width,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            use_flash=use_flash
        )

        self.ln_post = nn.LayerNorm(width)
        self.output_proj = nn.Linear(width, out_dim)

    def _forward(self, queries: torch.FloatTensor, latents: torch.FloatTensor):
        # Embed -> project -> cross-attend to latents -> normalize -> head.
        embedded = self.query_proj(self.embedder(queries))
        attended = self.cross_attn_decoder(embedded, latents)
        return self.output_proj(self.ln_post(attended))

    def forward(self, queries: torch.FloatTensor, latents: torch.FloatTensor):
        # Optionally trade compute for memory via activation checkpointing.
        return checkpoint(self._forward, (queries, latents), self.parameters(), self.use_checkpoint)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
@craftsman.register("dora-autoencoder")
class DoraAutoencoder(AutoEncoder):
    r"""
    A VAE model for encoding shapes into latents and decoding latent
    representations into shapes.
    Dora: (https://aruichen.github.io/Dora/)
    """

    @dataclass
    class Config(BaseModule.Config):
        pretrained_model_name_or_path: str = ""
        n_samples: int = 4096                # max surface points fed to the encoder
        use_downsample: bool = False
        downsample_ratio: float = 0.0625
        num_latents: int = 256
        point_feats: int = 0                 # extra per-point channels (e.g. normals)
        embed_point_feats: bool = False
        out_dim: int = 1
        embed_dim: int = 64
        embed_type: str = "fourier"
        num_freqs: int = 8
        include_pi: bool = True
        width: int = 768
        heads: int = 12
        num_encoder_layers: int = 8
        num_decoder_layers: int = 16
        init_scale: float = 0.25
        qkv_bias: bool = True
        qk_norm: bool = False
        use_ln_post: bool = False
        use_flash: bool = False
        use_checkpoint: bool = True
        use_multi_reso: Optional[bool] = False
        resolutions: Optional[List[int]] = None
        sampling_prob: Optional[List[float]] = None
        with_sharp_data: Optional[bool] = True

    cfg: Config

    def configure(self) -> None:
        super().configure()

        self.embedder = get_embedder(embed_type=self.cfg.embed_type, num_freqs=self.cfg.num_freqs, include_pi=self.cfg.include_pi)

        # encoder; attention init is scaled by 1/sqrt(width)
        self.cfg.init_scale = self.cfg.init_scale * math.sqrt(1.0 / self.cfg.width)
        self.encoder = PerceiverCrossAttentionEncoder(
            use_downsample=self.cfg.use_downsample,
            embedder=self.embedder,
            num_latents=self.cfg.num_latents,
            point_feats=self.cfg.point_feats,
            embed_point_feats=self.cfg.embed_point_feats,
            width=self.cfg.width,
            heads=self.cfg.heads,
            layers=self.cfg.num_encoder_layers,
            init_scale=self.cfg.init_scale,
            qkv_bias=self.cfg.qkv_bias,
            qk_norm=self.cfg.qk_norm,
            use_ln_post=self.cfg.use_ln_post,
            use_flash=self.cfg.use_flash,
            use_checkpoint=self.cfg.use_checkpoint,
            use_multi_reso=self.cfg.use_multi_reso,
            resolutions=self.cfg.resolutions,
            sampling_prob=self.cfg.sampling_prob,
            with_sharp_data=self.cfg.with_sharp_data
        )

        if self.cfg.embed_dim > 0:
            # VAE bottleneck: width -> (mean, logvar) -> embed_dim -> width
            self.pre_kl = nn.Linear(self.cfg.width, self.cfg.embed_dim * 2)
            self.post_kl = nn.Linear(self.cfg.embed_dim, self.cfg.width)
            self.latent_shape = (self.cfg.num_latents, self.cfg.embed_dim)
        else:
            self.latent_shape = (self.cfg.num_latents, self.cfg.width)

        self.transformer = Perceiver(
            n_ctx=self.cfg.num_latents,
            width=self.cfg.width,
            layers=self.cfg.num_decoder_layers,
            heads=self.cfg.heads,
            init_scale=self.cfg.init_scale,
            qkv_bias=self.cfg.qkv_bias,
            qk_norm=self.cfg.qk_norm,
            use_flash=self.cfg.use_flash,
            use_checkpoint=self.cfg.use_checkpoint
        )

        # decoder
        self.decoder = PerceiverCrossAttentionDecoder(
            embedder=self.embedder,
            out_dim=self.cfg.out_dim,
            num_latents=self.cfg.num_latents,
            width=self.cfg.width,
            heads=self.cfg.heads,
            init_scale=self.cfg.init_scale,
            qkv_bias=self.cfg.qkv_bias,
            qk_norm=self.cfg.qk_norm,
            use_flash=self.cfg.use_flash,
            use_checkpoint=self.cfg.use_checkpoint
        )

        if self.cfg.pretrained_model_name_or_path != "":
            self._load_pretrained(self.cfg.pretrained_model_name_or_path)

    def _load_pretrained(self, path: str) -> None:
        """Load VAE weights from a checkpoint, stripping the 'shape_model.' prefix.

        Handles both Lightning-style checkpoints (wrapped in 'state_dict') and
        raw state dicts. CONSISTENCY FIX: the two original branches filtered on
        ``startswith('shape_model.')`` vs ``startswith('shape_model')``; both
        now use the dotted prefix.
        """
        print(f"Loading pretrained VAE model from {path}")
        ckpt = torch.load(path, map_location="cpu")
        state_dict = ckpt['state_dict'] if 'state_dict' in ckpt else ckpt
        filtered = {k.replace('shape_model.', ''): v
                    for k, v in state_dict.items() if k.startswith('shape_model.')}
        self.load_state_dict(filtered, strict=True)

    @staticmethod
    def _fps_subsample(pc: torch.FloatTensor, feats: torch.FloatTensor, n_samples: int):
        """Farthest-point-sample ``pc`` (and matching ``feats``) down to ``n_samples`` points."""
        from torch_cluster import fps
        bs, N, D = pc.shape
        flattened = pc.view(bs * N, D)
        batch = torch.repeat_interleave(torch.arange(bs).to(pc.device), N)  # bs*N
        # half precision for the fps call, as in the reference implementation
        pos = flattened.to(torch.float16)
        idx = fps(pos, batch, ratio=n_samples / N).detach()
        pc = pc.view(bs * N, -1)[idx].view(bs, -1, pc.shape[-1])
        feats = feats.view(bs * N, -1)[idx].view(bs, -1, feats.shape[-1])
        return pc, feats

    def encode(self,
               surface: torch.FloatTensor,
               sample_posterior: bool = True,
               sharp_surface: torch.FloatTensor = None):
        """
        Args:
            surface (torch.FloatTensor): [B, N, 3+C]
            sample_posterior (bool):
            sharp_surface (torch.FloatTensor or None): [B, N, 3+C] salient samples

        Returns:
            shape_latents (torch.FloatTensor): [B, num_latents, width]
            kl_embed (torch.FloatTensor): [B, num_latents, embed_dim]
            posterior (DiagonalGaussianDistribution or None):
        """
        assert surface.shape[-1] == 3 + self.cfg.point_feats, f"\
            Expected {3 + self.cfg.point_feats} channels, got {surface.shape[-1]}"

        pc, feats = surface[..., :3], surface[..., 3:]  # B, n_samples, 3
        if pc.shape[1] > self.cfg.n_samples:
            pc, feats = self._fps_subsample(pc, feats, self.cfg.n_samples)

        if sharp_surface is not None:
            sharp_pc, sharp_feats = sharp_surface[..., :3], sharp_surface[..., 3:]
            if sharp_pc.shape[1] > self.cfg.n_samples:
                sharp_pc, sharp_feats = self._fps_subsample(sharp_pc, sharp_feats, self.cfg.n_samples)
        else:
            sharp_pc, sharp_feats = None, None

        shape_latents = self.encoder(pc, feats, sharp_pc, sharp_feats)  # B, num_latents, width
        kl_embed, posterior = self.encode_kl_embed(shape_latents, sample_posterior)  # B, num_latents, embed_dim

        return shape_latents, kl_embed, posterior

    def decode(self,
               latents: torch.FloatTensor):
        """
        Args:
            latents (torch.FloatTensor): [B, num_latents, embed_dim]

        Returns:
            latents (torch.FloatTensor): [B, num_latents, width]
        """
        if latents.dtype == torch.bfloat16:
            # NOTE(review): bf16 inputs are cast to fp16 before post_kl —
            # presumably for kernel compatibility downstream; confirm.
            latents = latents.to(torch.float16)
        latents = self.post_kl(latents)  # [B, num_latents, embed_dim] -> [B, num_latents, width]

        return self.transformer(latents)

    def query(self,
              queries: torch.FloatTensor,
              latents: torch.FloatTensor):
        """
        Args:
            queries (torch.FloatTensor): [B, N, 3]
            latents (torch.FloatTensor): [B, num_latents, width]

        Returns:
            logits (torch.FloatTensor): [B, N], occupancy logits
        """
        logits = self.decoder(queries, latents)

        return logits
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/michelangelo_autoencoder.py
ADDED
|
@@ -0,0 +1,699 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import numpy as np
|
| 6 |
+
import random
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
from einops import repeat, rearrange
|
| 9 |
+
|
| 10 |
+
import craftsman
|
| 11 |
+
from craftsman.models.transformers.perceiver_1d import Perceiver
|
| 12 |
+
from craftsman.models.transformers.attention import ResidualCrossAttentionBlock
|
| 13 |
+
from craftsman.utils.checkpoint import checkpoint
|
| 14 |
+
from craftsman.utils.base import BaseModule
|
| 15 |
+
from craftsman.utils.typing import *
|
| 16 |
+
from craftsman.utils.misc import get_world_size
|
| 17 |
+
from craftsman.utils.ops import generate_dense_grid_points
|
| 18 |
+
|
| 19 |
+
###################### Utils
|
| 20 |
+
VALID_EMBED_TYPES = ["identity", "fourier", "learned_fourier", "siren"]
|
| 21 |
+
|
| 22 |
+
class FourierEmbedder(nn.Module):
    """Map coordinates to fixed sin/cos Fourier features.

    Output layout is ``[x?, sin(f*x), cos(f*x)]`` concatenated over all
    frequencies; ``out_dim`` reports the resulting channel count.
    """

    def __init__(self,
                 num_freqs: int = 6,
                 logspace: bool = True,
                 input_dim: int = 3,
                 include_input: bool = True,
                 include_pi: bool = True) -> None:
        super().__init__()

        if logspace:
            # Powers of two: 1, 2, 4, ..., 2^(num_freqs-1)
            frequencies = torch.pow(2.0, torch.arange(num_freqs, dtype=torch.float32))
        else:
            frequencies = torch.linspace(1.0,
                                         2.0 ** (num_freqs - 1),
                                         num_freqs,
                                         dtype=torch.float32)

        if include_pi:
            frequencies = frequencies * torch.pi

        # Non-persistent: the frequencies are derived, not learned state.
        self.register_buffer("frequencies", frequencies, persistent=False)
        self.include_input = include_input
        self.num_freqs = num_freqs

        self.out_dim = self.get_dims(input_dim)

    def get_dims(self, input_dim):
        """Output channels for an ``input_dim``-channel input."""
        extra = 1 if (self.include_input or self.num_freqs == 0) else 0
        return input_dim * (2 * self.num_freqs + extra)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.num_freqs <= 0:
            # Degenerate embedder: pass-through.
            return x
        embed = (x[..., None].contiguous() * self.frequencies).view(*x.shape[:-1], -1)
        pieces = [embed.sin(), embed.cos()]
        if self.include_input:
            pieces.insert(0, x)
        return torch.cat(pieces, dim=-1)
|
| 68 |
+
|
| 69 |
+
class LearnedFourierEmbedder(nn.Module):
    """Fourier features with learned (randomly initialized) frequencies.

    Each input channel is projected against one shared learned frequency
    vector; the input itself is always concatenated to the features.
    """

    def __init__(self, input_dim, dim):
        super().__init__()
        assert (dim % 2) == 0
        # Split the budget: half sin, half cos, spread over the input channels.
        per_channel_dim = (dim // 2) // input_dim
        self.weights = nn.Parameter(torch.randn(per_channel_dim))

        self.out_dim = self.get_dims(input_dim)

    def forward(self, x):
        # [..., c, 1] * [1, d] -> [..., c, d] -> [..., c*d]
        projected = x[..., None] * self.weights[None] * 2 * np.pi
        freqs = projected.view(*x.shape[:-1], -1)
        return torch.cat((x, freqs.sin(), freqs.cos()), dim=-1)

    def get_dims(self, input_dim):
        """Output channels for an ``input_dim``-channel input."""
        return input_dim * (2 * self.weights.shape[0] + 1)
|
| 87 |
+
|
| 88 |
+
class Sine(nn.Module):
    """Sine activation with frequency scale ``w0`` (SIREN-style)."""

    def __init__(self, w0=1.):
        super().__init__()
        self.w0 = w0

    def forward(self, x):
        return (self.w0 * x).sin()
|
| 94 |
+
|
| 95 |
+
class Siren(nn.Module):
    """Single SIREN layer: linear map followed by a Sine activation.

    Weight init follows the SIREN scheme: U(-1/dim, 1/dim) for the first
    layer, U(-sqrt(c/dim)/w0, sqrt(c/dim)/w0) otherwise.
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        w0 = 1.,
        c = 6.,
        is_first = False,
        use_bias = True,
        activation = None,
        dropout = 0.
    ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.is_first = is_first

        weight = torch.zeros(out_dim, in_dim)
        bias = torch.zeros(out_dim) if use_bias else None
        self.init_(weight, bias, c = c, w0 = w0)

        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias) if use_bias else None
        self.activation = Sine(w0) if activation is None else activation
        self.dropout = nn.Dropout(dropout)

    def init_(self, weight, bias, c, w0):
        """In-place SIREN initialization of ``weight`` (and ``bias``)."""
        dim = self.in_dim

        w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
        weight.uniform_(-w_std, w_std)

        if bias is not None:
            bias.uniform_(-w_std, w_std)

    def forward(self, x):
        # BUGFIX: the original called ``F.linear`` but ``torch.nn.functional``
        # is never imported as ``F`` in this module (NameError at runtime);
        # use the fully-qualified name instead.
        out = torch.nn.functional.linear(x, self.weight, self.bias)
        out = self.activation(out)
        out = self.dropout(out)
        return out
|
| 135 |
+
|
| 136 |
+
def get_embedder(embed_type="fourier", num_freqs=-1, input_dim=3, include_pi=True):
    """Build a positional embedder for 3D coordinates.

    Args:
        embed_type: one of "identity", "fourier", "learned_fourier", "siren".
        num_freqs: number of frequencies; -1 with "fourier" falls back to identity.
        input_dim: dimensionality of the coordinates to embed.
        include_pi: forwarded to FourierEmbedder.

    Returns:
        The embedder module. NOTE(review): the identity path returns a
        ``(module, out_dim)`` tuple while every other path returns the bare
        module (callers then read ``.out_dim``). Kept as-is for backward
        compatibility — verify callers before relying on the identity path.

    Raises:
        ValueError: if ``embed_type`` is not recognised.
    """
    if embed_type == "identity" or (embed_type == "fourier" and num_freqs == -1):
        return nn.Identity(), input_dim

    elif embed_type == "fourier":
        embedder_obj = FourierEmbedder(num_freqs=num_freqs, include_pi=include_pi)

    elif embed_type == "learned_fourier":
        embedder_obj = LearnedFourierEmbedder(in_channels=input_dim, dim=num_freqs)

    elif embed_type == "siren":
        embedder_obj = Siren(in_dim=input_dim, out_dim=num_freqs * input_dim * 2 + input_dim)

    else:
        # Fixed typo in the error message ("supprts" -> "supports").
        raise ValueError(f"{embed_type} is not valid. Currently only supports {VALID_EMBED_TYPES}")
    return embedder_obj
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
###################### AutoEncoder
class AutoEncoder(BaseModule):
    """Base class for shape auto-encoders.

    Subclasses encode a surface point cloud into latent tokens, decode those
    latents back into feature tokens, and answer occupancy/SDF queries at
    arbitrary 3D points via ``query``.
    """

    @dataclass
    class Config(BaseModule.Config):
        pretrained_model_name_or_path: str = ""  # optional checkpoint to warm-start from
        num_latents: int = 256                   # number of latent tokens
        embed_dim: int = 64                      # KL embedding channels (0 disables the VAE bottleneck)
        width: int = 768                         # transformer width

    cfg: Config

    def configure(self) -> None:
        super().configure()

    def encode(self, x: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Encode a surface into latents; implemented by subclasses."""
        raise NotImplementedError

    def decode(self, z: torch.FloatTensor) -> torch.FloatTensor:
        """Decode latents back into feature tokens; implemented by subclasses."""
        raise NotImplementedError

    def encode_kl_embed(self, latents: torch.FloatTensor, sample_posterior: bool = True):
        """Pass encoder tokens through the KL bottleneck (if embed_dim > 0).

        Args:
            latents: [B, num_latents, width] encoder output tokens.
            sample_posterior: sample from the posterior instead of taking its mode.

        Returns:
            (kl_embed, posterior); ``posterior`` is None when no bottleneck
            is configured (embed_dim == 0), in which case ``kl_embed`` is the
            input unchanged.
        """
        posterior = None
        if self.cfg.embed_dim > 0:
            moments = self.pre_kl(latents)
            posterior = DiagonalGaussianDistribution(moments, feat_dim=-1)
            if sample_posterior:
                kl_embed = posterior.sample()
            else:
                kl_embed = posterior.mode()
        else:
            kl_embed = latents
        return kl_embed, posterior

    def forward(self,
                surface: torch.FloatTensor,
                queries: torch.FloatTensor,
                sample_posterior: bool = True):
        """Full auto-encoding pass: encode the surface, decode the latents,
        then query occupancy logits at ``queries``."""
        shape_latents, kl_embed, posterior = self.encode(surface, sample_posterior=sample_posterior)

        latents = self.decode(kl_embed)  # [B, num_latents, width]

        logits = self.query(queries, latents)  # [B,]

        return shape_latents, latents, posterior, logits

    def query(self, queries: torch.FloatTensor, latents: torch.FloatTensor) -> torch.FloatTensor:
        raise NotImplementedError

    @torch.no_grad()
    def extract_geometry(self,
                         latents: torch.FloatTensor,
                         extract_mesh_func: str = "mc",
                         bounds: Union[Tuple[float], List[float], float] = (-1.05, -1.05, -1.05, 1.05, 1.05, 1.05),
                         octree_depth: int = 8,
                         num_chunks: int = 100000,
                         use_flashVDM: bool = True,
                         ):
        """Extract triangle meshes from decoded latents via marching cubes.

        Args:
            latents: [B, num_latents, width] decoded shape tokens.
            extract_mesh_func: "mc" (skimage), "diffmc" or "diffdmc" (diso).
            bounds: sampling box — a scalar (symmetric box) or
                (xmin, ymin, zmin, xmax, ymax, zmax).
            octree_depth: grid resolution is 2**octree_depth.
            num_chunks: query points evaluated per chunk (dense path only).
            use_flashVDM: use hierarchical coarse-to-fine volume decoding
                instead of the dense grid query.

        Returns:
            (mesh_v_f, has_surface): per-sample (vertices, faces) tuples —
            (None, None) on failure — and a boolean success mask.
        """
        batch_size = latents.shape[0]
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]

        bbox_min = np.array(bounds[0:3])
        bbox_max = np.array(bounds[3:6])
        bbox_size = bbox_max - bbox_min

        if use_flashVDM:
            # Hierarchical decoding evaluates coarse levels first and refines
            # only near the surface.
            from .volume_decoders import HierarchicalVolumeDecoding
            volume_decoder = HierarchicalVolumeDecoding()
            grid_logits = volume_decoder(latents, self.query, \
                **{'bounds': bounds, 'octree_resolution': 2**octree_depth, 'num_chunks': num_chunks})
            grid_logits = grid_logits.cpu().float().numpy()
            grid_size = grid_logits.shape[1:4]

        else:
            start_time_generate_dense_grid_points = time.time()
            xyz_samples, grid_size, length = generate_dense_grid_points(
                bbox_min=bbox_min,
                bbox_max=bbox_max,
                octree_depth=octree_depth,
                indexing="ij"
            )
            xyz_samples = torch.FloatTensor(xyz_samples)
            print(f"generate_dense_grid_points time: {time.time()-start_time_generate_dense_grid_points}")

            # Query the implicit field chunk-by-chunk to bound peak memory.
            start_time_query_sdf = time.time()
            batch_logits = []
            for start in range(0, xyz_samples.shape[0], num_chunks):
                queries = xyz_samples[start: start + num_chunks, :].to(latents)
                batch_queries = repeat(queries, "p c -> b p c", b=batch_size)

                logits = self.query(batch_queries, latents)
                batch_logits.append(logits.cpu())
            print(f"query_sdf time: {time.time()-start_time_query_sdf}")

            grid_logits = torch.cat(batch_logits, dim=1).view((batch_size, grid_size[0], grid_size[1], grid_size[2])).float().numpy()

        start_time_extract_mesh = time.time()
        mesh_v_f = []
        has_surface = np.zeros((batch_size,), dtype=np.bool_)
        for i in range(batch_size):
            try:
                if extract_mesh_func == "mc":
                    from skimage import measure
                    vertices, faces, normals, _ = measure.marching_cubes(grid_logits[i], 0)
                    # Map voxel coordinates back into world space.
                    vertices = vertices / grid_size * bbox_size + bbox_min
                    faces = faces[:, [2, 1, 0]]
                elif extract_mesh_func == "diffmc":
                    from diso import DiffMC
                    diffmc = DiffMC(dtype=torch.float32).to(latents.device)
                    vertices, faces = diffmc(-torch.tensor(grid_logits[i]).float().to(latents.device), isovalue=0)
                    vertices = vertices * 2 - 1
                    vertices = vertices.cpu().numpy()
                    faces = faces.cpu().numpy()
                    faces = faces[:, [2, 1, 0]]
                elif extract_mesh_func == "diffdmc":
                    from diso import DiffDMC
                    diffmc = DiffDMC(dtype=torch.float32).to(latents.device)
                    vertices, faces = diffmc(-torch.tensor(grid_logits[i]).float().to(latents.device), isovalue=0)
                    vertices = vertices * 2 - 1
                    vertices = vertices.cpu().numpy()
                    faces = faces.cpu().numpy()
                    faces = faces[:, [2, 1, 0]]
                else:
                    raise NotImplementedError(f"{extract_mesh_func} not implement")
                mesh_v_f.append((vertices.astype(np.float32), np.ascontiguousarray(faces.astype(np.int64))))
                has_surface[i] = True
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. A failed extraction still
                # yields (None, None) for this sample.
                mesh_v_f.append((None, None))
                has_surface[i] = False

        print(f"extract_mesh time: {time.time()-start_time_extract_mesh}")
        return mesh_v_f, has_surface
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian whose mean/logvar are either split from a single
    tensor along ``feat_dim`` or supplied as a two-element list."""

    def __init__(self, parameters: Union[torch.Tensor, List[torch.Tensor]], deterministic=False, feat_dim=1):
        self.feat_dim = feat_dim
        self.parameters = parameters

        if isinstance(parameters, list):
            self.mean, self.logvar = parameters[0], parameters[1]
        else:
            self.mean, self.logvar = torch.chunk(parameters, 2, dim=feat_dim)

        # Clamp before exponentiating for numerical stability.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Degenerate distribution: zero variance, sampling returns the mean.
            self.var = self.std = torch.zeros_like(self.mean)

    def sample(self):
        # Reparameterised draw: mean + std * eps.
        return self.mean + self.std * torch.randn_like(self.mean)

    def kl(self, other=None, dims=(1, 2)):
        """KL divergence to a standard normal (``other=None``) or to another
        diagonal Gaussian, averaged over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.])
        if other is None:
            return 0.5 * torch.mean(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=dims)
        return 0.5 * torch.mean(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=dims)

    def nll(self, sample, dims=(1, 2)):
        """Negative log-likelihood of ``sample`` under this Gaussian, summed
        over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean
| 335 |
+
|
| 336 |
+
|
| 337 |
+
class PerceiverCrossAttentionEncoder(nn.Module):
    """Encode a surface point cloud into a fixed set of latent tokens:
    embedded points cross-attend into latent queries, followed by a
    Perceiver self-attention stack."""

    def __init__(self,
                 use_downsample: bool,
                 num_latents: int,
                 embedder: FourierEmbedder,
                 point_feats: int,
                 embed_point_feats: bool,
                 width: int,
                 heads: int,
                 layers: int,
                 init_scale: float = 0.25,
                 qkv_bias: bool = True,
                 use_ln_post: bool = False,
                 use_flash: bool = False,
                 use_checkpoint: bool = False,
                 use_multi_reso: bool = False,
                 resolutions: Optional[list] = None,
                 sampling_prob: Optional[list] = None):
        """
        Args:
            use_downsample: derive latent queries by FPS-downsampling the input
                instead of using learned query tokens.
            num_latents: number of latent tokens.
            embedder: positional embedder providing ``out_dim``.
            point_feats: number of extra per-point feature channels.
            embed_point_feats: also run the features through the embedder.
            use_multi_reso: randomly subsample the cloud to one of
                ``resolutions`` (drawn with ``sampling_prob``) during forward.
        """
        super().__init__()

        self.use_checkpoint = use_checkpoint
        self.num_latents = num_latents
        self.use_downsample = use_downsample
        self.embed_point_feats = embed_point_feats
        self.use_multi_reso = use_multi_reso
        # Fix: the defaults were mutable lists ([]) shared across instances;
        # use None sentinels and fall back to fresh lists.
        self.resolutions = [] if resolutions is None else resolutions
        self.sampling_prob = [] if sampling_prob is None else sampling_prob

        if not self.use_downsample:
            self.query = nn.Parameter(torch.randn((num_latents, width)) * 0.02)

        self.embedder = embedder
        if self.embed_point_feats:
            self.input_proj = nn.Linear(self.embedder.out_dim * 2, width)
        else:
            self.input_proj = nn.Linear(self.embedder.out_dim + point_feats, width)

        self.cross_attn = ResidualCrossAttentionBlock(
            width=width,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            use_flash=use_flash,
        )

        self.self_attn = Perceiver(
            n_ctx=num_latents,
            width=width,
            layers=layers,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            use_flash=use_flash,
            use_checkpoint=False
        )

        if use_ln_post:
            self.ln_post = nn.LayerNorm(width)
        else:
            self.ln_post = None

    def _forward(self, pc, feats):
        """
        Args:
            pc (torch.FloatTensor): [B, N, 3]
            feats (torch.FloatTensor or None): [B, N, C]

        Returns:
            latents (torch.FloatTensor): [B, num_latents, width]
        """
        bs, N, D = pc.shape

        data = self.embedder(pc)
        if feats is not None:
            if self.embed_point_feats:
                feats = self.embedder(feats)
            data = torch.cat([data, feats], dim=-1)
        data = self.input_proj(data)

        if self.use_multi_reso:
            # Fix: the original called stdlib random.choice with numpy-only
            # keyword arguments (size=, p=), which raises TypeError; use
            # np.random.choice as clearly intended.
            resolution = np.random.choice(self.resolutions, size=1, p=self.sampling_prob)[0]

            if resolution != N:
                # Fix: fps was only imported in the use_downsample branch
                # below, so this path raised NameError.
                from torch_cluster import fps
                flattened = pc.view(bs*N, D)
                batch = torch.arange(bs).to(pc.device)
                batch = torch.repeat_interleave(batch, N)  # bs*N
                pos = flattened
                ratio = 1.0 * resolution / N
                idx = fps(pos, batch, ratio=ratio)
                pc = pc.view(bs*N, -1)[idx].view(bs, -1, D)
                # NOTE(review): this path assumes feats is not None, and
                # `data` (computed above at full resolution) is NOT
                # resampled alongside pc/feats — confirm intended.
                bs, N, D = feats.shape
                flattened1 = feats.view(bs*N, D)
                feats = flattened1.view(bs*N, -1)[idx].view(bs, -1, D)
                bs, N, D = pc.shape

        if self.use_downsample:
            # Derive latent queries by farthest-point-sampling the inputs.
            from torch_cluster import fps
            flattened = pc.view(bs*N, D)  # bs*N, 3

            batch = torch.arange(bs).to(pc.device)
            batch = torch.repeat_interleave(batch, N)  # bs*N

            pos = flattened

            ratio = 1.0 * self.num_latents / N

            idx = fps(pos, batch, ratio=ratio)

            query = data.view(bs*N, -1)[idx].view(bs, -1, data.shape[-1])
        else:
            query = self.query
            query = repeat(query, "m c -> b m c", b=bs)

        latents = self.cross_attn(query, data)
        latents = self.self_attn(latents)

        if self.ln_post is not None:
            latents = self.ln_post(latents)

        return latents

    def forward(self, pc: torch.FloatTensor, feats: Optional[torch.FloatTensor] = None):
        """
        Args:
            pc (torch.FloatTensor): [B, N, 3]
            feats (torch.FloatTensor or None): [B, N, C]

        Returns:
            latents (torch.FloatTensor): [B, num_latents, width]
        """
        return checkpoint(self._forward, (pc, feats), self.parameters(), self.use_checkpoint)
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class PerceiverCrossAttentionDecoder(nn.Module):
    """Decode an implicit field: embedded query points cross-attend to the
    shape latents and are projected down to ``out_dim`` channels."""

    def __init__(self,
                 num_latents: int,
                 out_dim: int,
                 embedder: FourierEmbedder,
                 width: int,
                 heads: int,
                 init_scale: float = 0.25,
                 qkv_bias: bool = True,
                 use_flash: bool = False,
                 use_checkpoint: bool = False):

        super().__init__()

        self.use_checkpoint = use_checkpoint
        self.embedder = embedder

        # Lift embedded query coordinates to the transformer width.
        self.query_proj = nn.Linear(self.embedder.out_dim, width)

        self.cross_attn_decoder = ResidualCrossAttentionBlock(
            n_data=num_latents,
            width=width,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            use_flash=use_flash
        )

        self.ln_post = nn.LayerNorm(width)
        self.output_proj = nn.Linear(width, out_dim)

    def _forward(self, queries: torch.FloatTensor, latents: torch.FloatTensor):
        # embed -> project -> cross-attend to latents -> norm -> output head
        embedded = self.query_proj(self.embedder(queries))
        attended = self.cross_attn_decoder(embedded, latents)
        return self.output_proj(self.ln_post(attended))

    def forward(self, queries: torch.FloatTensor, latents: torch.FloatTensor):
        return checkpoint(self._forward, (queries, latents), self.parameters(), self.use_checkpoint)
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
@craftsman.register("michelangelo-autoencoder")
class MichelangeloAutoencoder(AutoEncoder):
    r"""
    A VAE model for encoding shapes into latents and decoding latent representations into shapes.
    """

    @dataclass
    class Config(BaseModule.Config):
        pretrained_model_name_or_path: str = ""
        n_samples: int = 4096
        use_downsample: bool = False
        downsample_ratio: float = 0.0625
        num_latents: int = 256
        point_feats: int = 0
        embed_point_feats: bool = False
        out_dim: int = 1
        embed_dim: int = 64
        embed_type: str = "fourier"
        num_freqs: int = 8
        include_pi: bool = True
        width: int = 768
        heads: int = 12
        num_encoder_layers: int = 8
        num_decoder_layers: int = 16
        init_scale: float = 0.25
        qkv_bias: bool = True
        use_ln_post: bool = False
        use_flash: bool = False
        use_checkpoint: bool = True
        use_multi_reso: Optional[bool] = False
        resolutions: Optional[List[int]] = None
        sampling_prob: Optional[List[float]] = None

    cfg: Config

    def configure(self) -> None:
        """Build embedder, encoder, optional VAE bottleneck, transformer and
        decoder, then optionally warm-start from a checkpoint."""
        super().configure()

        self.embedder = get_embedder(embed_type=self.cfg.embed_type, num_freqs=self.cfg.num_freqs, include_pi=self.cfg.include_pi)

        # encoder — rescale init by 1/sqrt(width)
        self.cfg.init_scale = self.cfg.init_scale * math.sqrt(1.0 / self.cfg.width)
        self.encoder = PerceiverCrossAttentionEncoder(
            use_downsample=self.cfg.use_downsample,
            embedder=self.embedder,
            num_latents=self.cfg.num_latents,
            point_feats=self.cfg.point_feats,
            embed_point_feats=self.cfg.embed_point_feats,
            width=self.cfg.width,
            heads=self.cfg.heads,
            layers=self.cfg.num_encoder_layers,
            init_scale=self.cfg.init_scale,
            qkv_bias=self.cfg.qkv_bias,
            use_ln_post=self.cfg.use_ln_post,
            use_flash=self.cfg.use_flash,
            use_checkpoint=self.cfg.use_checkpoint,
            use_multi_reso=self.cfg.use_multi_reso,
            resolutions=self.cfg.resolutions,
            sampling_prob=self.cfg.sampling_prob
        )

        if self.cfg.embed_dim > 0:
            # VAE bottleneck: width -> 2*embed_dim (mean/logvar) and back.
            self.pre_kl = nn.Linear(self.cfg.width, self.cfg.embed_dim * 2)
            self.post_kl = nn.Linear(self.cfg.embed_dim, self.cfg.width)
            self.latent_shape = (self.cfg.num_latents, self.cfg.embed_dim)
        else:
            self.latent_shape = (self.cfg.num_latents, self.cfg.width)

        self.transformer = Perceiver(
            n_ctx=self.cfg.num_latents,
            width=self.cfg.width,
            layers=self.cfg.num_decoder_layers,
            heads=self.cfg.heads,
            init_scale=self.cfg.init_scale,
            qkv_bias=self.cfg.qkv_bias,
            use_flash=self.cfg.use_flash,
            use_checkpoint=self.cfg.use_checkpoint
        )

        # decoder
        self.decoder = PerceiverCrossAttentionDecoder(
            embedder=self.embedder,
            out_dim=self.cfg.out_dim,
            num_latents=self.cfg.num_latents,
            width=self.cfg.width,
            heads=self.cfg.heads,
            init_scale=self.cfg.init_scale,
            qkv_bias=self.cfg.qkv_bias,
            use_flash=self.cfg.use_flash,
            use_checkpoint=self.cfg.use_checkpoint
        )

        if self.cfg.pretrained_model_name_or_path != "":
            print(f"Loading pretrained model from {self.cfg.pretrained_model_name_or_path}")
            pretrained_ckpt = torch.load(self.cfg.pretrained_model_name_or_path, map_location="cpu")
            # Checkpoints may wrap the weights in a 'state_dict' key; either
            # way, keep only the shape-model weights and strip their prefix.
            # (The original duplicated this filtering loop verbatim in both
            # branches.)
            if 'state_dict' in pretrained_ckpt:
                pretrained_ckpt = pretrained_ckpt['state_dict']
            pretrained_ckpt = {
                k.replace('shape_model.', ''): v
                for k, v in pretrained_ckpt.items()
                if k.startswith('shape_model.')
            }

            self.load_state_dict(pretrained_ckpt, strict=False)


    def encode(self,
               surface: torch.FloatTensor,
               sample_posterior: bool = True):
        """
        Args:
            surface (torch.FloatTensor): [B, N, 3+C]
            sample_posterior (bool): sample the posterior vs take its mode.

        Returns:
            shape_latents (torch.FloatTensor): [B, num_latents, width]
            kl_embed (torch.FloatTensor): [B, num_latents, embed_dim]
            posterior (DiagonalGaussianDistribution or None):
        """
        assert surface.shape[-1] == 3 + self.cfg.point_feats, f"\
            Expected {3 + self.cfg.point_feats} channels, got {surface.shape[-1]}"

        pc, feats = surface[..., :3], surface[..., 3:]  # B, n_samples, 3
        bs, N, D = pc.shape
        if N > self.cfg.n_samples:
            # Farthest-point-sample the cloud down to n_samples points,
            # keeping points and their features aligned via the same index.
            from torch_cluster import fps
            flattened = pc.view(bs*N, D)
            batch = torch.arange(bs).to(pc.device)
            batch = torch.repeat_interleave(batch, N)  # bs*N
            pos = flattened
            ratio = self.cfg.n_samples / N
            idx = fps(pos, batch, ratio=ratio)
            pc = pc.view(bs*N, -1)[idx].view(bs, -1, pc.shape[-1])
            feats = feats.view(bs*N, -1)[idx].view(bs, -1, feats.shape[-1])

        shape_latents = self.encoder(pc, feats)  # B, num_latents, width
        kl_embed, posterior = self.encode_kl_embed(shape_latents, sample_posterior)  # B, num_latents, embed_dim

        return shape_latents, kl_embed, posterior


    def decode(self,
               latents: torch.FloatTensor):
        """
        Args:
            latents (torch.FloatTensor): [B, num_latents, embed_dim]

        Returns:
            (torch.FloatTensor): [B, num_latents, width]
        """
        latents = self.post_kl(latents)  # [B, num_latents, embed_dim] -> [B, num_latents, width]

        return self.transformer(latents)


    def query(self,
              queries: torch.FloatTensor,
              latents: torch.FloatTensor):
        """
        Args:
            queries (torch.FloatTensor): [B, N, 3]
            latents (torch.FloatTensor): [B, num_latents, width]

        Returns:
            logits (torch.FloatTensor): [B, N, out_dim], occupancy logits
        """

        logits = self.decoder(queries, latents)

        return logits
|
Code/Baselines/CraftsMan3D/craftsman/models/autoencoders/volume_decoders.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
| 2 |
+
# except for the third-party components listed below.
|
| 3 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
| 4 |
+
# in the respective licenses of these third-party components.
|
| 5 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
| 6 |
+
# components and must ensure that the usage of the third party components adheres to
|
| 7 |
+
# all relevant laws and regulations.
|
| 8 |
+
|
| 9 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
| 10 |
+
# their software and algorithms, including trained model weights, parameters (including
|
| 11 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
| 12 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
| 13 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
| 14 |
+
|
| 15 |
+
from typing import Union, Tuple, List, Callable
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
from einops import repeat
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
def extract_near_surface_volume_fn(input_tensor: torch.Tensor, alpha: float):
    """Return an int32 mask marking voxels that lie near the implicit surface.

    A voxel is "near surface" when the sign of ``value + alpha`` differs from
    the sign of at least one of its six face neighbours. Values <= -9000 are
    treated as invalid sentinels and zeroed out of the result.

    Args:
        input_tensor: [D, D, D] scalar field (e.g. SDF/occupancy logits).
        alpha: offset added to the field before the sign test.

    Returns:
        [D, D, D] int32 tensor: 1 at near-surface valid voxels, else 0.
    """
    # Offset the field; entries at/below the -9000 sentinel are invalid.
    val = input_tensor + alpha
    valid_mask = val > -9000

    # Shape-preserving neighbour lookup.
    def get_neighbor(t, shift, axis):
        """Shift the volume by ``shift`` voxels along ``axis``, replicating
        the border so the output keeps the [D, D, D] shape.
        NOTE(review): the per-axis padding indices and the slicing below use
        different dim orderings; the construction is symmetric across axes,
        so the union of all six calls still covers the face neighbourhood —
        confirm if exact axis labels ever matter."""
        if shift == 0:
            return t.clone()

        # One (before, after) padding pair per axis.
        pad_dims = [0, 0, 0, 0, 0, 0]

        if axis == 0:
            pad_idx = 0 if shift > 0 else 1
            pad_dims[pad_idx] = abs(shift)
        elif axis == 1:
            pad_idx = 2 if shift > 0 else 3
            pad_dims[pad_idx] = abs(shift)
        elif axis == 2:
            pad_idx = 4 if shift > 0 else 5
            pad_dims[pad_idx] = abs(shift)

        # F.pad consumes pairs from the last dim backwards, hence [::-1];
        # batch/channel dims are added because 'replicate' needs them.
        padded = F.pad(t.unsqueeze(0).unsqueeze(0), pad_dims[::-1], mode='replicate')

        # Slice off the padded side to realise the shift.
        slice_dims = [slice(None)] * 3
        if axis == 0:
            if shift > 0:
                slice_dims[0] = slice(shift, None)
            else:
                slice_dims[0] = slice(None, shift)
        elif axis == 1:
            if shift > 0:
                slice_dims[1] = slice(shift, None)
            else:
                slice_dims[1] = slice(None, shift)
        elif axis == 2:
            if shift > 0:
                slice_dims[2] = slice(shift, None)
            else:
                slice_dims[2] = slice(None, shift)

        # Drop the helper dims and apply the slice.
        padded = padded.squeeze(0).squeeze(0)
        sliced = padded[slice_dims]
        return sliced

    # The six face neighbours (all [D, D, D]).
    left = get_neighbor(val, 1, axis=0)
    right = get_neighbor(val, -1, axis=0)
    back = get_neighbor(val, 1, axis=1)
    front = get_neighbor(val, -1, axis=1)
    down = get_neighbor(val, 1, axis=2)
    up = get_neighbor(val, -1, axis=2)

    # Where a neighbour is invalid, substitute the voxel's own value so the
    # sign comparison becomes a no-op there.
    def safe_where(neighbor):
        return torch.where(neighbor > -9000, neighbor, val)

    left = safe_where(left)
    right = safe_where(right)
    back = safe_where(back)
    front = safe_where(front)
    down = safe_where(down)
    up = safe_where(up)

    # Sign consistency check in float32 for numerical robustness.
    sign = torch.sign(val.to(torch.float32))
    neighbors_sign = torch.stack([
        torch.sign(left.to(torch.float32)),
        torch.sign(right.to(torch.float32)),
        torch.sign(back.to(torch.float32)),
        torch.sign(front.to(torch.float32)),
        torch.sign(down.to(torch.float32)),
        torch.sign(up.to(torch.float32))
    ], dim=0)

    # True where every neighbour shares the voxel's sign.
    same_sign = torch.all(neighbors_sign == sign, dim=0)

    # Final mask: sign change somewhere in the 6-neighbourhood AND valid.
    mask = (~same_sign).to(torch.int32)
    return mask * valid_mask.to(torch.int32)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def generate_dense_grid_points(
    bbox_min: np.ndarray,
    bbox_max: np.ndarray,
    octree_resolution: int,
    indexing: str = "ij",
):
    """Build a dense grid of query points spanning the bounding box.

    Returns the [res+1, res+1, res+1, 3] coordinate array, the per-axis grid
    size, and the bbox edge lengths.
    """
    length = bbox_max - bbox_min
    cells = int(octree_resolution)

    # res cells along each axis means res + 1 sample points.
    axes = [
        np.linspace(bbox_min[i], bbox_max[i], cells + 1, dtype=np.float32)
        for i in range(3)
    ]
    xs, ys, zs = np.meshgrid(*axes, indexing=indexing)
    xyz = np.stack((xs, ys, zs), axis=-1)
    grid_size = [cells + 1, cells + 1, cells + 1]

    return xyz, grid_size, length
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class VanillaVolumeDecoder:
    """Decode latents into a dense logit volume by evaluating the geometry
    decoder over a regular grid, chunk by chunk."""

    @torch.no_grad()
    def __call__(
        self,
        latents: torch.FloatTensor,
        geo_decoder: Callable,
        bounds: Union[Tuple[float], List[float], float] = 1.01,
        num_chunks: int = 10000,
        octree_resolution: int = 384,
        enable_pbar: bool = True,
        **kwargs,
    ):
        device, dtype = latents.device, latents.dtype
        batch_size = latents.shape[0]

        # 1. Build the dense grid of query points inside the bounding box.
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]

        bbox_min, bbox_max = np.array(bounds[0:3]), np.array(bounds[3:6])
        xyz_samples, grid_size, length = generate_dense_grid_points(
            bbox_min=bbox_min,
            bbox_max=bbox_max,
            octree_resolution=octree_resolution,
            indexing="ij"
        )
        xyz_samples = torch.from_numpy(xyz_samples).to(device, dtype=dtype).contiguous().reshape(-1, 3)

        # 2. Query the decoder in chunks to bound memory, then reassemble.
        batch_features = []
        chunk_starts = range(0, xyz_samples.shape[0], num_chunks)
        for start in tqdm(chunk_starts, desc=f"Volume Decoding", disable=not enable_pbar):
            chunk_queries = repeat(
                xyz_samples[start: start + num_chunks, :], "p c -> b p c", b=batch_size)
            batch_features.append(geo_decoder(queries=chunk_queries, latents=latents))

        grid_features = torch.cat(batch_features, dim=1)
        # Channel 0 carries the occupancy/SDF logits; any further channels
        # are discarded here.
        grid_logits = grid_features[..., 0:1]
        return grid_logits.view((batch_size, *grid_size)).float()
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class HierarchicalVolumeDecoding:
    """Coarse-to-fine (octree-style) volume decoding.

    A dense logit grid is decoded at the coarsest resolution first; at every
    finer level only cells near the currently-estimated surface are
    re-queried, all other cells keep ``empty_value``. This trades a small
    amount of bookkeeping for far fewer decoder evaluations than dense
    decoding at the final resolution.
    """

    @torch.no_grad()
    def __call__(
        self,
        latents: torch.FloatTensor,
        geo_decoder: Callable,
        bounds: Union[Tuple[float], List[float], float] = 1.01,
        num_chunks: int = 65536,
        mc_level: float = 0.0,          # iso-level used to find near-surface cells
        octree_resolution: int = 384,   # finest resolution per axis
        min_resolution: int = 63,       # coarsest resolution of the pyramid
        enable_pbar: bool = True,
        empty_value: float = -10000.,   # logit written into never-queried cells
        **kwargs,
    ):
        device = latents.device
        dtype = latents.dtype

        # Build the resolution pyramid: halve until below min_resolution, then
        # reverse so we process coarsest -> finest.
        resolutions = []
        if octree_resolution < min_resolution:
            resolutions.append(octree_resolution)
        while octree_resolution >= min_resolution:
            resolutions.append(octree_resolution)
            octree_resolution = octree_resolution // 2
        resolutions.reverse()

        # 1. generate query points
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]
        bbox_min = np.array(bounds[0:3])
        bbox_max = np.array(bounds[3:6])
        bbox_size = bbox_max - bbox_min

        xyz_samples, grid_size, length = generate_dense_grid_points(
            bbox_min=bbox_min,
            bbox_max=bbox_max,
            octree_resolution=resolutions[0],
            indexing="ij"
        )

        # 3x3x3 all-ones conv used as a morphological dilation on occupancy masks.
        dilate = nn.Conv3d(1, 1, 3, padding=1, bias=False, device=device, dtype=dtype)
        dilate.weight = torch.nn.Parameter(torch.ones(dilate.weight.shape, dtype=dtype, device=device))

        grid_size = np.array(grid_size)
        xyz_samples = torch.from_numpy(xyz_samples).to(device, dtype=dtype).contiguous().reshape(-1, 3)

        # 2. latents to 3d volume: dense decode at the coarsest level.
        batch_features = []
        batch_size = latents.shape[0]
        for start in tqdm(range(0, xyz_samples.shape[0], num_chunks),
                          desc=f"Hierarchical Volume Decoding [r{resolutions[0] + 1}]", disable=not enable_pbar):
            queries = xyz_samples[start: start + num_chunks, :]
            batch_queries = repeat(queries, "p c -> b p c", b=batch_size)
            features = geo_decoder(queries=batch_queries, latents=latents)
            batch_features.append(features)

        grid_features = torch.cat(batch_features, dim=1).view((batch_size, grid_size[0], grid_size[1], grid_size[2], -1))
        grid_logits = grid_features[..., 0]  # assume the first element is the logits

        # Refine level by level: mark near-surface cells, dilate, upsample the
        # mask 2x, and re-decode only the marked cells.
        for octree_depth_now in resolutions[1:]:
            grid_size = np.array([octree_depth_now + 1] * 3)
            resolution = bbox_size / octree_depth_now  # cell edge length at this level
            next_index = torch.zeros(tuple(grid_size), dtype=dtype, device=device)
            # Cells that are never re-queried keep this sentinel (mapped to
            # empty_value at the end).
            next_logits = torch.full(next_index.shape, -10000., dtype=dtype, device=device)
            # NOTE(review): squeeze(0) assumes batch_size == 1 — confirm callers
            # never pass batched latents here.
            curr_points = extract_near_surface_volume_fn(grid_logits.squeeze(0), mc_level)
            # Also keep cells whose logit magnitude is small (close to the
            # surface); presumably logits are roughly unit-scaled here — verify.
            curr_points += grid_logits.squeeze(0).abs() < 0.95

            # Dilate once on intermediate levels; at the finest level dilate the
            # upsampled mask twice instead (expand_num + (2 - expand_num) == 2).
            if octree_depth_now == resolutions[-1]:
                expand_num = 0
            else:
                expand_num = 1
            for i in range(expand_num):
                curr_points = dilate(curr_points.unsqueeze(0).to(dtype)).squeeze(0)
            (cidx_x, cidx_y, cidx_z) = torch.where(curr_points > 0)
            # Coarse cell (i,j,k) maps to fine cell (2i,2j,2k).
            next_index[cidx_x * 2, cidx_y * 2, cidx_z * 2] = 1
            for i in range(2 - expand_num):
                next_index = dilate(next_index.unsqueeze(0)).squeeze(0)
            nidx = torch.where(next_index > 0)

            # Convert marked fine-grid indices back to world coordinates.
            next_points = torch.stack(nidx, dim=1)
            next_points = (next_points * torch.tensor(resolution, dtype=latents.dtype, device=device) +
                           torch.tensor(bbox_min, dtype=latents.dtype, device=device))

            batch_features = []
            for start in tqdm(range(0, next_points.shape[0], num_chunks),
                              desc=f"Hierarchical Volume Decoding [r{octree_depth_now + 1}]", disable=not enable_pbar):
                queries = next_points[start: start + num_chunks, :]
                batch_queries = repeat(queries, "p c -> b p c", b=batch_size)
                features = geo_decoder(queries=batch_queries.to(latents.dtype), latents=latents)
                batch_features.append(features)
            grid_features = torch.cat(batch_features, dim=1)
            grid_logits = grid_features[..., 0:1]
            # Scatter the freshly decoded logits into the sparse fine grid.
            next_logits[nidx] = grid_logits[0, ..., 0]
            grid_logits = next_logits.unsqueeze(0)
        # Replace the sentinel in untouched cells with the caller-chosen value.
        grid_logits[grid_logits == -10000.] = empty_value

        return grid_logits
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import (
|
| 2 |
+
clip_dinov2_encoder,
|
| 3 |
+
dinov2_encoder
|
| 4 |
+
)
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (259 Bytes). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (4 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/clip_dinov2_encoder.cpython-310.pyc
ADDED
|
Binary file (8.13 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/__pycache__/dinov2_encoder.cpython-310.pyc
ADDED
|
Binary file (6.14 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/base.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import numpy as np
|
| 5 |
+
from PIL import Image
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from torchvision.transforms import Normalize
|
| 8 |
+
from torchvision.transforms import InterpolationMode
|
| 9 |
+
from torchvision.transforms.transforms import _interpolation_modes_from_int
|
| 10 |
+
|
| 11 |
+
from transformers import CLIPModel, CLIPTokenizer, CLIPImageProcessor
|
| 12 |
+
from transformers.utils import ModelOutput
|
| 13 |
+
from typing import Iterable, Optional, Union, List
|
| 14 |
+
|
| 15 |
+
import craftsman
|
| 16 |
+
from craftsman.utils.base import BaseModule
|
| 17 |
+
from craftsman.utils.typing import *
|
| 18 |
+
|
| 19 |
+
ImageType = Union[np.ndarray, torch.Tensor, Image.Image]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class BaseEmbedder(BaseModule):
    """Base class for image/multi-view conditional encoders.

    Subclasses implement :meth:`encode_image` and are expected to define
    ``self.empty_image_embeds`` (the unconditional embedding used for
    classifier-free-guidance dropout) — neither is created here; confirm in
    the concrete subclass.
    """

    @dataclass
    class Config(BaseModule.Config):
        pretrained_model_name_or_path: Optional[str] = None  # the pretrained model name or path

        encode_camera: bool = False  # whether to encode camera
        camera_embeds_type: str = "sincos"  # the type of camera embeds
        camera_embeds_dim: Optional[int] = None  # the dimension of camera embeds
        n_views: int = 1  # the number of views

        # Probability of replacing the conditioning with the empty embedding
        # (classifier-free guidance training).
        empty_embeds_ratio: float = 0.1
        zero_uncond_embeds: bool = True

        normalize_embeds: bool = False  # whether to L2-normalize the embeds

    cfg: Config

    def configure(self) -> None:
        super().configure()

        if self.cfg.encode_camera:
            # Fixed set of 4 canonical camera-to-world poses (front, right,
            # back, left) at unit distance, registered as a non-trainable buffer.
            self.distance = 1.0
            self.register_buffer(
                "cameras",
                torch.as_tensor([
                    [[1, 0, 0, 0],
                     [0, 0, -1, -self.distance],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1]],  # front to back

                    [[0, 0, 1, self.distance],
                     [1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1]],  # right to left

                    [[-1, 0, 0, 0],
                     [0, 0, 1, self.distance],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1]],  # back to front

                    [[0, 0, -1, -self.distance],
                     [-1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1]],  # left to right
                ], dtype=torch.float32),
            )

    def encode_image(self, images: Iterable[Optional[ImageType]], camera_embeds: Optional[torch.Tensor] = None, **kwargs) -> torch.FloatTensor:
        # Abstract hook: subclasses return per-image token embeddings.
        pass

    def encode_camera(self, c2ws: torch.Tensor):
        # Flatten each 4x4 camera-to-world matrix and embed it as
        # [sin(x), cos(x)] over its 16 entries -> 32-dim per camera.
        if self.cfg.camera_embeds_type == "sincos":
            assert c2ws.shape[-1] == 4 and c2ws.shape[-2] == 4, f"Invalid c2ws shape: {c2ws.shape}"
            c2ws = c2ws.view(-1, 16)
            return torch.cat([torch.sin(c2ws), torch.cos(c2ws)], dim=-1)
        else:
            raise NotImplementedError(f"Unknown camera_embeds_type: {self.cfg.camera_embeds_type}")

    def post_process_embeds(self, visual_embeds):
        # NOTE(review): `bs` is assigned but never used in this method.
        bs = visual_embeds.shape[0]

        if self.cfg.normalize_embeds:
            # post-process the visual embeds
            if visual_embeds is not None:
                visual_embeds = visual_embeds / visual_embeds.norm(dim=-1, keepdim=True)

        assert visual_embeds is not None
        # return visual_embeds
        return visual_embeds

    def forward(self, batch):
        # 5-D image tensor means (batch, views, C, H, W); fold views into the
        # effective batch size.
        if batch["image"].dim() == 5:
            bs = batch["image"].shape[0] * batch["image"].shape[1]
        else:
            bs = batch["image"].shape[0]

        visual_embeds = None

        # Classifier-free-guidance dropout: with probability
        # empty_embeds_ratio, substitute the empty (unconditional) embedding.
        if random.random() < self.cfg.empty_embeds_ratio:
            if "image" in batch or "image_embeds" in batch:
                visual_embeds = self.empty_image_embeds.repeat(bs, 1, 1)
            elif "mvimages" in batch or "mvimage_embeds" in batch:
                visual_embeds = self.empty_image_embeds.unsqueeze(1).repeat(bs, 1, 1, 1)
        else:
            # for visual inputs
            if "image" in batch:
                if self.cfg.encode_camera:
                    visual_embeds = self.encode_image(batch["image"], cameras=batch["c2w"])
                else:
                    visual_embeds = self.encode_image(batch["image"])
            elif "mvimages" in batch:
                # Multi-view: flatten views into the batch dimension for
                # encoding, then reshape back to (bs, n_views, tokens, dim).
                n_views = batch["mvimages"].shape[1]
                if self.cfg.encode_camera:
                    visual_embeds = self.encode_image(
                        batch["mvimages"].view(-1, *batch["mvimages"].shape[-3:]), \
                        cameras=batch["c2ws"]).view(bs, n_views, *self.empty_image_embeds.shape[-2:])
                else:
                    visual_embeds = self.encode_image(
                        batch["mvimages"].view(-1, *batch["mvimages"].shape[-3:])).view(bs, n_views, *self.empty_image_embeds.shape[-2:])

        return self.post_process_embeds(visual_embeds)
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/__pycache__/modeling_clip.cpython-310.pyc
ADDED
|
Binary file (44.1 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/__pycache__/modeling_conditional_clip.cpython-310.pyc
ADDED
|
Binary file (9.86 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/modeling_clip.py
ADDED
|
@@ -0,0 +1,1419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch CLIP model."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from typing import Any, Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
import torch.utils.checkpoint
|
| 23 |
+
from torch import nn
|
| 24 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 25 |
+
|
| 26 |
+
from transformers.activations import ACT2FN
|
| 27 |
+
from transformers.modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
|
| 28 |
+
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
|
| 29 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 30 |
+
from transformers.utils import (
|
| 31 |
+
ModelOutput,
|
| 32 |
+
add_code_sample_docstrings,
|
| 33 |
+
add_start_docstrings,
|
| 34 |
+
add_start_docstrings_to_model_forward,
|
| 35 |
+
logging,
|
| 36 |
+
replace_return_docstrings,
|
| 37 |
+
)
|
| 38 |
+
from transformers.models.clip.configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
logger = logging.get_logger(__name__)
|
| 42 |
+
|
| 43 |
+
# General docstring
|
| 44 |
+
_CONFIG_FOR_DOC = "CLIPConfig"
|
| 45 |
+
_CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"
|
| 46 |
+
|
| 47 |
+
# Image classification docstring
|
| 48 |
+
_IMAGE_CLASS_CHECKPOINT = "openai/clip-vit-base-patch32"
|
| 49 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_0"
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# contrastive loss function, adapted from
|
| 53 |
+
# https://sachinruk.github.io/blog/2021-03-07-clip.html
|
| 54 |
+
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    """Cross-entropy over a square similarity matrix.

    Row ``i`` of ``logits`` is treated as a classification over columns whose
    correct class is ``i`` (InfoNCE-style matching objective).
    """
    targets = torch.arange(len(logits), device=logits.device)
    return nn.functional.cross_entropy(logits, targets)


def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    """Symmetric CLIP loss: mean of the text->image and image->text terms."""
    text_to_image = contrastive_loss(similarity)
    image_to_text = contrastive_loss(similarity.t())
    return (text_to_image + image_to_text) / 2.0
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@dataclass
class CLIPVisionModelOutput(ModelOutput):
    """Vision-model output that also carries the projected image embedding.

    Mirrors the standard `transformers` vision-model output; ``image_embeds``
    is only populated when the model was instantiated with a projection head
    (``with_projection=True``).
    """

    # Pooled output after the visual projection, shape (batch, output_dim).
    image_embeds: Optional[torch.FloatTensor] = None
    # Last-layer hidden states, shape (batch, sequence_length, hidden_size).
    last_hidden_state: torch.FloatTensor = None
    # One tensor per layer (plus the embedding output) when
    # output_hidden_states=True, each (batch, sequence_length, hidden_size).
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    # Post-softmax attention weights per layer when output_attentions=True,
    # each (batch, num_heads, sequence_length, sequence_length).
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
@dataclass
class CLIPTextModelOutput(ModelOutput):
    """Text-model output that also carries the projected text embedding.

    Mirrors the standard `transformers` text-model output; ``text_embeds`` is
    only populated when the model was instantiated with a projection head
    (``with_projection=True``).
    """

    # Pooled output after the text projection, shape (batch, output_dim).
    text_embeds: Optional[torch.FloatTensor] = None
    # Last-layer hidden states, shape (batch, sequence_length, hidden_size).
    last_hidden_state: torch.FloatTensor = None
    # One tensor per layer (plus the embedding output) when
    # output_hidden_states=True, each (batch, sequence_length, hidden_size).
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    # Post-softmax attention weights per layer when output_attentions=True,
    # each (batch, num_heads, sequence_length, sequence_length).
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@dataclass
class CLIPOutput(ModelOutput):
    """Joint output of the full CLIP model (text tower + vision tower).

    Args:
        loss: contrastive image-text loss, only set when ``return_loss=True``.
        logits_per_image: scaled dot products ``image_embeds @ text_embeds.T``,
            shape (image_batch_size, text_batch_size).
        logits_per_text: scaled dot products ``text_embeds @ image_embeds.T``,
            shape (text_batch_size, image_batch_size).
        text_embeds: projected pooled text features, shape (batch, output_dim).
        image_embeds: projected pooled image features, shape (batch, output_dim).
        text_model_output: raw output of the text encoder.
        vision_model_output: raw output of the vision encoder.
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        # Recursively convert the nested sub-model outputs so the whole
        # result flattens into plain tensors/tuples.
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class CLIPVisionEmbeddings(nn.Module):
    """Patch + position embeddings for the CLIP vision transformer.

    Splits the image into non-overlapping patches via a strided conv, prepends
    a learned class token, and adds learned absolute position embeddings.
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Learned [CLS]-style token prepended to the patch sequence.
        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        # Patchify: kernel == stride == patch_size, so each output position is
        # one non-overlapping patch projected to embed_dim.
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1  # +1 for the class token
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        """Embed pixel_values (batch, channels, H, W) into
        (batch, num_patches + 1, embed_dim)."""
        batch_size = pixel_values.shape[0]
        # Cast input to the conv weight dtype (e.g. fp16 under autocast/half).
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)  # -> (batch, grid*grid, width)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class CLIPTextEmbeddings(nn.Module):
    """Token + learned absolute position embeddings for the CLIP text model."""

    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        """Return token embeddings plus position embeddings.

        Exactly one of ``input_ids`` (batch, seq_len) or ``inputs_embeds``
        (batch, seq_len, hidden) must be provided; ``position_ids`` defaults
        to 0..seq_len-1.
        """
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
class CLIPAttention(nn.Module):
|
| 228 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 229 |
+
|
| 230 |
+
def __init__(self, config):
|
| 231 |
+
super().__init__()
|
| 232 |
+
self.config = config
|
| 233 |
+
self.embed_dim = config.hidden_size
|
| 234 |
+
self.num_heads = config.num_attention_heads
|
| 235 |
+
self.head_dim = self.embed_dim // self.num_heads
|
| 236 |
+
if self.head_dim * self.num_heads != self.embed_dim:
|
| 237 |
+
raise ValueError(
|
| 238 |
+
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
| 239 |
+
f" {self.num_heads})."
|
| 240 |
+
)
|
| 241 |
+
self.scale = self.head_dim**-0.5
|
| 242 |
+
self.dropout = config.attention_dropout
|
| 243 |
+
|
| 244 |
+
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 245 |
+
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 246 |
+
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 247 |
+
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 248 |
+
|
| 249 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
| 250 |
+
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
| 251 |
+
|
| 252 |
+
def forward(
|
| 253 |
+
self,
|
| 254 |
+
hidden_states: torch.Tensor,
|
| 255 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 256 |
+
causal_attention_mask: Optional[torch.Tensor] = None,
|
| 257 |
+
output_attentions: Optional[bool] = False,
|
| 258 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 259 |
+
"""Input shape: Batch x Time x Channel"""
|
| 260 |
+
|
| 261 |
+
bsz, tgt_len, embed_dim = hidden_states.size()
|
| 262 |
+
|
| 263 |
+
# get query proj
|
| 264 |
+
query_states = self.q_proj(hidden_states) * self.scale
|
| 265 |
+
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
| 266 |
+
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
| 267 |
+
|
| 268 |
+
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
|
| 269 |
+
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
|
| 270 |
+
key_states = key_states.view(*proj_shape)
|
| 271 |
+
value_states = value_states.view(*proj_shape)
|
| 272 |
+
|
| 273 |
+
src_len = key_states.size(1)
|
| 274 |
+
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
|
| 275 |
+
|
| 276 |
+
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
|
| 277 |
+
raise ValueError(
|
| 278 |
+
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
|
| 279 |
+
f" {attn_weights.size()}"
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
# apply the causal_attention_mask first
|
| 283 |
+
if causal_attention_mask is not None:
|
| 284 |
+
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
| 285 |
+
raise ValueError(
|
| 286 |
+
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
|
| 287 |
+
f" {causal_attention_mask.size()}"
|
| 288 |
+
)
|
| 289 |
+
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
|
| 290 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
| 291 |
+
|
| 292 |
+
if attention_mask is not None:
|
| 293 |
+
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
| 294 |
+
raise ValueError(
|
| 295 |
+
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
|
| 296 |
+
)
|
| 297 |
+
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
|
| 298 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
| 299 |
+
|
| 300 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
| 301 |
+
|
| 302 |
+
if output_attentions:
|
| 303 |
+
# this operation is a bit akward, but it's required to
|
| 304 |
+
# make sure that attn_weights keeps its gradient.
|
| 305 |
+
# In order to do so, attn_weights have to reshaped
|
| 306 |
+
# twice and have to be reused in the following
|
| 307 |
+
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
| 308 |
+
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
|
| 309 |
+
else:
|
| 310 |
+
attn_weights_reshaped = None
|
| 311 |
+
|
| 312 |
+
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
|
| 313 |
+
|
| 314 |
+
attn_output = torch.bmm(attn_probs, value_states)
|
| 315 |
+
|
| 316 |
+
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
|
| 317 |
+
raise ValueError(
|
| 318 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
|
| 319 |
+
f" {attn_output.size()}"
|
| 320 |
+
)
|
| 321 |
+
|
| 322 |
+
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
|
| 323 |
+
attn_output = attn_output.transpose(1, 2)
|
| 324 |
+
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
|
| 325 |
+
|
| 326 |
+
attn_output = self.out_proj(attn_output)
|
| 327 |
+
|
| 328 |
+
return attn_output, attn_weights_reshaped
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
class CLIPMLP(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand to the intermediate width, apply the nonlinearity, project back down.
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
class CLIPEncoderLayer(nn.Module):
    """Pre-LayerNorm transformer block: self-attention and MLP, each in a residual branch."""

    def __init__(self, config: CLIPConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Self-attention sub-block: normalize, attend, add back the residual.
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.layer_norm1(hidden_states),
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_out

        # Feed-forward sub-block: normalize, transform, add back the residual.
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))

        # Attention weights are appended only when the caller asked for them.
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
class CLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CLIPConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # Scaling factor applied to every standard deviation below; from the config.
        factor = self.config.initializer_factor
        # Dispatch on the concrete module type; each branch follows the original
        # OpenAI CLIP initialization scheme for that sub-module.
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            # Projections into attention are scaled down with depth (2 * num layers).
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, CLIPModel):
            # Text/vision projection heads of the dual-encoder model.
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPForImageClassification):
            nn.init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        # LayerNorm and Linear biases are reset unconditionally, on top of any branch above.
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
# Shared docstring fragments consumed by the @add_start_docstrings(_to_model_forward)
# decorators below; these are runtime values, not comments, so their text must not change.
CLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Argument documentation for the text-only forward signatures.
CLIP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

# Argument documentation for the vision-only forward signatures.
CLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

# Argument documentation for the combined text+vision forward signatures.
CLIP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`CLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: CLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (e.g. by PreTrainedModel.gradient_checkpointing_enable).
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Per-call flags fall back to the config defaults when not supplied.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            # Hidden states are recorded *before* each layer; the final state is appended after the loop.
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Trade compute for memory: recompute activations in the backward pass.
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            # Tuple output drops the None entries so positions stay meaningful.
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
class CLIPTextTransformer(nn.Module):
    """Embeddings + causal encoder + final LayerNorm for the CLIP text tower."""

    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPTextEmbeddings(config)
        self.encoder = CLIPEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        # Flatten any leading batch dims to (batch, seq_len).
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # CLIP's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # Pooling selects one hidden state per sequence; the branch taken depends on the
        # (possibly legacy) eos_token_id stored in the config, for checkpoint compatibility.
        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here.
            # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, sequence_length, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible)
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)
                # Note: we assume each sequence (along batch dim.) contains an `eos_token_id` (e.g. prepared by the tokenizer)
                (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
                .int()
                .argmax(dim=-1),
            ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 754 |
+
|
| 755 |
+
|
| 756 |
+
@add_start_docstrings(
    """The text model from CLIP without any head or projection on top.""",
    CLIP_START_DOCSTRING,
)
class CLIPTextModel(CLIPPreTrainedModel):
    # Thin PreTrainedModel wrapper around CLIPTextTransformer; forward simply delegates.
    config_class = CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)
        self.text_model = CLIPTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The token embedding table is the model's input embedding.
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModel

        >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Pure delegation to the inner transformer.
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
class CLIPVisionTransformer(nn.Module):
    """Patch embeddings + pre-LN encoder + post-LN pooling (CLS token) for the CLIP vision tower."""

    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)
        # NOTE: "pre_layrnorm" is a long-standing upstream typo kept for checkpoint
        # weight-name compatibility — do not rename.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        # No attention masks here: vision sequences are fixed-length (patches + CLS).
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        # Pool by taking the CLS token (position 0), then apply the final LayerNorm.
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 873 |
+
|
| 874 |
+
|
| 875 |
+
@add_start_docstrings(
    """The vision model from CLIP without any head or projection on top.""",
    CLIP_START_DOCSTRING,
)
class CLIPVisionModel(CLIPPreTrainedModel):
    """Bare CLIP vision tower; all computation is delegated to the inner transformer."""

    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        # Thin wrapper around the actual vision transformer.
        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The patch-embedding convolution is the model's input embedding layer.
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Pure delegation: the wrapped transformer implements the forward pass.
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
@add_start_docstrings(CLIP_START_DOCSTRING)
class CLIPModel(CLIPPreTrainedModel):
    # Dual-encoder CLIP: a text tower and a vision tower whose pooled outputs are
    # projected into a shared `projection_dim`-sized space and compared through a
    # temperature-scaled cosine similarity.
    config_class = CLIPConfig
    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        # Defensive validation: the sub-configs must be the concrete CLIP config types.
        if not isinstance(config.text_config, CLIPTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise ValueError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = CLIPTextTransformer(text_config)
        self.vision_model = CLIPVisionTransformer(vision_config)

        # Bias-free linear maps from each tower's hidden size into the shared space.
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        # Learnable log-temperature applied (after exp) to the contrastive logits.
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Index 1 is the pooled representation produced by the text tower.
        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled representations (index 1) projected into the shared embedding space.
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        # logits_per_text[i, j] = similarity of text i vs image j; the image view is its transpose.
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            # Symmetric cross-entropy over the text->image similarity matrix.
            loss = clip_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
|
| 1159 |
+
|
| 1160 |
+
|
| 1161 |
+
@add_start_docstrings(
    """
    CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output).
    """,
    CLIP_START_DOCSTRING,
)
class CLIPTextModelWithProjection(CLIPPreTrainedModel):
    """Text tower plus the bias-free projection into the shared CLIP embedding space."""

    config_class = CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)

        self.text_model = CLIPTextTransformer(config)

        self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPTextModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection

        >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> text_embeds = outputs.text_embeds
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project the pooled (index-1) text representation into the shared space.
        pooled = text_outputs[1]
        text_embeds = self.text_projection(pooled)

        if not return_dict:
            head_outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
            return tuple(item for item in head_outputs if item is not None)

        return CLIPTextModelOutput(
            text_embeds=text_embeds,
            last_hidden_state=text_outputs.last_hidden_state,
            hidden_states=text_outputs.hidden_states,
            attentions=text_outputs.attentions,
        )
|
| 1240 |
+
|
| 1241 |
+
|
| 1242 |
+
@add_start_docstrings(
    """
    CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output).
    """,
    CLIP_START_DOCSTRING,
)
class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
    """Vision tower plus the bias-free projection into the shared CLIP embedding space."""

    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionTransformer(config)

        self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The patch-embedding convolution is the model's input embedding layer.
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPVisionModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection

        >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> image_embeds = outputs.image_embeds
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project the pooled (index-1) image representation into the shared space.
        pooled = vision_outputs[1]  # pooled_output
        image_embeds = self.visual_projection(pooled)

        if not return_dict:
            head_outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
            return tuple(item for item in head_outputs if item is not None)

        return CLIPVisionModelOutput(
            image_embeds=image_embeds,
            last_hidden_state=vision_outputs.last_hidden_state,
            hidden_states=vision_outputs.hidden_states,
            attentions=vision_outputs.attentions,
        )
|
| 1318 |
+
|
| 1319 |
+
|
| 1320 |
+
@add_start_docstrings(
    """
    CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of
    the patch tokens) e.g. for ImageNet.
    """,
    CLIP_START_DOCSTRING,
)
class CLIPForImageClassification(CLIPPreTrainedModel):
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        # Only the vision tower is used; the text tower is not instantiated.
        self.vision_model = CLIPVisionTransformer(config.vision_config)

        # Classifier head
        # Identity when num_labels == 0 so the model can be used as a feature extractor.
        self.classifier = (
            nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vision_model(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # average pool the patch tokens
        # The CLS token (index 0) is excluded from the pooling.
        sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
        # apply classifier
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # NOTE: infers and then *caches* the problem type on self.config, so the
            # first labeled batch determines the loss used for all subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                # Multi-label: labels are float multi-hot vectors.
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip/modeling_conditional_clip.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
# Reference:
|
| 16 |
+
# * transformers/models/dinov2/modeling_dinov2.py
|
| 17 |
+
# * https://github.com/facebookresearch/DiT/blob/main/models.py#L101
|
| 18 |
+
# * https://github.com/3DTopia/OpenLRM/tree/main/openlrm/models/encoders/dinov2
|
| 19 |
+
""" PyTorch CLIP model."""
|
| 20 |
+
|
| 21 |
+
from typing import Dict, List, Optional, Set, Tuple, Union
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.nn as nn
|
| 25 |
+
|
| 26 |
+
from .modeling_clip import (
|
| 27 |
+
CLIPConfig,
|
| 28 |
+
CLIPTextConfig,
|
| 29 |
+
CLIPVisionConfig,
|
| 30 |
+
CLIPEncoderLayer,
|
| 31 |
+
CLIPTextTransformer,
|
| 32 |
+
CLIPVisionTransformer,
|
| 33 |
+
CLIPModel,
|
| 34 |
+
CLIPVisionEmbeddings,
|
| 35 |
+
CLIPVisionModel,
|
| 36 |
+
CLIPOutput,
|
| 37 |
+
BaseModelOutput,
|
| 38 |
+
BaseModelOutputWithPooling
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class ModLN(nn.Module):
    """Condition-dependent affine modulation (adaLN-zero style, cf. DiT).

    A small MLP maps the condition vector to a per-channel (shift, scale) pair
    that is applied to every token of ``x``. The linear layer is zero-initialized,
    so at construction the module is exactly the identity mapping.
    """

    def __init__(self, inner_dim: int, mod_dim: int = 32):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.SiLU(),
            nn.Linear(mod_dim, inner_dim * 2),
        )

        # Zero-init weight and bias: shift = scale = 0 at start (identity output).
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.zeros_(module.weight)
                nn.init.zeros_(module.bias)

    def forward(self, x: torch.Tensor, condition: torch.Tensor):
        '''
        x: [N, M, C_in], M: num of tokens
        condition: [N, C_mod]
        '''
        # [N, 1, 2*C_in] broadcast over the token dimension, split into shift/scale.
        modulation = self.mlp(condition).unsqueeze(1)
        shift, scale = modulation.chunk(2, dim=-1)
        return x * (1 + scale) + shift
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class ConditionalCLIPVisionConfig(CLIPVisionConfig):
    """CLIP vision config extended with the width of the modulation (condition) vector."""

    def __init__(self, modulation_dim: int = 32, *args, **kwargs):
        # Everything except the modulation width is handled by the base config.
        super().__init__(*args, **kwargs)
        # Dimensionality of the condition vector fed to the ModLN layers.
        self.modulation_dim = modulation_dim
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class ConditionalCLIPEncoderLayer(CLIPEncoderLayer):
    """This corresponds to the Block class in the original implementation.

    A standard CLIP encoder layer whose two pre-norms are followed by
    condition-dependent ModLN modulation (adaLN-zero style).
    """

    def __init__(self, config: ConditionalCLIPVisionConfig) -> None:
        super().__init__(config)
        # Modulators applied after layer_norm1 / layer_norm2 respectively.
        self.mod_norm1 = ModLN(config.hidden_size, config.modulation_dim)
        self.mod_norm2 = ModLN(config.hidden_size, config.modulation_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        condition: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # --- self-attention sub-block (pre-norm + modulation, residual add) ---
        attn_input = self.mod_norm1(self.layer_norm1(hidden_states), condition)
        attn_output, attn_weights = self.self_attn(
            hidden_states=attn_input,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_output

        # --- MLP sub-block (pre-norm + modulation, residual add) ---
        mlp_input = self.mod_norm2(self.layer_norm2(hidden_states), condition)
        hidden_states = hidden_states + self.mlp(mlp_input)

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class ConditionalCLIPEncoder(nn.Module):
    """Stack of `ConditionalCLIPEncoderLayer`s driven by a shared condition vector."""

    def __init__(self, config: CLIPConfig) -> None:
        super().__init__()
        self.config = config
        # One conditionally-modulated layer per transformer block.
        self.layers = nn.ModuleList([ConditionalCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (e.g. by gradient-checkpointing enable hooks).
        self.gradient_checkpointing = False
|
| 116 |
+
|
| 117 |
+
def forward(
|
| 118 |
+
self,
|
| 119 |
+
inputs_embeds,
|
| 120 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 121 |
+
causal_attention_mask: Optional[torch.Tensor] = None,
|
| 122 |
+
output_attentions: Optional[bool] = None,
|
| 123 |
+
output_hidden_states: Optional[bool] = None,
|
| 124 |
+
condition: Optional[torch.Tensor] = None,
|
| 125 |
+
return_dict: Optional[bool] = None,
|
| 126 |
+
) -> Union[tuple, BaseModelOutput]:
|
| 127 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 128 |
+
output_hidden_states = (
|
| 129 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 130 |
+
)
|
| 131 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 132 |
+
|
| 133 |
+
encoder_states = () if output_hidden_states else None
|
| 134 |
+
all_attentions = () if output_attentions else None
|
| 135 |
+
|
| 136 |
+
hidden_states = inputs_embeds
|
| 137 |
+
for idx, encoder_layer in enumerate(self.layers):
|
| 138 |
+
if output_hidden_states:
|
| 139 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 140 |
+
if self.gradient_checkpointing and self.training:
|
| 141 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 142 |
+
encoder_layer.__call__,
|
| 143 |
+
hidden_states,
|
| 144 |
+
attention_mask,
|
| 145 |
+
causal_attention_mask,
|
| 146 |
+
condition=condition,
|
| 147 |
+
output_attentions=output_attentions,
|
| 148 |
+
)
|
| 149 |
+
else:
|
| 150 |
+
layer_outputs = encoder_layer(
|
| 151 |
+
hidden_states,
|
| 152 |
+
attention_mask,
|
| 153 |
+
causal_attention_mask,
|
| 154 |
+
condition=condition,
|
| 155 |
+
output_attentions=output_attentions,
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
hidden_states = layer_outputs[0]
|
| 159 |
+
|
| 160 |
+
if output_attentions:
|
| 161 |
+
all_attentions = all_attentions + (layer_outputs[1],)
|
| 162 |
+
|
| 163 |
+
if output_hidden_states:
|
| 164 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 165 |
+
|
| 166 |
+
if not return_dict:
|
| 167 |
+
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
|
| 168 |
+
return BaseModelOutput(
|
| 169 |
+
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class ConditionalCLIPVisionTransformer(CLIPVisionTransformer):
    """CLIP vision backbone whose encoder layers are modulated by a condition embedding."""

    def __init__(self, config: ConditionalCLIPVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)
        # NOTE: "pre_layrnorm" (sic) matches the upstream HF attribute / checkpoint key.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = ConditionalCLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        condition: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Embed `pixel_values`, run the conditional encoder, and pool on the CLS token."""
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedded = self.pre_layrnorm(self.embeddings(pixel_values))

        encoder_outputs = self.encoder(
            inputs_embeds=embedded,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            condition=condition,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        # Pool the CLS token (position 0), then apply the final layer norm.
        pooled_output = self.post_layernorm(last_hidden_state[:, 0, :])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
class ConditionalCLIPVisionModel(CLIPVisionModel):
    """`CLIPVisionModel` wrapper that forwards an extra `condition` tensor to the transformer."""

    config_class = ConditionalCLIPVisionConfig

    def __init__(self, config: ConditionalCLIPVisionConfig):
        super().__init__(config)
        self.vision_model = ConditionalCLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        condition: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Delegate to the conditional vision transformer, resolving `return_dict` first."""
        if return_dict is None:
            return_dict = self.config.use_return_dict
        return self.vision_model(
            pixel_values=pixel_values,
            condition=condition,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class ConditionalCLIPModel(CLIPModel):
    """CLIP text+vision model whose vision tower accepts an extra `condition`
    (modulation) embedding routed into every encoder layer.

    Mirrors `transformers.CLIPModel`; only the vision tower is swapped for
    `ConditionalCLIPVisionTransformer`.
    """

    config_class = CLIPConfig

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        # Validate sub-config types early, mirroring the upstream CLIPModel checks.
        if not isinstance(config.text_config, CLIPTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise ValueError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        # Standard CLIP text tower; only the vision tower is condition-aware.
        self.text_model = CLIPTextTransformer(text_config)
        self.vision_model = ConditionalCLIPVisionTransformer(vision_config)

        # Project both modalities into the shared `projection_dim` space.
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        condition: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        """Return projected image embeddings (pooled vision output through `visual_projection`)."""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            condition=condition,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Index 1 is the pooled output in both tuple and ModelOutput form.
        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        condition: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPOutput]:
        """Joint text/image forward producing contrastive logits; `condition`
        modulates the vision tower only.
        """
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            condition=condition,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/clip_dinov2_encoder.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
import numpy as np
|
| 5 |
+
import re
|
| 6 |
+
from einops import rearrange
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from torchvision import transforms
|
| 9 |
+
|
| 10 |
+
from transformers import CLIPTokenizer, CLIPImageProcessor
|
| 11 |
+
from transformers import AutoImageProcessor
|
| 12 |
+
from transformers import T5EncoderModel, T5Tokenizer, AutoTokenizer
|
| 13 |
+
from transformers.utils import ModelOutput
|
| 14 |
+
from typing import Iterable, Optional, Union, List
|
| 15 |
+
|
| 16 |
+
import craftsman
|
| 17 |
+
from craftsman.utils.typing import *
|
| 18 |
+
from .clip.modeling_clip import CLIPModel
|
| 19 |
+
from .clip.modeling_conditional_clip import ConditionalCLIPModel
|
| 20 |
+
from .base import BaseEmbedder, ImageType
|
| 21 |
+
from .dinov2.modeling_dinov2 import Dinov2Model
|
| 22 |
+
from .dinov2.modeling_conditional_dinov2 import ConditionalDinov2Model
|
| 23 |
+
|
| 24 |
+
@dataclass
class CLIPEmbedOutput(ModelOutput):
    """Bundle of CLIP vision-encoder results returned by `encode_image_clip(return_dict=True)`."""

    # Per-token features from the final CLIP vision layer.
    last_hidden_state: torch.FloatTensor = None
    # Pooled (CLS) embedding after the final layer norm.
    pooler_output: torch.FloatTensor = None
    # Pooled output mapped through CLIP's visual projection head.
    embeds: torch.FloatTensor = None
|
| 29 |
+
|
| 30 |
+
@dataclass
class DINOEmbedOutput(ModelOutput):
    """Bundle of DINOv2 encoder results returned by `encode_image_dino(return_dict=True)`.

    Fix: the `@dataclass` decorator was missing. `transformers.ModelOutput`
    relies on dataclass field machinery (`__post_init__`) to sync keyword
    arguments with attributes; without it, `DINOEmbedOutput(last_hidden_state=x, ...)`
    silently leaves both attributes at the class-level `None`.
    """

    # Per-token features from the last DINOv2 layer.
    last_hidden_state: torch.FloatTensor = None
    # Pooled summary embedding from the DINOv2 pooler.
    pooler_output: torch.FloatTensor = None
|
| 33 |
+
|
| 34 |
+
@craftsman.register("clip-dinov2-embedder")
class ClipDinoEmbedder(BaseEmbedder):
    """Image conditioner that fuses CLIP and DINOv2 features (optionally camera-modulated).

    DINO features are mapped through `linear_proj` to CLIP's width and
    concatenated with the CLIP tokens along the sequence dimension.
    """

    @dataclass
    class Config(BaseEmbedder.Config):
        pretrained_model_name_or_path: Optional[str] = None # the pretrained model name or path for condition model
        pretrained_clip_name_or_path: Optional[str] = None # the pretrained model name or path for clip
        pretrained_dino_name_or_path: Optional[str] = None # the pretrained model name or path for dino
        pretrained_linear_proj: Optional[str] = None
        freeze_modulation_clip: bool = False
        freeze_modulation_dino: bool = False
        config_path: str = ''
        enable_gradient_checkpointing: bool = False
        embeds_fusion_mode: int = 1 # 0: sum | 1: concat
        linear_proj_init: str = "constant"
        text_max_length: int = 77
        image_size_clip: int = 224
        image_size_dino: int = 224

    cfg: Config

    def configure(self) -> None:
        """Build the CLIP/DINO backbones, preprocessors, uncond embeds, and freezing policy."""
        super().configure()

        # Load the CLIP model and processor
        if not self.cfg.encode_camera:
            # Plain (unconditional) backbones.
            # NOTE(review): this branch checks `is not None` while the camera branch
            # below checks `== ''` — the two sentinels for "no pretrained path" are
            # inconsistent; confirm which convention the configs actually use.
            if self.cfg.pretrained_clip_name_or_path is not None:
                self.clip_model: CLIPModel = CLIPModel.from_pretrained(self.cfg.pretrained_clip_name_or_path)
            else:
                self.clip_model: CLIPModel = CLIPModel(config=ConditionalCLIPModel.config_class.from_pretrained(
                    "openai/clip-vit-large-patch14",
                ))
            if self.cfg.pretrained_dino_name_or_path is not None:
                self.dino_model: Dinov2Model = Dinov2Model.from_pretrained(self.cfg.pretrained_dino_name_or_path)
            else:
                self.dino_model: Dinov2Model = Dinov2Model(config=ConditionalDinov2Model.config_class.from_pretrained(
                    "facebook/dinov2-base",
                ))
        else:
            # Camera-conditional backbones: inject modulation_dim into the vision configs.
            if self.cfg.pretrained_clip_name_or_path == '':
                # NOTE(review): this branch builds only clip_model from a local JSON
                # config; dino_model is never assigned here — verify this path is
                # unused or that dino_model is provided elsewhere before relying on it.
                assert self.cfg.config_path is not None, "The config path should be provided"
                conditional_clip_config = ConditionalCLIPModel.config_class.from_json_file(self.cfg.config_path)
                conditional_clip_config.vision_config.modulation_dim = self.cfg.camera_embeds_dim
                self.clip_model: CLIPModel = ConditionalCLIPModel(conditional_clip_config)
            else:

                # clip
                conditional_clip_config = ConditionalCLIPModel.config_class.from_pretrained(
                    self.cfg.pretrained_clip_name_or_path,
                )
                conditional_clip_config.vision_config.modulation_dim = self.cfg.camera_embeds_dim
                self.clip_model: CLIPModel = ConditionalCLIPModel.from_pretrained(
                    self.cfg.pretrained_clip_name_or_path,
                    vision_config=conditional_clip_config.vision_config
                )

                # dino
                conditional_vit_config = ConditionalDinov2Model.config_class.from_pretrained(
                    self.cfg.pretrained_dino_name_or_path,
                )
                conditional_vit_config.modulation_dim = self.cfg.camera_embeds_dim
                self.dino_model: ConditionalDinov2Model = ConditionalDinov2Model.from_pretrained(
                    self.cfg.pretrained_dino_name_or_path,
                    config=conditional_vit_config
                )

        # HF processors are used on the inference path; torchvision transforms on the
        # training path (tensors already in [0, 1]).
        self.image_preprocess_clip = CLIPImageProcessor()
        self.image_preprocess_dino = AutoImageProcessor.from_pretrained(
            self.cfg.pretrained_dino_name_or_path if self.cfg.pretrained_dino_name_or_path is not None else "facebook/dinov2-base",
        )
        self.transform_clip= transforms.Compose(
            [
                transforms.Resize(self.cfg.image_size_clip, transforms.InterpolationMode.BICUBIC, antialias=True),
                transforms.CenterCrop(self.cfg.image_size_clip), # crop a (224, 224) square
                # OpenAI CLIP normalization constants.
                transforms.Normalize(
                    mean=[0.48145466, 0.4578275, 0.40821073],
                    std=[0.26862954, 0.26130258, 0.27577711],
                ),
            ]
        )

        self.transform_dino = transforms.Compose(
            [
                transforms.Resize(self.cfg.image_size_dino, transforms.InterpolationMode.BICUBIC, antialias=True),
                transforms.CenterCrop(self.cfg.image_size_dino), # crop a (224, 224) square
                # ImageNet normalization constants (DINOv2 default).
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                ),
            ]
        )

        if self.cfg.enable_gradient_checkpointing:
            # Only the DINO tower is checkpointed here; CLIP is left as-is.
            self.dino_model.encoder.gradient_checkpointing = True

        # Precompute the "empty" (unconditional / CFG) image embeddings.
        # NOTE(review): the (257, 1024) shapes assume ViT-L/14-sized CLIP tokens and
        # DINO features already projected to 1024 — confirm against the configured models.
        if self.cfg.zero_uncond_embeds:
            self.empty_image_embeds_clip = torch.zeros((self.cfg.n_views, 257, 1024)).detach()
            self.empty_image_embeds_dino = torch.zeros((self.cfg.n_views, 257, 1024)).detach()
            self.empty_image_embeds = torch.cat([self.empty_image_embeds_clip, self.empty_image_embeds_dino], dim=1)
        else:
            if self.cfg.encode_camera:
                self.empty_image_embeds_clip = self.encode_image_clip(torch.zeros(self.cfg.n_views, self.cfg.image_size_clip, self.cfg.image_size_clip, 3), self.cameras[:self.cfg.n_views]).detach()
                self.empty_image_embeds_dino = self.encode_image_dino(torch.zeros(self.cfg.n_views, self.cfg.image_size_clip, self.cfg.image_size_clip, 3), self.cameras[:self.cfg.n_views]).detach()
                self.empty_image_embeds = torch.cat([self.empty_image_embeds_clip, self.empty_image_embeds_dino], dim=1)
            else:
                self.empty_image_embeds_clip = self.encode_image_clip(torch.zeros(self.cfg.n_views, self.cfg.image_size_dino, self.cfg.image_size_dino, 3)).detach()
                self.empty_image_embeds_dino = self.encode_image_dino(torch.zeros(self.cfg.n_views, self.cfg.image_size_dino, self.cfg.image_size_dino, 3)).detach()
                self.empty_image_embeds = torch.cat([self.empty_image_embeds_clip, self.empty_image_embeds_dino], dim=1)

        # Freeze the clip model parameters
        self.clip_model.eval()
        for k, p in self.clip_model.named_parameters():
            ks = k.split('.')
            # NOTE(review): `and` binds tighter than `or`, so this reads as
            # `'mod_norm1' in ks or ('mod_norm2' in ks and not freeze)`. The final
            # requires_grad outcome happens to coincide with the intended
            # `(mod_norm1 or mod_norm2) and not freeze` because the then-branch
            # also applies `not freeze` — but parenthesizing would make it explicit.
            if 'mod_norm1' in ks or 'mod_norm2' in ks and not self.cfg.freeze_modulation_clip:
                p.requires_grad_(not self.cfg.freeze_modulation_clip)
            else:
                p.requires_grad_(False)

        # freeze the dino model parameters
        self.dino_model.eval()
        for k, p in self.dino_model.named_parameters():
            ks = k.split('.')
            # Same precedence caveat as the CLIP loop above.
            if 'mod_norm1' in ks or 'mod_norm2' in ks and not self.cfg.freeze_modulation_dino:
                p.requires_grad_(not self.cfg.freeze_modulation_dino)
            else:
                p.requires_grad_(False)

        # Map DINO features (768-d, dinov2-base) to CLIP's 1024-d token width.
        self.linear_proj = nn.Linear(768, 1024, bias=False)
        if self.cfg.linear_proj_init == "constant":
            nn.init.constant_(self.linear_proj.weight, 0)
        elif self.cfg.linear_proj_init == "xavier":
            nn.init.xavier_uniform_(self.linear_proj.weight)
        else:
            raise ValueError

        if self.cfg.pretrained_model_name_or_path is not None:
            # Load a full-conditioner checkpoint; keys are stored under a
            # 'condition.' prefix by the training wrapper.
            print(f"Loading ckpt from {self.cfg.pretrained_model_name_or_path}")
            ckpt = torch.load(self.cfg.pretrained_model_name_or_path, map_location="cpu")
            if 'state_dict' in ckpt:
                ckpt = ckpt['state_dict']
            pretrained_model_ckpt = {}
            for k, v in ckpt.items():
                if k.startswith('condition.'):
                    pretrained_model_ckpt[k.replace('condition.', '')] = v
            self.load_state_dict(pretrained_model_ckpt, strict=False)

    def encode_image_clip(self, images: Iterable[Optional[ImageType]], cameras: Optional[torch.Tensor] = None, force_none_camera_embeds: bool = False, return_dict: bool = False, **kwargs) -> torch.FloatTensor:
        """Encode images with the CLIP tower.

        Tensors/arrays (values in [0, 1]) take the training path via
        `transform_clip`; anything else takes the inference path via the HF
        processor. Returns the last hidden state, or a `CLIPEmbedOutput` when
        `return_dict=True`.
        """
        camera_embeds = None
        if isinstance(images, (np.ndarray, torch.Tensor)): # for training process
            assert images.min() >= 0.0 and images.max() <= 1.0, "The pixel values should be in the range of [0, 1]"
            do_rescale = False
            if self.cfg.encode_camera:
                assert cameras is not None, "The cameras should be provided"
                camera_embeds = self.encode_camera(cameras)
            # NHWC -> NCHW for torchvision transforms.
            pixel_values = self.transform_clip(images.permute(0, 3, 1, 2))
        else: # for inference process
            do_rescale = True
            if self.cfg.encode_camera:
                if cameras is None:
                    # Fall back to the canonical per-view cameras, tiled per batch.
                    bs = len(images) // self.cfg.n_views
                    cameras = self.cameras[:self.cfg.n_views].repeat(bs, 1, 1).to(self.clip_model.device)
                camera_embeds = self.encode_camera(cameras)
            pixel_values = self.image_preprocess_clip.preprocess(images, return_tensors='pt', do_rescale=do_rescale).pixel_values

        if force_none_camera_embeds:
            camera_embeds = None

        # Normalize to a (B, N, C, H, W) multi-view layout.
        if pixel_values.ndim == 4:
            pixel_values = pixel_values.unsqueeze(1)
            if camera_embeds is not None:
                camera_embeds = camera_embeds.unsqueeze(1)

        if self.cfg.encode_camera and camera_embeds is not None:
            vision_outputs = self.clip_model.vision_model(
                pixel_values=rearrange(pixel_values.to(self.clip_model.device), "B N C H W -> (B N) C H W"),
                condition=rearrange(camera_embeds, "B N C -> (B N) C")
            )

        else:
            vision_outputs = self.clip_model.vision_model(
                pixel_values=rearrange(pixel_values.to(self.clip_model.device), "B N C H W -> (B N) C H W"),
            )

        if return_dict:
            # clip
            pooler_output = vision_outputs[1] # pooled_output
            image_features = self.clip_model.visual_projection(pooler_output)

            clip_embeds = vision_outputs.last_hidden_state

            clip_embeds_dict = CLIPEmbedOutput(
                last_hidden_state=clip_embeds,
                pooler_output=pooler_output,
                embeds=image_features
            )

            return clip_embeds_dict
        else:
            return vision_outputs.last_hidden_state

    def encode_image_dino(self, images: Iterable[Optional[ImageType]], cameras: Optional[torch.Tensor] = None, force_none_camera_embeds: bool = False, return_dict: bool = False, **kwargs) -> torch.FloatTensor:
        """Encode images with the DINOv2 tower; same input conventions as `encode_image_clip`."""
        camera_embeds = None
        if isinstance(images, (np.ndarray, torch.Tensor)): # for training process
            assert images.min() >= 0.0 and images.max() <= 1.0, "The pixel values should be in the range of [0, 1]"
            do_rescale = False
            if self.cfg.encode_camera:
                assert cameras is not None, "The cameras should be provided"
                camera_embeds = self.encode_camera(cameras)
            pixel_values = self.transform_dino(images.permute(0, 3, 1, 2))
        else: # for inference process
            do_rescale = True
            if self.cfg.encode_camera:
                if cameras is None:
                    bs = len(images) // self.cfg.n_views
                    cameras = self.cameras[:self.cfg.n_views].repeat(bs, 1, 1).to(self.dino_model.device)
                camera_embeds = self.encode_camera(cameras)
            pixel_values = self.image_preprocess_dino.preprocess(images, return_tensors='pt', do_rescale=do_rescale).pixel_values

        if force_none_camera_embeds:
            camera_embeds = None

        # Normalize to a (B, N, C, H, W) multi-view layout.
        if pixel_values.ndim == 4:
            pixel_values = pixel_values.unsqueeze(1)
            if camera_embeds is not None:
                camera_embeds = camera_embeds.unsqueeze(1)

        if self.cfg.encode_camera and camera_embeds is not None:
            vision_outputs = self.dino_model(
                rearrange(pixel_values.to(self.dino_model.device), "B N C H W -> (B N) C H W"),
                condition=rearrange(camera_embeds, "B N C -> (B N) C"),
            )
        else:

            vision_outputs = self.dino_model(
                rearrange(pixel_values.to(self.dino_model.device), "B N C H W -> (B N) C H W"),
            )

        if return_dict:
            # dino
            dino_embeds_dict = DINOEmbedOutput(
                last_hidden_state=vision_outputs.last_hidden_state,
                pooler_output=vision_outputs.pooler_output,
            )
            return dino_embeds_dict
        else:
            return vision_outputs.last_hidden_state

    def encode_image(self, images: Iterable[Optional[ImageType]], cameras: Optional[torch.Tensor] = None, force_none_camera_embeds: bool = False, return_dict: bool = False, **kwargs) -> torch.FloatTensor:
        """Fused encoding: concat CLIP tokens with projected DINO tokens along dim 1.

        NOTE(review): `force_none_camera_embeds` and `return_dict` are accepted but
        not forwarded to the per-tower encoders — confirm this is intentional.
        """
        clip_embeds = self.encode_image_clip(images, cameras)
        dino_embeds = self.encode_image_dino(images, cameras)
        dino_embeds = self.linear_proj(dino_embeds)
        visual_embeds = torch.cat([clip_embeds, dino_embeds], dim=1)
        return visual_embeds
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/__pycache__/modeling_conditional_dinov2.cpython-310.pyc
ADDED
|
Binary file (6.21 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc
ADDED
|
Binary file (27.5 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/modeling_conditional_dinov2.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
# Reference:
|
| 16 |
+
# * transformers/models/dinov2/modeling_dinov2.py
|
| 17 |
+
# * https://github.com/facebookresearch/DiT/blob/main/models.py#L101
|
| 18 |
+
# * https://github.com/3DTopia/OpenLRM/tree/main/openlrm/models/encoders/dinov2
|
| 19 |
+
""" PyTorch DINOv2 model."""
|
| 20 |
+
|
| 21 |
+
from typing import Dict, List, Optional, Set, Tuple, Union
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.nn as nn
|
| 25 |
+
|
| 26 |
+
from .modeling_dinov2 import (
|
| 27 |
+
Dinov2Config,
|
| 28 |
+
Dinov2Layer,
|
| 29 |
+
Dinov2Model,
|
| 30 |
+
Dinov2Embeddings,
|
| 31 |
+
BaseModelOutput,
|
| 32 |
+
BaseModelOutputWithPooling,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class ModLN(nn.Module):
    """Condition-driven shift/scale modulation (adaLN-style).

    The single linear layer is zero-initialized, so at construction time the
    module is an exact identity mapping regardless of the condition vector.
    """

    def __init__(self, inner_dim: int, mod_dim: int = 1024):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.SiLU(),
            nn.Linear(mod_dim, inner_dim * 2),
        )
        # Zero-init every linear so training starts from the identity map.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.zeros_(module.weight)
                nn.init.zeros_(module.bias)

    def forward(self, x: torch.Tensor, condition: torch.Tensor) -> torch.Tensor:
        """Modulate token features.

        x: [N, M, C_in], M: num of tokens
        condition: [N, C_mod]
        """
        modulation = self.mlp(condition).unsqueeze(1)  # [N, 1, 2*C_in]
        shift, scale = modulation.chunk(2, dim=-1)
        return x * (scale + 1) + shift
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class ConditionalDinov2Config(Dinov2Config):
    """DINOv2 configuration extended with the width of the modulation vector."""

    def __init__(self, modulation_dim: int = 1024, *args, **kwargs):
        # Forward every standard DINOv2 option to the base config unchanged.
        super().__init__(*args, **kwargs)
        # Dimensionality of the condition vector consumed by the ModLN layers.
        self.modulation_dim = modulation_dim
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class ConditionalDinov2Layer(Dinov2Layer):
    """This corresponds to the Block class in the original implementation."""

    def __init__(self, config: ConditionalDinov2Config) -> None:
        super().__init__(config)
        # ModLN layers are zero-initialized, so at the start of training this
        # layer behaves exactly like a plain (unconditional) Dinov2Layer.
        self.mod_norm1 = ModLN(config.hidden_size, config.modulation_dim)
        self.mod_norm2 = ModLN(config.hidden_size, config.modulation_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        condition: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Pre-norm transformer block whose two layernorm outputs are modulated
        by `condition` (shape [N, modulation_dim]) before attention and MLP.

        Returns `(layer_output,)`, extended with attention weights when
        `output_attentions` is True.
        """
        self_attention_outputs = self.attention(
            self.mod_norm1(self.norm1(hidden_states), condition), # in Dinov2, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]

        # Learnable per-channel gate on the attention branch (LayerScale).
        attention_output = self.layer_scale1(attention_output)
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in Dinov2, layernorm is also applied after self-attention
        layer_output = self.mod_norm2(self.norm2(hidden_states), condition)
        layer_output = self.mlp(layer_output)
        layer_output = self.layer_scale2(layer_output)

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2
class ConditionalDinov2Encoder(nn.Module):
    """Stack of `ConditionalDinov2Layer`s; each layer additionally receives a
    per-sample `condition` vector that modulates its layernorm outputs."""

    def __init__(self, config: ConditionalDinov2Config) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ConditionalDinov2Layer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally; trades recomputation for activation memory.
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        condition: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        """Run the token sequence through every layer, threading `condition` into each.

        `head_mask`, when given, is indexed per layer.  Intermediate hidden
        states / attention maps are only collected when the matching flag is set.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the state *entering* this layer.
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # NOTE(review): _gradient_checkpointing_func is injected by the HF
                # PreTrainedModel gradient-checkpointing machinery — confirm this
                # encoder is always used inside a PreTrainedModel subclass.
                # Positional args must match ConditionalDinov2Layer.forward order.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    condition,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    layer_head_mask,
                    condition,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Also record the final layer's output.
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class ConditionalDinov2Model(Dinov2Model):
    """DINOv2 backbone whose encoder layers accept an extra `condition` vector."""
    config_class = ConditionalDinov2Config
    def __init__(self, config: ConditionalDinov2Config):
        super().__init__(config)
        self.config = config

        # Standard (unconditional) patch/CLS embeddings; the conditioning only
        # enters inside the encoder layers.
        self.embeddings = Dinov2Embeddings(config)
        self.encoder = ConditionalDinov2Encoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        condition: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Embed `pixel_values`, encode with `condition`-modulated layers, and
        return the layernormed token sequence plus the CLS ("pooled") token.

        Raises:
            ValueError: if `pixel_values` is None.
        """
        # Fall back to the config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            condition=condition,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        # The CLS token (position 0) serves as the pooled representation.
        pooled_output = sequence_output[:, 0, :]

        if not return_dict:
            head_outputs = (sequence_output, pooled_output)
            return head_outputs + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 228 |
+
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2/modeling_dinov2.py
ADDED
|
@@ -0,0 +1,859 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch DINOv2 model."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import collections.abc
|
| 19 |
+
import math
|
| 20 |
+
from typing import Dict, List, Optional, Set, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
import torch.utils.checkpoint
|
| 24 |
+
from torch import nn
|
| 25 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 26 |
+
|
| 27 |
+
from transformers.activations import ACT2FN
|
| 28 |
+
from transformers.modeling_outputs import (
|
| 29 |
+
BackboneOutput,
|
| 30 |
+
BaseModelOutput,
|
| 31 |
+
BaseModelOutputWithPooling,
|
| 32 |
+
ImageClassifierOutput,
|
| 33 |
+
)
|
| 34 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 35 |
+
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
| 36 |
+
from transformers.utils import (
|
| 37 |
+
add_code_sample_docstrings,
|
| 38 |
+
add_start_docstrings,
|
| 39 |
+
add_start_docstrings_to_model_forward,
|
| 40 |
+
logging,
|
| 41 |
+
replace_return_docstrings,
|
| 42 |
+
)
|
| 43 |
+
from transformers.utils.backbone_utils import BackboneMixin
|
| 44 |
+
from transformers.models.dinov2.configuration_dinov2 import Dinov2Config
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
logger = logging.get_logger(__name__)

# Constants consumed by the transformers docstring/doc-sample decorators below.
# General docstring
_CONFIG_FOR_DOC = "Dinov2Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/dinov2-base"
_EXPECTED_OUTPUT_SHAPE = [1, 257, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-small-imagenet1k-1-layer"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/dinov2-base",
    # See all DINOv2 models at https://huggingface.co/models?filter=dinov2
]
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class Dinov2Embeddings(nn.Module):
    """
    Construct the CLS token, mask token, position and patch embeddings.
    """

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__()

        # Learnable CLS token, prepended to the patch sequence in forward().
        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        # Embedding substituted for masked patches (masked-image-modeling style).
        self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
        self.patch_embeddings = Dinov2PatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        # One position per patch plus one for the CLS token.
        self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1
        # Fast path: token count already matches the pretrained grid.
        # NOTE(review): the `height == width` test compares the raw input
        # resolution, not the patch grid; verify against upstream transformers
        # that this is the intended condition for skipping interpolation.
        if num_patches == num_positions and height == width:
            return self.position_embeddings
        class_pos_embed = self.position_embeddings[:, 0]
        patch_pos_embed = self.position_embeddings[:, 1:]
        dim = embeddings.shape[-1]
        height = height // self.config.patch_size
        width = width // self.config.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        height, width = height + 0.1, width + 0.1
        # Pretrained positions are assumed to form a square grid (sqrt below).
        patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        target_dtype = patch_pos_embed.dtype
        # Interpolate in float32 for numerical stability, then cast back.
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.to(dtype=torch.float32),
            scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))),
            mode="bicubic",
            align_corners=False,
        ).to(dtype=target_dtype)
        if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
            raise ValueError("Width or height does not match with the interpolated position embeddings")
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Embed pixels into patch tokens, apply masking, prepend CLS, add positions."""
        batch_size, _, height, width = pixel_values.shape
        target_dtype = self.patch_embeddings.projection.weight.dtype
        # Cast pixels to the projection weight's dtype (e.g. fp16 under autocast).
        embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))

        if bool_masked_pos is not None:
            # Replace masked patch embeddings with the learned mask token.
            embeddings = torch.where(
                bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
            )

        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)

        embeddings = self.dropout(embeddings)

        return embeddings
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class Dinov2PatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        # Normalize scalar sizes into (height, width) pairs.
        image_size = config.image_size
        patch_size = config.patch_size
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        # Number of non-overlapping patches tiling the image.
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        # A patch-strided convolution is equivalent to a per-patch linear projection.
        self.projection = nn.Conv2d(
            config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
                f" Expected {self.num_channels} but got {num_channels}."
            )
        # (B, C, H, W) -> (B, hidden, H/p, W/p) -> (B, num_patches, hidden)
        return self.projection(pixel_values).flatten(2).transpose(1, 2)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2
class Dinov2SelfAttention(nn.Module):
    """Multi-head self-attention with separate query/key/value projections."""

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__()
        hidden_size = config.hidden_size
        heads = config.num_attention_heads
        # `embedding_size` configs are allowed to have non-divisible sizes.
        if hidden_size % heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = heads
        self.attention_head_size = int(hidden_size / heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        # (B, S, all_head_size) -> (B, heads, S, head_size)
        split = x.view(x.size()[:-1] + (self.num_attention_heads, self.attention_head_size))
        return split.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Scaled dot-product attention scores: (B, heads, S, S).
        scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        probs = nn.functional.softmax(scores, dim=-1)

        # Dropping whole attended-to tokens, as in the original Transformer paper.
        probs = self.dropout(probs)

        if head_mask is not None:
            probs = probs * head_mask

        context = torch.matmul(probs, value_layer)
        context = context.permute(0, 2, 1, 3).contiguous()
        context = context.view(context.size()[:-2] + (self.all_head_size,))

        return (context, probs) if output_attentions else (context,)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2
class Dinov2SelfOutput(nn.Module):
    """
    The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is accepted for interface compatibility but intentionally
        # unused here; the residual add happens in Dinov2Layer.
        projected = self.dense(hidden_states)
        return self.dropout(projected)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2
class Dinov2Attention(nn.Module):
    """Self-attention plus its output projection, with head-pruning support."""

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__()
        self.attention = Dinov2SelfAttention(config)
        self.output = Dinov2SelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        """Remove the given heads from q/k/v and the output projection."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        # Re-attach attention weights when they were requested.
        return (attention_output,) + self_outputs[1:]
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class Dinov2LayerScale(nn.Module):
    """Per-channel learnable scaling of a residual branch (LayerScale)."""

    def __init__(self, config) -> None:
        super().__init__()
        # One learnable gain per hidden channel, initialized to layerscale_value.
        initial_gains = config.layerscale_value * torch.ones(config.hidden_size)
        self.lambda1 = nn.Parameter(initial_gains)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.lambda1 * hidden_state
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    # Identity at inference time or when dropping is disabled.
    if not training or drop_prob == 0.0:
        return input

    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    keep_mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device).add_(keep_prob).floor_()
    # Rescale surviving paths so the expected activation is unchanged.
    return input.div(keep_prob) * keep_mask
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
# Copied from transformers.models.beit.modeling_beit.BeitDropPath
class Dinov2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        # Probability of zeroing out an entire sample's residual branch.
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the functional form; only active while training.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
class Dinov2MLP(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear."""

    def __init__(self, config) -> None:
        super().__init__()
        width = config.hidden_size
        expanded = int(config.hidden_size * config.mlp_ratio)
        self.fc1 = nn.Linear(width, expanded, bias=True)
        # `hidden_act` may be an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.activation = ACT2FN[config.hidden_act]
        else:
            self.activation = config.hidden_act
        self.fc2 = nn.Linear(expanded, width, bias=True)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.activation(self.fc1(hidden_state)))
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class Dinov2SwiGLUFFN(nn.Module):
    """SwiGLU feed-forward block: a gated SiLU unit between two linear maps."""

    def __init__(self, config) -> None:
        super().__init__()
        width = config.hidden_size
        inner = int(config.hidden_size * config.mlp_ratio)
        # Shrink to 2/3 (offsetting the gate's extra parameters) and round up
        # to a multiple of 8.
        inner = (int(inner * 2 / 3) + 7) // 8 * 8

        self.weights_in = nn.Linear(width, 2 * inner, bias=True)
        self.weights_out = nn.Linear(inner, width, bias=True)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        gate, value = self.weights_in(hidden_state).chunk(2, dim=-1)
        return self.weights_out(nn.functional.silu(gate) * value)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
class Dinov2Layer(nn.Module):
    """This corresponds to the Block class in the original implementation.

    One pre-norm transformer block: LayerNorm -> attention -> LayerScale ->
    (drop-path) residual, then LayerNorm -> MLP/SwiGLU -> LayerScale ->
    (drop-path) residual.
    """

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__()

        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = Dinov2Attention(config)
        # LayerScale: learnable per-channel scaling of each sub-layer's output.
        self.layer_scale1 = Dinov2LayerScale(config)
        # Stochastic depth on the residual branches; identity when rate is 0.
        self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()

        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Larger DINOv2 variants use a SwiGLU feed-forward instead of the plain MLP.
        if config.use_swiglu_ffn:
            self.mlp = Dinov2SwiGLUFFN(config)
        else:
            self.mlp = Dinov2MLP(config)
        self.layer_scale2 = Dinov2LayerScale(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Run the block; returns `(hidden_states,)` plus attention weights when requested."""
        self_attention_outputs = self.attention(
            self.norm1(hidden_states),  # in Dinov2, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]

        attention_output = self.layer_scale1(attention_output)
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in Dinov2, layernorm is also applied after self-attention
        layer_output = self.norm2(hidden_states)
        layer_output = self.mlp(layer_output)
        layer_output = self.layer_scale2(layer_output)

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2
class Dinov2Encoder(nn.Module):
    """Stack of `config.num_hidden_layers` Dinov2Layer blocks."""

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (e.g. by PreTrainedModel.gradient_checkpointing_enable).
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        """Run all layers, optionally collecting per-layer hidden states and attentions."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record each layer's *input*; the final output is appended after the loop.
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # Trade compute for memory: recompute activations during backward.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple form drops any accumulator that was not requested (is None).
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
class Dinov2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Dinov2Config
    base_model_prefix = "dinov2"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
            # `trunc_normal_cpu` not implemented in `half` issues
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as identity: zero bias, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, Dinov2Embeddings):
            # Same fp32 round-trip as above for the learned position embeddings
            # and the CLS token.
            module.position_embeddings.data = nn.init.trunc_normal_(
                module.position_embeddings.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.position_embeddings.dtype)

            module.cls_token.data = nn.init.trunc_normal_(
                module.cls_token.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.cls_token.dtype)
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
# Shared docstring fragments injected into the model classes below via the
# @add_start_docstrings / @add_start_docstrings_to_model_forward decorators.
DINOV2_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Inputs documentation for the base model (includes `bool_masked_pos`, which is
# only used for masked-image-modeling pre-training).
DINOV2_BASE_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`BitImageProcessor.preprocess`] for details.

        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
            pre-training.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

# Inputs documentation for the downstream heads (no `bool_masked_pos`).
DINOV2_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`BitImageProcessor.preprocess`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
@add_start_docstrings(
    "The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.",
    DINOV2_START_DOCSTRING,
)
class Dinov2Model(Dinov2PreTrainedModel):
    def __init__(self, config: Dinov2Config):
        super().__init__(config)
        self.config = config

        self.embeddings = Dinov2Embeddings(config)
        self.encoder = Dinov2Encoder(config)

        # Final LayerNorm applied to the encoder output before pooling.
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
        """Return the patch-embedding module (the model's input embedding layer)."""
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Embed pixels, run the encoder, and return normed sequence + CLS pooled output."""
        # Fall back to the config defaults for any unspecified output flag.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        # Pooled output is the (normed) CLS token at position 0.
        pooled_output = sequence_output[:, 0, :]

        if not return_dict:
            head_outputs = (sequence_output, pooled_output)
            return head_outputs + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
@add_start_docstrings(
    """
    Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state
    of the [CLS] token) e.g. for ImageNet.
    """,
    DINOV2_START_DOCSTRING,
)
class Dinov2ForImageClassification(Dinov2PreTrainedModel):
    def __init__(self, config: Dinov2Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.dinov2 = Dinov2Model(config)

        # Classifier head
        # Input width is 2 * hidden_size: the CLS token concatenated with the
        # mean of the patch tokens (see forward).
        self.classifier = (
            nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.dinov2(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]  # batch_size, sequence_length, hidden_size

        cls_token = sequence_output[:, 0]
        patch_tokens = sequence_output[:, 1:]

        # Classifier sees [CLS ; mean(patch tokens)] — hence hidden_size * 2 above.
        linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)

        logits = self.classifier(linear_input)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Infer the problem type once from num_labels and label dtype, then cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
@add_start_docstrings(
    """
    Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    DINOV2_START_DOCSTRING,
)
class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        # One feature width per stage (embeddings + each encoder layer).
        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
        self.embeddings = Dinov2Embeddings(config)
        self.encoder = Dinov2Encoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
        """Return the patch-embedding module (the model's input embedding layer)."""
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
        >>> model = AutoBackbone.from_pretrained(
        ...     "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 16, 16]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        # Always request hidden states: the per-stage feature maps come from them.
        outputs = self.encoder(
            embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                if self.config.apply_layernorm:
                    hidden_state = self.layernorm(hidden_state)
                if self.config.reshape_hidden_states:
                    # Drop the CLS token before reshaping to a spatial map.
                    hidden_state = hidden_state[:, 1:]
                    # this was actually a bug in the original implementation that we copied here,
                    # cause normally the order is height, width
                    batch_size, _, height, width = pixel_values.shape
                    patch_size = self.config.patch_size
                    hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
                    hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
                feature_maps += (hidden_state,)

        if not return_dict:
            if output_hidden_states:
                output = (feature_maps,) + outputs[1:]
            else:
                # Skip the hidden-states slot we forced the encoder to produce.
                output = (feature_maps,) + outputs[2:]
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions if output_attentions else None,
        )
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2_encoder.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
import numpy as np
|
| 5 |
+
import re
|
| 6 |
+
from einops import rearrange
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from torchvision import transforms
|
| 9 |
+
|
| 10 |
+
from transformers import AutoImageProcessor, AutoModel
|
| 11 |
+
from transformers.utils import ModelOutput
|
| 12 |
+
from typing import Iterable, Optional, Union, List
|
| 13 |
+
|
| 14 |
+
import craftsman
|
| 15 |
+
from craftsman.utils.typing import *
|
| 16 |
+
from .base import BaseEmbedder, ImageType
|
| 17 |
+
from .dinov2.modeling_dinov2 import Dinov2Model
|
| 18 |
+
from .dinov2.modeling_conditional_dinov2 import ConditionalDinov2Model
|
| 19 |
+
from .dinov2_with_registers.modeling_dinov2_with_registers import Dinov2WithRegistersModel
|
| 20 |
+
|
| 21 |
+
class DINOEmbedOutput(ModelOutput):
    """Output container for the DINO image embedder."""

    # Per-token features from the DINO backbone; presumably (batch, seq_len, hidden) — TODO confirm.
    last_hidden_state: torch.FloatTensor = None
    # Pooled feature; presumably the backbone's CLS/pooler output — verify against caller.
    pooler_output: torch.FloatTensor = None
|
| 24 |
+
|
| 25 |
+
@craftsman.register("dinov2-embedder")
|
| 26 |
+
class Dinov2Embedder(BaseEmbedder):
|
| 27 |
+
|
| 28 |
+
    @dataclass
    class Config(BaseEmbedder.Config):
        """Configuration for the DINOv2 image embedder; extends the base embedder config."""

        pretrained_model_name_or_path: Optional[str] = None # the pretrained model name or path for condition model
        pretrained_dino_name_or_path: Optional[str] = None # the pretrained model name or path for dino
        freeze_modulation_dino: bool = False  # if True, the camera-modulation norm layers are frozen as well
        enable_gradient_checkpointing: bool = False  # enable gradient checkpointing inside the DINO encoder
        image_size_dino: int = 224  # square input resolution fed to DINOv2
        dino_type: Optional[str] = None  # e.g. "dinov2-base"; used when no pretrained path is given
        kwargs: Optional[dict] = None  # extra kwargs forwarded to AutoModel.from_pretrained
|
| 37 |
+
|
| 38 |
+
cfg: Config
|
| 39 |
+
|
| 40 |
+
    def configure(self) -> None:
        """Build the DINOv2 backbone, preprocessing pipeline, and unconditional
        embeddings; freeze backbone weights and optionally load a checkpoint."""
        super().configure()

        # Load the DINOV2 model and processor
        if not self.cfg.encode_camera:
            if self.cfg.pretrained_dino_name_or_path is not None:
                # Load full pretrained weights via AutoModel.
                self.dino_type = self.cfg.pretrained_dino_name_or_path
                if self.cfg.kwargs is not None:
                    self.dino_model: Dinov2Model = AutoModel.from_pretrained(self.cfg.pretrained_dino_name_or_path, **self.cfg.kwargs)
                else:
                    self.dino_model: Dinov2Model = AutoModel.from_pretrained(self.cfg.pretrained_dino_name_or_path)
            else:
                # No DINO weights path: build the architecture from config only
                # (weights are expected to come from the ckpt loaded at the end).
                if self.cfg.pretrained_model_name_or_path is None: # default to load Dinov2-base model
                    assert self.cfg.dino_type is not None, "The dino_type should be provided"
                    print(f"Loading Dinov2 model from {self.cfg.dino_type}")
                    self.dino_type = f"facebook/{self.cfg.dino_type}"
                    if "reg" in self.cfg.dino_type:
                        # "reg" in the name selects the with-registers variant.
                        self.dino_model: Dinov2WithRegistersModel = Dinov2WithRegistersModel(config=Dinov2WithRegistersModel.config_class.from_pretrained(
                            self.dino_type,
                        ))
                    else:
                        self.dino_model: Dinov2Model = Dinov2Model(config=Dinov2Model.config_class.from_pretrained(
                            self.dino_type,
                        ))
                elif "dinov2base" in self.cfg.pretrained_model_name_or_path:
                    print("Loading Dinov2 model from facebook/dinov2-base")
                    self.dino_type = "facebook/dinov2-base"
                    self.dino_model: Dinov2Model = Dinov2Model(config=Dinov2Model.config_class.from_pretrained(
                        "facebook/dinov2-base",
                    ))
                elif "dinov2regbase" in self.cfg.pretrained_model_name_or_path:
                    print("Loading Dinov2 model from facebook/dinov2-with-registers-base")
                    self.dino_type = "facebook/dinov2-with-registers-base"
                    self.dino_model: Dinov2WithRegistersModel = Dinov2WithRegistersModel(config=Dinov2WithRegistersModel.config_class.from_pretrained(
                        "facebook/dinov2-with-registers-base",
                    ))
                elif "dinov2reglarge" in self.cfg.pretrained_model_name_or_path:
                    print("Loading Dinov2 model from facebook/dinov2-with-registers-large")
                    self.dino_type = "facebook/dinov2-with-registers-large"
                    self.dino_model: Dinov2WithRegistersModel = Dinov2WithRegistersModel(config=Dinov2WithRegistersModel.config_class.from_pretrained(
                        "facebook/dinov2-with-registers-large",
                    ))
                else:
                    raise ValueError(f"Unknown Dinov2 model: {self.cfg.pretrained_model_name_or_path}")
        else:
            # dino
            # Camera-conditioned variant: inject camera embeddings through
            # modulated norm layers of ConditionalDinov2Model.
            # NOTE(review): this branch never assigns self.dino_type, yet it is
            # read below for AutoImageProcessor — confirm it is set elsewhere.
            conditional_vit_config = ConditionalDinov2Model.config_class.from_pretrained(
                self.cfg.pretrained_dino_name_or_path,
            )
            conditional_vit_config.modulation_dim = self.cfg.camera_embeds_dim
            self.dino_model: ConditionalDinov2Model = ConditionalDinov2Model.from_pretrained(
                self.cfg.pretrained_dino_name_or_path,
                config=conditional_vit_config
            )

        self.image_preprocess_dino = AutoImageProcessor.from_pretrained(self.dino_type)
        # Tensor-path preprocessing (training): resize + center-crop + ImageNet normalization.
        self.transform_dino = transforms.Compose(
            [
                transforms.Resize(self.cfg.image_size_dino, transforms.InterpolationMode.BICUBIC, antialias=True),
                transforms.CenterCrop(self.cfg.image_size_dino), # crop a (image_size_dino, image_size_dino) square
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                ),
            ]
        )

        if self.cfg.enable_gradient_checkpointing:
            self.dino_model.encoder.gradient_checkpointing = True

        # Unconditional (classifier-free guidance) embeddings: either all zeros or
        # the embedding of an all-black image.
        if self.cfg.zero_uncond_embeds:
            # Token count = patch grid (patch size 14) + 1 CLS token.
            self.empty_image_embeds = torch.zeros((self.cfg.n_views, (self.cfg.image_size_dino // 14) ** 2 + 1, self.dino_model.config.hidden_size)).detach()
        else:
            if self.cfg.encode_camera:
                self.empty_image_embeds = self.encode_image_dino(torch.zeros(self.cfg.n_views, self.cfg.image_size_dino, self.cfg.image_size_dino, 3), self.cameras[:self.cfg.n_views]).detach()
            else:
                self.empty_image_embeds = self.encode_image_dino(torch.zeros(self.cfg.n_views, self.cfg.image_size_dino, self.cfg.image_size_dino, 3)).detach()

        # freeze the dino model parameters
        self.dino_model.eval()
        for k, p in self.dino_model.named_parameters():
            ks = k.split('.')
            # NOTE(review): `and` binds tighter than `or`, so this parses as
            # 'mod_norm1' in ks or ('mod_norm2' in ks and not freeze). The branch
            # body makes the net effect identical either way, but parentheses
            # would make the intent explicit.
            if 'mod_norm1' in ks or 'mod_norm2' in ks and not self.cfg.freeze_modulation_dino:
                p.requires_grad_(not self.cfg.freeze_modulation_dino)
            else:
                p.requires_grad_(False)

        # load pretrained_model_name_or_path
        if self.cfg.pretrained_model_name_or_path is not None:
            print(f"Loading ckpt from {self.cfg.pretrained_model_name_or_path}")
            # NOTE(review): torch.load without weights_only — only load trusted checkpoints.
            ckpt = torch.load(self.cfg.pretrained_model_name_or_path, map_location="cpu")['state_dict']
            pretrained_model_ckpt = {}
            for k, v in ckpt.items():
                # Keep only keys under the 'condition.' prefix, stripped of the prefix.
                if k.startswith('condition.'):
                    pretrained_model_ckpt[k.replace('condition.', '')] = v
            self.load_state_dict(pretrained_model_ckpt, strict=True)
|
| 136 |
+
|
| 137 |
+
def encode_image_dino(self, images: Iterable[Optional[ImageType]], cameras: Optional[torch.Tensor] = None, force_none_camera_embeds: bool = False, return_dict: bool = False, **kwargs) -> torch.FloatTensor:
    """Encode images (optionally camera-conditioned) through the DINO backbone.

    Tensor/ndarray inputs are treated as a training batch already scaled to
    [0, 1] (assumes channels-last (B, H, W, C) layout, per the permute below —
    TODO confirm with callers); any other iterable of images goes through the
    HF image processor (inference path).

    Returns the backbone's ``last_hidden_state``, or a ``DINOEmbedOutput``
    (last_hidden_state + pooler_output) when ``return_dict`` is True.
    ``kwargs`` is accepted for interface compatibility and ignored here.
    """
    camera_embeds = None
    if isinstance(images, (np.ndarray, torch.Tensor)): # for training process
        assert images.min() >= 0.0 and images.max() <= 1.0, "The pixel values should be in the range of [0, 1]"
        if self.cfg.encode_camera:
            assert cameras is not None, "The cameras should be provided"
            camera_embeds = self.encode_camera(cameras)
        # (B, H, W, C) -> (B, C, H, W) before the torchvision-style transform pipeline.
        pixel_values = self.transform_dino(images.permute(0, 3, 1, 2))
    else: # for inference process
        if self.cfg.encode_camera:
            if cameras is None:
                # No cameras supplied: fall back to the module's canonical cameras,
                # tiled to cover the inferred batch size (len(images) // n_views).
                bs = len(images) // self.cfg.n_views
                cameras = self.cameras[:self.cfg.n_views].repeat(bs, 1, 1).to(self.dino_model.device)
            camera_embeds = self.encode_camera(cameras)
        pixel_values = self.image_preprocess_dino.preprocess(images, return_tensors='pt', \
            do_rescale=True, do_resize=True, size=self.cfg.image_size_dino, crop_size=self.cfg.image_size_dino).pixel_values

    # Caller may force the unconditional path even when cameras were encoded.
    if force_none_camera_embeds:
        camera_embeds = None

    # Normalize to a 5D (B, N, C, H, W) layout so multi-view batches flatten uniformly below.
    if pixel_values.ndim == 4:
        pixel_values = pixel_values.unsqueeze(1)
        if camera_embeds is not None:
            camera_embeds = camera_embeds.unsqueeze(1)

    if self.cfg.encode_camera and camera_embeds is not None:
        # Camera-conditioned path — presumably the modulated DINO variant that accepts
        # a `condition` kwarg; NOTE(review): verify the loaded model supports it.
        vision_outputs = self.dino_model(
            rearrange(pixel_values.to(self.dino_model.device), "B N C H W -> (B N) C H W"),
            condition=rearrange(camera_embeds, "B N C -> (B N) C"),
        )
    else:
        vision_outputs = self.dino_model(
            rearrange(pixel_values.to(self.dino_model.device), "B N C H W -> (B N) C H W"),
        )

    if return_dict:
        # dino
        dino_embeds_dict = DINOEmbedOutput(
            last_hidden_state=vision_outputs.last_hidden_state,
            pooler_output=vision_outputs.pooler_output,
        )
        return dino_embeds_dict
    else:
        return vision_outputs.last_hidden_state
|
| 181 |
+
|
| 182 |
+
@torch.no_grad()
def encode_image(self, images: Iterable[Optional[ImageType]], cameras: Optional[torch.Tensor] = None, force_none_camera_embeds: bool = False, return_dict: bool = False, **kwargs) -> torch.FloatTensor:
    """Encode images with DINO and drop register tokens when the backbone uses them.

    For a `Dinov2WithRegistersModel` backbone the returned sequence is
    [CLS token, patch tokens] — the register tokens sitting between them are
    removed. For any other backbone the embeddings pass through unchanged.
    """
    embeds = self.encode_image_dino(images, cameras)

    # Only the registers variant interleaves extra tokens that must be stripped.
    if self.dino_model.__class__.__name__ != 'Dinov2WithRegistersModel':
        return embeds

    # Layout is: x_norm_clstoken, x_norm_regtokens, x_norm_patchtokens.
    num_registers = self.dino_model.config.num_register_tokens
    cls_token = embeds[:, :1]
    patch_tokens = embeds[:, num_registers + 1:]
    return torch.cat([cls_token, patch_tokens], dim=1)
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2_with_registers/__pycache__/modeling_dinov2_with_registers.cpython-310.pyc
ADDED
|
Binary file (30.9 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/conditional_encoders/dinov2_with_registers/modeling_dinov2_with_registers.py
ADDED
|
@@ -0,0 +1,946 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 2 |
+
# This file was automatically generated from src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py.
|
| 3 |
+
# Do NOT edit this file manually as any edits will be overwritten by the generation of
|
| 4 |
+
# the file from the modular. If any change should be done, please apply the change to the
|
| 5 |
+
# modular_dinov2_with_registers.py file directly. One of our CI enforces this.
|
| 6 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 7 |
+
# coding=utf-8
|
| 8 |
+
# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
|
| 9 |
+
#
|
| 10 |
+
#
|
| 11 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 12 |
+
# you may not use this file except in compliance with the License.
|
| 13 |
+
# You may obtain a copy of the License at
|
| 14 |
+
#
|
| 15 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 16 |
+
#
|
| 17 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 18 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 19 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 20 |
+
# See the License for the specific language governing permissions and
|
| 21 |
+
# limitations under the License.
|
| 22 |
+
|
| 23 |
+
import collections.abc
|
| 24 |
+
import math
|
| 25 |
+
from typing import Dict, List, Optional, Set, Tuple, Union
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
from torch import nn
|
| 29 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 30 |
+
|
| 31 |
+
from transformers.activations import ACT2FN
|
| 32 |
+
from transformers.modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
|
| 33 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 34 |
+
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
| 35 |
+
from transformers.utils import (
|
| 36 |
+
add_code_sample_docstrings,
|
| 37 |
+
add_start_docstrings,
|
| 38 |
+
add_start_docstrings_to_model_forward,
|
| 39 |
+
logging,
|
| 40 |
+
replace_return_docstrings,
|
| 41 |
+
torch_int,
|
| 42 |
+
)
|
| 43 |
+
from transformers.utils.backbone_utils import BackboneMixin
|
| 44 |
+
from transformers.models.dinov2_with_registers.configuration_dinov2_with_registers import Dinov2WithRegistersConfig
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
logger = logging.get_logger(__name__)
|
| 48 |
+
|
| 49 |
+
# Base docstring
|
| 50 |
+
_CHECKPOINT_FOR_DOC = "facebook/dinov2_with_registers-base"
|
| 51 |
+
|
| 52 |
+
# General docstring
|
| 53 |
+
_CONFIG_FOR_DOC = "Dinov2WithRegistersConfig"
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class Dinov2WithRegistersPatchEmbeddings(nn.Module):
    """
    Turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` consumed
    by the Transformer encoder.
    """

    def __init__(self, config):
        super().__init__()

        # Normalize scalar sizes to (height, width) pairs.
        def _to_pair(value):
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        image_size = _to_pair(config.image_size)
        patch_size = _to_pair(config.patch_size)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        # Number of non-overlapping patches tiling the image.
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        # A strided convolution implements "patchify + linear projection" in one op.
        self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        channels = pixel_values.shape[1]
        if channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
                f" Expected {self.num_channels} but got {channels}."
            )
        # (B, C, H, W) -> (B, hidden, H/p, W/p) -> (B, num_patches, hidden)
        return self.projection(pixel_values).flatten(2).transpose(1, 2)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class Dinov2WithRegistersEmbeddings(nn.Module):
    """
    Construct the CLS token, mask token, register tokens, position and patch embeddings.
    """

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__()

        # Learned [CLS] token prepended to every patch sequence.
        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        # Replacement embedding for masked patches (used when `bool_masked_pos` is given).
        self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
        # Learned register tokens inserted between [CLS] and the patch tokens in forward();
        # they receive no positional encoding (positions are added before insertion).
        self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
        self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        # One position per patch plus one for [CLS].
        self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
        with the original implementation.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
        - https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # Skip interpolation for matching dimensions (unless tracing)
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        # Handle class token and patch embeddings separately
        class_pos_embed = self.position_embeddings[:, 0]
        patch_pos_embed = self.position_embeddings[:, 1:]
        dim = embeddings.shape[-1]

        # Calculate new dimensions (in patch units)
        height = height // self.config.patch_size
        width = width // self.config.patch_size

        # Reshape for interpolation — assumes the pretrained grid was square.
        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        # Store original dtype for restoration after interpolation
        target_dtype = patch_pos_embed.dtype

        # Interpolate at float32 precision (bicubic is unstable in half precision)
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.to(dtype=torch.float32),
            size=(torch_int(height), torch_int(width)),  # Explicit size instead of scale_factor
            mode="bicubic",
            align_corners=False,
            antialias=True,
        ).to(dtype=target_dtype)

        # Validate output dimensions if not tracing
        if not torch.jit.is_tracing():
            if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
                raise ValueError("Width or height does not match with the interpolated position embeddings")

        # Reshape back to original format
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        # Combine class and patch embeddings
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        # Cast input to the projection weight's dtype (e.g. fp16 for half-precision models).
        target_dtype = self.patch_embeddings.projection.weight.dtype
        embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))

        if bool_masked_pos is not None:
            # Swap masked patch embeddings for the learned mask token.
            embeddings = torch.where(
                bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
            )

        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)

        # add register tokens (after [CLS], before patches; no positions for them)
        embeddings = torch.cat(
            (embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
        )

        embeddings = self.dropout(embeddings)

        return embeddings
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class Dinov2WithRegistersSelfAttention(nn.Module):
    """Multi-head self-attention (eager implementation) for DINOv2-with-registers.

    Raises:
        ValueError: if `hidden_size` is not divisible by `num_attention_heads`
            (unless the config defines `embedding_size`).
    """

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            # Fix: the original interpolated `{config.hidden_size,}` — the trailing comma
            # made a tuple, rendering e.g. "The hidden size (768,) is not ...".
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        """Reshape (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Return `(context,)` or `(context, attention_probs)` when `output_attentions`."""
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
class Dinov2WithRegistersSdpaSelfAttention(Dinov2WithRegistersSelfAttention):
    """Self-attention backed by `torch.nn.functional.scaled_dot_product_attention` (SDPA).

    Falls back to the eager parent implementation when attention probabilities
    are requested, since SDPA cannot expose them.
    """

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__(config)
        # Kept as a plain float because SDPA takes the dropout prob directly
        # (applied only while self.training is True).
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Dinov2WithRegistersModel is using Dinov2WithRegistersSdpaSelfAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions
            )

        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # head_mask is forwarded as SDPA's attn_mask; scale=None means the
        # default 1/sqrt(head_dim) scaling.
        context_layer = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            self.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=None,
        )

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        # Second element mirrors the eager path's attention-probs slot (unavailable with SDPA).
        return context_layer, None
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class Dinov2WithRegistersSelfOutput(nn.Module):
    """
    Output projection after self-attention. The residual connection is defined in
    Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is accepted for interface compatibility but intentionally
        # unused here — the residual add happens in the enclosing layer.
        projected = self.dense(hidden_states)
        return self.dropout(projected)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
class Dinov2WithRegistersAttention(nn.Module):
    """Self-attention plus output projection, with support for pruning attention heads."""

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__()
        self.attention = Dinov2WithRegistersSelfAttention(config)
        self.output = Dinov2WithRegistersSelfOutput(config)
        # Indices of heads already removed by `prune_heads`, to avoid double pruning.
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        # Remove the given heads from the Q/K/V projections and the output dense layer.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        # The second argument of `output` is unused there; the residual is added in the layer.
        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
class Dinov2WithRegistersSdpaAttention(Dinov2WithRegistersAttention):
    """Attention block whose self-attention uses torch's scaled_dot_product_attention."""

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__(config)
        # Swap in the SDPA-backed self-attention; the output projection is inherited.
        self.attention = Dinov2WithRegistersSdpaSelfAttention(config)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
class Dinov2WithRegistersLayerScale(nn.Module):
    """Learnable per-channel scaling (LayerScale) applied to a residual branch."""

    def __init__(self, config) -> None:
        super().__init__()
        # Initialized to a constant so the branch starts at a controlled magnitude.
        self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Broadcast multiply over the last (channel) dimension.
        scaled = hidden_state * self.lambda1
        return scaled
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    # Identity in eval mode or when dropping is disabled.
    if not training or drop_prob == 0.0:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device).add_(keep_prob).floor_()
    # Scale survivors by 1/keep_prob so the expected value is unchanged.
    return input.div(keep_prob) * mask
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
class Dinov2WithRegistersDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def extra_repr(self) -> str:
        # Shown in the module repr, e.g. "Dinov2WithRegistersDropPath(p=0.1)".
        return "p={}".format(self.drop_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional implementation; only active while training.
        return drop_path(hidden_states, self.drop_prob, self.training)
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
class Dinov2WithRegistersMLP(nn.Module):
    """Standard transformer feed-forward block: Linear -> activation -> Linear."""

    def __init__(self, config) -> None:
        super().__init__()
        dim = config.hidden_size
        # Hidden width is the model width scaled by mlp_ratio (input == output width).
        inner_dim = int(dim * config.mlp_ratio)
        self.fc1 = nn.Linear(dim, inner_dim, bias=True)
        # A string selects a named activation from ACT2FN; otherwise the config
        # carries a callable directly.
        self.activation = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.fc2 = nn.Linear(inner_dim, dim, bias=True)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.activation(self.fc1(hidden_state)))
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
class Dinov2WithRegistersSwiGLUFFN(nn.Module):
    """SwiGLU feed-forward block: gated MLP variant used when `config.use_swiglu_ffn` is set."""

    def __init__(self, config) -> None:
        super().__init__()
        dim = config.hidden_size
        inner = int(config.hidden_size * config.mlp_ratio)
        # Shrink by 2/3 (keeps parameter count comparable to a plain MLP since the
        # input projection is doubled) and round up to a multiple of 8.
        inner = (int(inner * 2 / 3) + 7) // 8 * 8

        # Single projection producing both the gate and the value halves.
        self.weights_in = nn.Linear(dim, 2 * inner, bias=True)
        self.weights_out = nn.Linear(inner, dim, bias=True)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        gate, value = self.weights_in(hidden_state).chunk(2, dim=-1)
        return self.weights_out(nn.functional.silu(gate) * value)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
# Maps config._attn_implementation to the attention module class to instantiate.
DINOV2_WITH_REGISTERS_ATTENTION_CLASSES = {
    "eager": Dinov2WithRegistersAttention,
    "sdpa": Dinov2WithRegistersSdpaAttention,
}
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
class Dinov2WithRegistersLayer(nn.Module):
    """This corresponds to the Block class in the original implementation.

    Pre-norm transformer block: LayerNorm -> attention -> layer scale ->
    drop path -> residual, then LayerNorm -> MLP/SwiGLU -> layer scale ->
    drop path -> residual.
    """

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__()

        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = DINOV2_WITH_REGISTERS_ATTENTION_CLASSES[config._attn_implementation](config)
        self.layer_scale1 = Dinov2WithRegistersLayerScale(config)
        # Stochastic depth; identity when no positive drop-path rate is configured.
        if config.drop_path_rate > 0.0:
            self.drop_path = Dinov2WithRegistersDropPath(config.drop_path_rate)
        else:
            self.drop_path = nn.Identity()

        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = Dinov2WithRegistersSwiGLUFFN(config) if config.use_swiglu_ffn else Dinov2WithRegistersMLP(config)
        self.layer_scale2 = Dinov2WithRegistersLayerScale(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Layernorm is applied before self-attention (pre-norm).
        attn_outputs = self.attention(
            self.norm1(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )
        attn_output = self.layer_scale1(attn_outputs[0])
        extras = attn_outputs[1:]  # attention weights, when requested

        # First residual connection.
        hidden_states = self.drop_path(attn_output) + hidden_states

        # Layernorm is also applied before the MLP; second residual connection.
        mlp_output = self.layer_scale2(self.mlp(self.norm2(hidden_states)))
        layer_output = self.drop_path(mlp_output) + hidden_states

        return (layer_output,) + extras
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
class Dinov2WithRegistersEncoder(nn.Module):
    """Stack of transformer layers with optional gradient checkpointing."""

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(Dinov2WithRegistersLayer(config) for _ in range(config.num_hidden_layers))
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for layer_index, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = None if head_mask is None else head_mask[layer_index]

            # Trade compute for memory during training when checkpointing is enabled.
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in (hidden_states, all_hidden_states, all_self_attentions) if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
class Dinov2WithRegistersPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Dinov2WithRegistersConfig
    base_model_prefix = "dinov2_with_registers"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Dinov2WithRegistersSwiGLUFFN"]
    _supports_sdpa = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast to fp32 before truncated-normal init and cast back, because
            # `trunc_normal_cpu` is not implemented for half precision.
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, Dinov2WithRegistersEmbeddings):
            # Same fp32 round-trip for the learned position and CLS embeddings.
            module.position_embeddings.data = nn.init.trunc_normal_(
                module.position_embeddings.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.position_embeddings.dtype)

            module.cls_token.data = nn.init.trunc_normal_(
                module.cls_token.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.cls_token.dtype)
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
DINOV2_WITH_REGISTERS_START_DOCSTRING = r"""
|
| 585 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
|
| 586 |
+
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
|
| 587 |
+
behavior.
|
| 588 |
+
|
| 589 |
+
Parameters:
|
| 590 |
+
config ([`Dinov2WithRegistersConfig`]): Model configuration class with all the parameters of the model.
|
| 591 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 592 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 593 |
+
"""
|
| 594 |
+
|
| 595 |
+
DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING = r"""
|
| 596 |
+
Args:
|
| 597 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 598 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
| 599 |
+
[`BitImageProcessor.preprocess`] for details.
|
| 600 |
+
|
| 601 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
|
| 602 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
|
| 603 |
+
pre-training.
|
| 604 |
+
|
| 605 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 606 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 607 |
+
|
| 608 |
+
- 1 indicates the head is **not masked**,
|
| 609 |
+
- 0 indicates the head is **masked**.
|
| 610 |
+
|
| 611 |
+
output_attentions (`bool`, *optional*):
|
| 612 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 613 |
+
tensors for more detail.
|
| 614 |
+
output_hidden_states (`bool`, *optional*):
|
| 615 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 616 |
+
more detail.
|
| 617 |
+
return_dict (`bool`, *optional*):
|
| 618 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 619 |
+
"""
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
@add_start_docstrings(
    "The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top.",
    DINOV2_WITH_REGISTERS_START_DOCSTRING,
)
class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel):
    """Embeddings + encoder + final layernorm; pooled output is the CLS token."""

    def __init__(self, config: Dinov2WithRegistersConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = Dinov2WithRegistersEmbeddings(config)
        self.encoder = Dinov2WithRegistersEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer_index, heads in heads_to_prune.items():
            self.encoder.layer[layer_index].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        # Fall back to config defaults for any unspecified output flag.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed: 1.0 keeps a head; input shape [num_heads]
        # or [num_hidden_layers x num_heads] is broadcast to
        # [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = self.layernorm(encoder_outputs[0])
        pooled_output = sequence_output[:, 0, :]  # CLS token

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
# Image classification docstring
|
| 709 |
+
_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2_with_registers-small-imagenet1k-1-layer"
|
| 710 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
| 711 |
+
|
| 712 |
+
DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING = r"""
|
| 713 |
+
Args:
|
| 714 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 715 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
| 716 |
+
[`BitImageProcessor.preprocess`] for details.
|
| 717 |
+
|
| 718 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 719 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 720 |
+
|
| 721 |
+
- 1 indicates the head is **not masked**,
|
| 722 |
+
- 0 indicates the head is **masked**.
|
| 723 |
+
|
| 724 |
+
output_attentions (`bool`, *optional*):
|
| 725 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 726 |
+
tensors for more detail.
|
| 727 |
+
output_hidden_states (`bool`, *optional*):
|
| 728 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 729 |
+
more detail.
|
| 730 |
+
return_dict (`bool`, *optional*):
|
| 731 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 732 |
+
"""
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
@add_start_docstrings(
    """
    Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state
    of the [CLS] token) e.g. for ImageNet.
    """,
    DINOV2_WITH_REGISTERS_START_DOCSTRING,
)
class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel):
    """Backbone plus a linear classifier over [CLS ; mean(patch tokens)]."""

    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.dinov2_with_registers = Dinov2WithRegistersModel(config)

        # Classifier head: the input is CLS concatenated with the mean patch
        # token, hence hidden_size * 2.
        if config.num_labels > 0:
            self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels)
        else:
            self.classifier = nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.dinov2_with_registers(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]  # batch_size, sequence_length, hidden_size

        # Classifier input: CLS token concatenated with the mean patch token.
        cls_token = sequence_output[:, 0]
        patch_tokens = sequence_output[:, 1:]
        logits = self.classifier(torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1))

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Infer the problem type once and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 833 |
+
|
| 834 |
+
|
| 835 |
+
@add_start_docstrings(
    """
    Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    DINOV2_WITH_REGISTERS_START_DOCSTRING,
)
class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin):
    """Exposes per-stage feature maps (register/CLS tokens stripped, reshaped to NCHW)."""

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        # One feature width per stage (embeddings output + every encoder layer).
        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
        self.embeddings = Dinov2WithRegistersEmbeddings(config)
        self.encoder = Dinov2WithRegistersEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.num_register_tokens = config.num_register_tokens

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
        >>> model = AutoBackbone.from_pretrained(
        ...     "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 16, 16]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        # Hidden states are always requested because the stage outputs come from them.
        outputs = self.encoder(
            embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                if self.config.apply_layernorm:
                    hidden_state = self.layernorm(hidden_state)
                if self.config.reshape_hidden_states:
                    # Drop the CLS token and the register tokens before reshaping.
                    hidden_state = hidden_state[:, self.num_register_tokens + 1 :]
                    # this was actually a bug in the original implementation that we copied here,
                    # cause normally the order is height, width
                    batch_size, _, height, width = pixel_values.shape
                    patch_size = self.config.patch_size
                    hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
                    hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
                feature_maps += (hidden_state,)

        if not return_dict:
            if output_hidden_states:
                output = (feature_maps,) + outputs[1:]
            else:
                output = (feature_maps,) + outputs[2:]
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions if output_attentions else None,
        )
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
# Public API of this module.
__all__ = [
    "Dinov2WithRegistersPreTrainedModel",
    "Dinov2WithRegistersModel",
    "Dinov2WithRegistersForImageClassification",
    "Dinov2WithRegistersBackbone",
]
|
Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Import denoiser submodules so that their @craftsman.register decorators run.
from . import pixart_denoiser
|
Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (219 Bytes). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__pycache__/pixart_denoiser.cpython-310.pyc
ADDED
|
Binary file (4.94 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/denoisers/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (6 kB). View file
|
|
|
Code/Baselines/CraftsMan3D/craftsman/models/denoisers/pixart_denoiser.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import math
|
| 6 |
+
import importlib
|
| 7 |
+
import craftsman
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
from typing import Optional
|
| 11 |
+
from craftsman.utils.base import BaseModule
|
| 12 |
+
from craftsman.models.denoisers.utils import *
|
| 13 |
+
|
| 14 |
+
@craftsman.register("pixart-denoiser")
class PixArtDinoDenoiser(BaseModule):
    """PixArt-style DiT denoiser conditioned on CLIP and/or DINOv2 image features."""

    @dataclass
    class Config(BaseModule.Config):
        pretrained_model_name_or_path: Optional[str] = None  # optional checkpoint to warm-start from
        input_channels: int = 32
        output_channels: int = 32
        n_ctx: int = 512
        width: int = 768
        layers: int = 28
        heads: int = 16
        context_dim: int = 1024
        n_views: int = 1
        context_ln: bool = True
        init_scale: float = 0.25
        use_checkpoint: bool = False
        drop_path: float = 0.
        qkv_fuse: bool = True
        clip_weight: float = 1.0
        dino_weight: float = 1.0
        condition_type: str = "clip_dinov2"
        use_RMSNorm: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()

        # Timestep embedding.
        self.time_embed = TimestepEmbedder(self.cfg.width)

        # Latent (x) embedding.
        self.x_embed = nn.Linear(self.cfg.input_channels, self.cfg.width, bias=True)

        # Condition projectors, optionally preceded by a normalization layer.
        if self.cfg.context_ln:
            if "clip" in self.cfg.condition_type:
                self.clip_embed = nn.Sequential(
                    nn.RMSNorm(self.cfg.context_dim) if self.cfg.use_RMSNorm else nn.LayerNorm(self.cfg.context_dim),
                    nn.Linear(self.cfg.context_dim, self.cfg.width),
                )
            if "dino" in self.cfg.condition_type:
                self.dino_embed = nn.Sequential(
                    nn.RMSNorm(self.cfg.context_dim) if self.cfg.use_RMSNorm else nn.LayerNorm(self.cfg.context_dim),
                    nn.Linear(self.cfg.context_dim, self.cfg.width),
                )
        else:
            if "clip" in self.cfg.condition_type:
                self.clip_embed = nn.Linear(self.cfg.context_dim, self.cfg.width)
            if "dino" in self.cfg.condition_type:
                self.dino_embed = nn.Linear(self.cfg.context_dim, self.cfg.width)

        init_scale = self.cfg.init_scale * math.sqrt(1.0 / self.cfg.width)
        # Linearly increasing stochastic-depth rates across the blocks.
        drop_path = [rate.item() for rate in torch.linspace(0, self.cfg.drop_path, self.cfg.layers)]
        self.blocks = nn.ModuleList([
            DiTBlock(
                width=self.cfg.width,
                heads=self.cfg.heads,
                init_scale=init_scale,
                # NOTE(review): cfg.drop_path (a float) is passed as qkv_bias,
                # which DiTBlock declares as a bool — looks like a bug; confirm
                # intended value before changing (behavior preserved here).
                qkv_bias=self.cfg.drop_path,
                qkv_fuse=self.cfg.qkv_fuse,
                use_RMSNorm=self.cfg.use_RMSNorm,
                use_flash=True,
                drop_path=drop_path[block_index]
            )
            for block_index in range(self.cfg.layers)
        ])

        # Projects the timestep embedding to the 6 adaLN modulation terms.
        self.t_block = nn.Sequential(
            nn.SiLU(),
            nn.Linear(self.cfg.width, 6 * self.cfg.width, bias=True)
        )

        # Final projection back to latent channels.
        self.final_layer = T2IFinalLayer(self.cfg.width, self.cfg.output_channels, self.cfg.use_RMSNorm)

        self.identity_initialize()

        if self.cfg.pretrained_model_name_or_path:
            print(f"Loading pretrained model from {self.cfg.pretrained_model_name_or_path}")
            ckpt = torch.load(self.cfg.pretrained_model_name_or_path, map_location="cpu")
            if 'state_dict' in ckpt:
                ckpt = ckpt['state_dict']
            # Keep only the denoiser weights, stripping the wrapping prefix.
            self.denoiser_ckpt = {
                key.replace('denoiser_model.', ''): value
                for key, value in ckpt.items()
                if key.startswith('denoiser_model.')
            }
            self.load_state_dict(self.denoiser_ckpt, strict=False)

    def identity_initialize(self):
        """Zero every block's output projections so each block starts as identity."""
        for block in self.blocks:
            nn.init.constant_(block.attn.c_proj.weight, 0)
            nn.init.constant_(block.attn.c_proj.bias, 0)
            nn.init.constant_(block.cross_attn.c_proj.weight, 0)
            nn.init.constant_(block.cross_attn.c_proj.bias, 0)
            nn.init.constant_(block.mlp.c_proj.weight, 0)
            nn.init.constant_(block.mlp.c_proj.bias, 0)

    def forward(self,
                model_input: torch.FloatTensor,
                timestep: torch.LongTensor,
                context: torch.FloatTensor):

        r"""
        Args:
            model_input (torch.FloatTensor): [bs, n_data, c]
            timestep (torch.LongTensor): [bs,]
            context (torch.FloatTensor): [bs, context_tokens, c]

        Returns:
            sample (torch.FloatTensor): [bs, n_data, c]

        """

        B, n_data, _ = model_input.shape

        # 1. Timestep embedding.
        t_emb = self.time_embed(timestep)

        # 2. Project the visual conditions according to the configured type.
        context = context.view(B, self.cfg.n_views, -1, self.cfg.context_dim)
        if self.cfg.condition_type == "clip_dinov2":
            # Token dimension is assumed to hold CLIP features followed by
            # DINOv2 features in equal halves.
            clip_feat, dino_feat = context.chunk(2, dim=2)
            clip_cond = self.clip_embed(clip_feat.contiguous().view(B, -1, self.cfg.context_dim))
            dino_cond = self.dino_embed(dino_feat.contiguous().view(B, -1, self.cfg.context_dim))
            visual_cond = self.cfg.clip_weight * clip_cond + self.cfg.dino_weight * dino_cond
        elif self.cfg.condition_type == "clip":
            visual_cond = self.clip_embed(context.contiguous().view(B, -1, self.cfg.context_dim))
        elif self.cfg.condition_type == "dinov2":
            visual_cond = self.dino_embed(context.contiguous().view(B, -1, self.cfg.context_dim))
        else:
            raise NotImplementedError(f"condition type {self.cfg.condition_type} not implemented")

        # 3. Run the DiT blocks (optionally under activation checkpointing).
        latent = self.x_embed(model_input)

        t0 = self.t_block(t_emb).unsqueeze(dim=1)
        for block in self.blocks:
            latent = auto_grad_checkpoint(block, latent, visual_cond, t0)

        latent = self.final_layer(latent, t_emb)

        return latent
|
| 160 |
+
|
Code/Baselines/CraftsMan3D/craftsman/models/denoisers/utils.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import numpy as np
|
| 5 |
+
from einops import rearrange
|
| 6 |
+
from itertools import repeat
|
| 7 |
+
from collections.abc import Iterable
|
| 8 |
+
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
|
| 9 |
+
from timm.models.layers import DropPath
|
| 10 |
+
from craftsman.models.transformers.utils import MLP
|
| 11 |
+
from craftsman.models.transformers.attention import MultiheadAttention, MultiheadCrossAttention
|
| 12 |
+
|
| 13 |
+
class DiTBlock(nn.Module):
    """
    A DiT transformer block with adaLN-single (PixArt-style) conditioning.

    Self-attention and the MLP are modulated and gated by six vectors
    derived from the timestep signal ``t`` plus a learned per-block table;
    an unmodulated cross-attention over the condition tokens ``y`` sits
    between the two.
    """

    def __init__(self, width, heads, init_scale=1.0, qkv_bias=True, qkv_fuse=False, use_RMSNorm=False, use_flash=True, drop_path=0.0):
        super().__init__()
        # Both pre-norms come from the same family; choose it once.
        norm_cls = nn.RMSNorm if use_RMSNorm else nn.LayerNorm
        self.norm1 = norm_cls(width, elementwise_affine=True, eps=1e-6)
        self.attn = MultiheadAttention(
            n_ctx=None,
            width=width,
            heads=heads,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            qkv_fuse=qkv_fuse,
            use_flash=use_flash
        )
        self.cross_attn = MultiheadCrossAttention(
            n_data=None,
            width=width,
            heads=heads,
            data_width=None,
            init_scale=init_scale,
            qkv_bias=qkv_bias,
            qkv_fuse=qkv_fuse,
            use_flash=use_flash,
        )
        self.norm2 = norm_cls(width, elementwise_affine=True, eps=1e-6)

        self.mlp = MLP(width=width, init_scale=init_scale)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        # Learned per-block offsets added to the shared timestep modulation
        # (the "adaLN-single" trick from PixArt-alpha).
        self.scale_shift_table = nn.Parameter(torch.randn(6, width) / width ** 0.5)

    def forward(self, x, y, t, **kwargs):
        # x: [B, N, C] latent tokens; y: condition tokens;
        # t: [B, 6*C] modulation signal produced by the shared t_block.
        bsz, n_tok, ch = x.shape

        mod = (self.scale_shift_table[None] + t.reshape(bsz, 6, -1)).chunk(6, dim=1)
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod

        attn_out = self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)).reshape(bsz, n_tok, ch)
        x = x + self.drop_path(gate_msa * attn_out)
        x = x + self.cross_attn(x, y)
        mlp_out = self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))
        x = x + self.drop_path(gate_mlp * mlp_out)

        return x
|
| 61 |
+
|
| 62 |
+
def t2i_modulate(x, shift, scale):
    """Apply PixArt-style affine modulation: ``x * (1 + scale) + shift``."""
    return shift + x * (scale + 1)
|
| 64 |
+
|
| 65 |
+
def auto_grad_checkpoint(module, *args, **kwargs):
    """Run ``module``, wrapping the call in activation checkpointing when the
    module (or the first element of a module sequence) opts in via a truthy
    ``grad_checkpointing`` attribute; otherwise call it directly.
    """
    if not getattr(module, 'grad_checkpointing', False):
        return module(*args, **kwargs)
    if isinstance(module, Iterable):
        # Sequence of blocks: checkpoint in segments whose length comes from
        # `grad_checkpointing_step` declared on the first block.
        segments = module[0].grad_checkpointing_step
        return checkpoint_sequential(module, segments, *args, **kwargs)
    return checkpoint(module, *args, **kwargs)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class TimestepEmbedder(nn.Module):
    """
    Embeds scalar diffusion timesteps into vectors: sinusoidal frequency
    features followed by a two-layer SiLU MLP.
    """

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        :param t: a 1-D Tensor of N indices (may be fractional), one per batch element.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, dim) Tensor of positional embeddings.
        """
        # Same construction as openai/glide-text2im nn.py.
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)
        angles = t[:, None].float() * freqs[None]
        emb = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
        if dim % 2:
            # Odd target dim: pad one zero channel so the width matches.
            emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
        return emb

    def forward(self, t):
        freq_feat = self.timestep_embedding(t, self.frequency_embedding_size).to(self.dtype)
        return self.mlp(freq_feat)

    @property
    def dtype(self):
        # dtype of the module's parameters (tracks .half()/.to() casts).
        return next(self.parameters()).dtype
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class FinalLayer(nn.Module):
    """
    The final layer of DiT: adaLN (shift/scale) modulation of the normalized
    tokens followed by a linear projection to `out_channels`.
    """

    def __init__(self, hidden_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6)
        self.linear = nn.Linear(hidden_size, out_channels, bias=True)
        # Maps the conditioning vector to a concatenated (shift, scale) pair.
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        """
        Args:
            x: [B, N, hidden_size] tokens.
            c: conditioning — either [B, hidden_size] (one vector per sample)
               or any shape already broadcastable against x
               (e.g. [B, 1, hidden_size]).

        Returns:
            [B, N, out_channels] projected tokens.
        """
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
        # Bug fix: with a per-sample [B, C] condition the original code fed
        # [B, C] shift/scale into t2i_modulate, which does no unsqueeze, so
        # broadcasting against [B, N, C] tokens failed (compare T2IFinalLayer,
        # which inserts the token axis via t[:, None]). Insert it here when
        # the condition is one rank below the tokens.
        if shift.dim() == x.dim() - 1:
            shift = shift.unsqueeze(-2)
            scale = scale.unsqueeze(-2)
        # Inlined modulation: x * (1 + scale) + shift.
        x = self.norm_final(x) * (1 + scale) + shift
        x = self.linear(x)
        return x
|
| 136 |
+
|
| 137 |
+
class T2IFinalLayer(nn.Module):
    """
    The final layer of PixArt: normalized tokens are modulated by a learned
    (shift, scale) table offset by the timestep embedding, then projected
    to `out_channels`.
    """

    def __init__(self, hidden_size, out_channels, use_RMSNorm=False):
        super().__init__()
        norm_cls = nn.RMSNorm if use_RMSNorm else nn.LayerNorm
        self.norm_final = norm_cls(hidden_size, elementwise_affine=True, eps=1e-6)
        self.linear = nn.Linear(hidden_size, out_channels, bias=True)
        # Learned base (shift, scale) pair, offset per sample by t.
        self.scale_shift_table = nn.Parameter(torch.randn(2, hidden_size) / hidden_size ** 0.5)
        self.out_channels = out_channels

    def forward(self, x, t):
        # t: [B, hidden]; broadcast it over the 2-row table -> [B, 2, hidden].
        mod = self.scale_shift_table[None] + t[:, None]
        shift, scale = mod.chunk(2, dim=1)
        # Inlined t2i_modulate: x * (1 + scale) + shift.
        out = self.norm_final(x) * (1 + scale) + shift
        return self.linear(out)
|
Code/Baselines/CraftsMan3D/craftsman/models/geometry/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import (
|
| 2 |
+
base
|
| 3 |
+
)
|