ramu0e committed on
Commit
be6dcbd
·
verified ·
1 Parent(s): bd36d0e

Upload folder using huggingface_hub

Browse files
lam/config.json ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "action_depth": 5,
3
+ "action_dropout": 0.0,
4
+ "action_hidden_dim": 96,
5
+ "action_obs_dim": 0,
6
+ "action_prev_dim": 10,
7
+ "action_state_dim": 5,
8
+ "action_target_dim": 10,
9
+ "action_wide_dim": 512,
10
+ "architectures": [
11
+ "LAMModel"
12
+ ],
13
+ "decoder_attention_head_dim": 64,
14
+ "decoder_attn_implementation": "flash_attention_2",
15
+ "decoder_encoder_hidden_dim": 5,
16
+ "decoder_eps": 1e-06,
17
+ "decoder_ffn_dim": 768,
18
+ "decoder_freq_dim": 64,
19
+ "decoder_in_channels": 3,
20
+ "decoder_num_attention_heads": 3,
21
+ "decoder_num_layers": 12,
22
+ "decoder_out_channels": 3,
23
+ "decoder_patch_size": [
24
+ 4,
25
+ 4
26
+ ],
27
+ "decoder_pos_embed_seq_len": null,
28
+ "decoder_rope_max_seq_len": 1024,
29
+ "dtype": "bfloat16",
30
+ "encoder_height": 64,
31
+ "encoder_width": 64,
32
+ "fsq_levels": [
33
+ 7,
34
+ 5,
35
+ 5,
36
+ 5,
37
+ 5
38
+ ],
39
+ "initializer_range": 0.02,
40
+ "is_diffusion": true,
41
+ "latent_channels": 5,
42
+ "max_tokens": 256,
43
+ "min_tokens": 1,
44
+ "model_type": "lam",
45
+ "null_latent": 0,
46
+ "transformers_version": "4.57.1",
47
+ "use_tail_drop": true,
48
+ "videomae_config": {
49
+ "attn_drop_rate": 0.0,
50
+ "cos_attn": false,
51
+ "depth": 8,
52
+ "drop_path_rate": 0.0,
53
+ "drop_rate": 0.0,
54
+ "embed_dim": 192,
55
+ "img_size": [
56
+ 64,
57
+ 64
58
+ ],
59
+ "in_chans": 3,
60
+ "init_values": 0.0,
61
+ "layer_norm_eps": 1e-06,
62
+ "mlp_ratio": 4,
63
+ "norm_layer": "nn.LayerNorm",
64
+ "num_classes": 0,
65
+ "num_frames": 2,
66
+ "num_heads": 3,
67
+ "patch_size": 4,
68
+ "qk_scale": null,
69
+ "qkv_bias": true,
70
+ "tubelet_size": 2,
71
+ "use_learnable_pos_emb": false,
72
+ "use_mean_pooling": false,
73
+ "with_cp": false
74
+ },
75
+ "videomae_from_pretrained": null,
76
+ "vocab_size": 4375
77
+ }
lam/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6f58340de8dcfd01cbbc4968eada2a3d816adf93c1fc6ee6b27cdc71842cf26
3
+ size 24337752
model_index.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "LAMPipeline",
3
+ "_diffusers_version": "0.35.2",
4
+ "lam": [
5
+ "flexlam_mini.models.lam.modeling_lam",
6
+ "LAMModel"
7
+ ],
8
+ "processor": [
9
+ "flexlam_mini.models.lam.processing_lam",
10
+ "LAMProcessorFast"
11
+ ],
12
+ "scheduler": [
13
+ "diffusers",
14
+ "FlowMatchEulerDiscreteScheduler"
15
+ ]
16
+ }
processor/processor_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "encoder_height": 64,
3
+ "encoder_width": 64,
4
+ "height": 64,
5
+ "processor_class": "LAMProcessorFast",
6
+ "width": 64
7
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "FlowMatchEulerDiscreteScheduler",
3
+ "_diffusers_version": "0.35.2",
4
+ "base_image_seq_len": 256,
5
+ "base_shift": 0.5,
6
+ "invert_sigmas": false,
7
+ "max_image_seq_len": 4096,
8
+ "max_shift": 1.15,
9
+ "num_train_timesteps": 1000,
10
+ "shift": 1.0,
11
+ "shift_terminal": null,
12
+ "stochastic_sampling": false,
13
+ "time_shift_type": "exponential",
14
+ "use_beta_sigmas": false,
15
+ "use_dynamic_shifting": false,
16
+ "use_exponential_sigmas": false,
17
+ "use_karras_sigmas": false
18
+ }