{
  "anatomical_attention_bias": 2.0,
  "architectures": [
    "LanaForConditionalGeneration"
  ],
  "auto_map": {
    "AutoConfig": "configuration_lana.LanaConfig",
    "AutoModel": "modeling_lana.LanaForConditionalGeneration",
    "AutoProcessor": "processing_lana.LanaProcessor"
  },
  "bundled_segmentation_model_name": "bundled_backbones/segmenter_encoder",
  "bundled_text_model_name": "bundled_backbones/text_decoder",
  "bundled_tokenizer_name": ".",
  "bundled_vision_model_name": "bundled_backbones/vision_encoder",
  "decoder_compute_dtype": "bfloat16",
  "decoder_load_in_4bit": false,
  "dtype": "float32",
  "freeze_segmenter": true,
  "heart_segmenter_checkpoint": "segmenters/heart_segmenter_dinounet_best.pth",
  "image_size": 512,
  "layer_mask_base_kernel_size": 3,
  "layer_mask_kernel_growth": 2,
  "lung_segmenter_checkpoint": "segmenters/lung_segmenter_dinounet_finetuned.pth",
  "mask_size": 32,
  "max_position_embeddings": 2048,
  "model_type": "lana_radgen",
  "num_attention_layers": 12,
  "segmentation_attention_implementation": "sdpa",
  "segmentation_model_name": "facebook/dinov3-convnext-small-pretrain-lvd1689m",
  "segmenter_weights_in_model_state": true,
  "text_hidden_size": 768,
  "text_model_name": "gpt2",
  "transformers_version": "5.3.0",
  "use_cache": true,
  "use_segmentation_mask": true,
  "vision_model_name": "facebook/dinov3-vits16-pretrain-lvd1689m",
  "visual_feature_dim": 384,
  "visual_projection_type": "mlp4",
  "vocab_size": 50257
}
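
Because "auto_map" routes AutoConfig, AutoModel, and AutoProcessor to custom classes shipped with the repository (configuration_lana.py, modeling_lana.py, processing_lana.py), loading requires trust_remote_code=True. A minimal loading sketch follows; the repository id "user/lana-radgen" is a placeholder, not the real repo name.

# Minimal loading sketch for this config (assumption: "user/lana-radgen"
# is a placeholder repo id; the custom Lana* classes are resolved through
# the "auto_map" entries above, hence trust_remote_code=True).
from transformers import AutoConfig, AutoModel, AutoProcessor

repo_id = "user/lana-radgen"  # placeholder

config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)        # -> LanaConfig
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)  # -> LanaProcessor
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)          # -> LanaForConditionalGeneration

print(config.model_type)  # "lana_radgen"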