ankanmbz committed
Commit ea7eef0 · verified · 1 Parent(s): 006bb02

Upload siglip checkpoint-3000

This view is limited to 50 files because it contains too many changes. See the raw diff for the full commit.

Files changed (50)
  1. siglip-checkpoint-3000/chat_template.jinja +5 -0
  2. siglip-checkpoint-3000/config.json +68 -0
  3. siglip-checkpoint-3000/configuration_llada.py +175 -0
  4. siglip-checkpoint-3000/latest +1 -0
  5. siglip-checkpoint-3000/model-00001-of-00004.safetensors +3 -0
  6. siglip-checkpoint-3000/model-00002-of-00004.safetensors +3 -0
  7. siglip-checkpoint-3000/model-00003-of-00004.safetensors +3 -0
  8. siglip-checkpoint-3000/model-00004-of-00004.safetensors +3 -0
  9. siglip-checkpoint-3000/model.safetensors.index.json +0 -0
  10. siglip-checkpoint-3000/rng_state_0.pth +3 -0
  11. siglip-checkpoint-3000/rng_state_1.pth +3 -0
  12. siglip-checkpoint-3000/rng_state_10.pth +3 -0
  13. siglip-checkpoint-3000/rng_state_11.pth +3 -0
  14. siglip-checkpoint-3000/rng_state_12.pth +3 -0
  15. siglip-checkpoint-3000/rng_state_13.pth +3 -0
  16. siglip-checkpoint-3000/rng_state_14.pth +3 -0
  17. siglip-checkpoint-3000/rng_state_15.pth +3 -0
  18. siglip-checkpoint-3000/rng_state_16.pth +3 -0
  19. siglip-checkpoint-3000/rng_state_17.pth +3 -0
  20. siglip-checkpoint-3000/rng_state_18.pth +3 -0
  21. siglip-checkpoint-3000/rng_state_19.pth +3 -0
  22. siglip-checkpoint-3000/rng_state_2.pth +3 -0
  23. siglip-checkpoint-3000/rng_state_20.pth +3 -0
  24. siglip-checkpoint-3000/rng_state_21.pth +3 -0
  25. siglip-checkpoint-3000/rng_state_22.pth +3 -0
  26. siglip-checkpoint-3000/rng_state_23.pth +3 -0
  27. siglip-checkpoint-3000/rng_state_24.pth +3 -0
  28. siglip-checkpoint-3000/rng_state_25.pth +3 -0
  29. siglip-checkpoint-3000/rng_state_26.pth +3 -0
  30. siglip-checkpoint-3000/rng_state_27.pth +3 -0
  31. siglip-checkpoint-3000/rng_state_28.pth +3 -0
  32. siglip-checkpoint-3000/rng_state_29.pth +3 -0
  33. siglip-checkpoint-3000/rng_state_3.pth +3 -0
  34. siglip-checkpoint-3000/rng_state_30.pth +3 -0
  35. siglip-checkpoint-3000/rng_state_31.pth +3 -0
  36. siglip-checkpoint-3000/rng_state_32.pth +3 -0
  37. siglip-checkpoint-3000/rng_state_33.pth +3 -0
  38. siglip-checkpoint-3000/rng_state_34.pth +3 -0
  39. siglip-checkpoint-3000/rng_state_35.pth +3 -0
  40. siglip-checkpoint-3000/rng_state_36.pth +3 -0
  41. siglip-checkpoint-3000/rng_state_37.pth +3 -0
  42. siglip-checkpoint-3000/rng_state_38.pth +3 -0
  43. siglip-checkpoint-3000/rng_state_39.pth +3 -0
  44. siglip-checkpoint-3000/rng_state_4.pth +3 -0
  45. siglip-checkpoint-3000/rng_state_40.pth +3 -0
  46. siglip-checkpoint-3000/rng_state_41.pth +3 -0
  47. siglip-checkpoint-3000/rng_state_42.pth +3 -0
  48. siglip-checkpoint-3000/rng_state_43.pth +3 -0
  49. siglip-checkpoint-3000/rng_state_44.pth +3 -0
  50. siglip-checkpoint-3000/rng_state_45.pth +3 -0
siglip-checkpoint-3000/chat_template.jinja ADDED
@@ -0,0 +1,5 @@
+{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+' + message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+' }}
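Note: this template uses Llama-3-style headers (`<|start_header_id|>...<|end_header_id|>`, `<|eot_id|>`) and unconditionally appends an assistant header at the end. A minimal sketch of rendering it, assuming the checkpoint's tokenizer files are present locally (the path is illustrative; this truncated view does not show them):

```python
from transformers import AutoTokenizer

# Illustrative local path to this checkpoint; adjust to where it was downloaded.
tokenizer = AutoTokenizer.from_pretrained(
    "siglip-checkpoint-3000", trust_remote_code=True
)

messages = [{"role": "user", "content": "Describe the scan."}]

# apply_chat_template picks up chat_template.jinja; this particular template
# always ends with the assistant header, so the string is generation-ready.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```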
siglip-checkpoint-3000/config.json ADDED
@@ -0,0 +1,68 @@
+{
+  "add_faster_video": false,
+  "add_time_instruction": false,
+  "architectures": [
+    "LlavaLLaDAModelLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_llada.LLaDAConfig",
+    "AutoModel": "modeling_llada.LLaDAModelLM",
+    "AutoModelForCausalLM": "modeling_llada.LLaDAModelLM"
+  },
+  "bos_token_id": 126080,
+  "dtype": "bfloat16",
+  "embedding_loss_lambda": 0.5,
+  "embedding_loss_type": "infonce_learnable",
+  "embedding_pool_strategy": "response_tokens",
+  "eos_token_id": 126081,
+  "faster_token_stride": 10,
+  "force_sample": false,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "image_aspect_ratio": "square",
+  "image_crop_resolution": null,
+  "image_grid_pinpoints": null,
+  "image_split_resolution": null,
+  "initializer_range": 0.02,
+  "intermediate_size": 12288,
+  "max_position_embeddings": 16384,
+  "mm_hidden_size": 1152,
+  "mm_newline_position": "grid",
+  "mm_patch_merge_type": "flat",
+  "mm_projector_lr": null,
+  "mm_projector_type": "mlp2x_gelu",
+  "mm_resampler_type": null,
+  "mm_spatial_pool_mode": "bilinear",
+  "mm_spatial_pool_stride": null,
+  "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model,embedding_projection",
+  "mm_use_im_patch_token": false,
+  "mm_use_im_start_end": false,
+  "mm_vision_select_feature": "patch",
+  "mm_vision_select_layer": -2,
+  "mm_vision_tower": "google/siglip2-so400m-patch14-384",
+  "mm_vision_tower_lr": 1e-06,
+  "model_type": "llada",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 126081,
+  "pos_skipping_range": 4096,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_padding_side": "right",
+  "transformers_version": "4.57.1",
+  "use_auxiliary_embedding_loss": true,
+  "use_cache": false,
+  "use_mm_proj": true,
+  "use_pos_skipping": false,
+  "vision_tower_pretrained": null,
+  "vocab_size": 126349,
+  "y_encoder_dim": 1024,
+  "y_encoder_name": "abhinand/MedEmbed-large-v0.1"
+}
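Because `auto_map` routes `AutoConfig` to the custom `configuration_llada.LLaDAConfig` shipped in this checkpoint, loading the config requires `trust_remote_code=True`. A minimal sketch (the local path is illustrative):

```python
from transformers import AutoConfig

# auto_map resolves to configuration_llada.LLaDAConfig, which is included
# in this upload, so remote-code trust must be granted explicitly.
config = AutoConfig.from_pretrained(
    "siglip-checkpoint-3000", trust_remote_code=True
)
print(config.model_type)       # "llada"
print(config.mm_vision_tower)  # "google/siglip2-so400m-patch14-384"
print(config.hidden_size, config.num_hidden_layers)  # 4096 32
```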
siglip-checkpoint-3000/configuration_llada.py ADDED
@@ -0,0 +1,175 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""LLaDA model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+LLaDA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class LLaDAConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`LLaDAModel`]. It is used to instantiate an LLaDA
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a configuration similar to that of LLaDA-8B.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the LLaDA model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`LLaDAModel`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+    """
+
+    model_type = "llada"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=None,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self._rope_scaling_validation()
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
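A short sketch of how `_rope_scaling_validation` above behaves, assuming `configuration_llada.py` is importable from the working directory:

```python
from configuration_llada import LLaDAConfig

# A well-formed scaling dict (two keys, supported type, float factor > 1) passes.
cfg = LLaDAConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(cfg.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

# A factor <= 1.0 (or a non-float) is rejected during __init__.
try:
    LLaDAConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as e:
    print(e)  # `rope_scaling`'s factor field must be a float > 1, got 0.5
```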
siglip-checkpoint-3000/latest ADDED
@@ -0,0 +1 @@
+global_step3000
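The `latest` file is consistent with a DeepSpeed-style checkpoint layout: a plain-text pointer holding the tag of the most recent checkpoint directory (here `global_step3000`), which resume logic reads before loading the corresponding state. A one-liner sketch (path illustrative):

```python
from pathlib import Path

# Read the checkpoint tag that a resume script would load next.
tag = Path("siglip-checkpoint-3000/latest").read_text().strip()
print(tag)  # global_step3000
```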
siglip-checkpoint-3000/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af770f4edc62abdc4419b4f3fa839f004c99a0012dbc9f42b469ec02229eb859
+size 4994631072
siglip-checkpoint-3000/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9653a0a14bff8ebd8c8f72f1b4ba0ea7cfb7d5850a855fbd923e67cfcb6c97f1
+size 4999802600
siglip-checkpoint-3000/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:518053600e6083e39d36a57f1ba63eaf8757111e07cbc58169f70886c54e30ac
+size 4999827272
siglip-checkpoint-3000/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac1a285b0b557a369bd621f4517ada37d61fa0ac9ed2e5915ed4bd49e34d0e48
+size 2564944658
siglip-checkpoint-3000/model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
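The `.safetensors` entries above are Git LFS pointer files, not the weights themselves: `oid sha256:` names the hash of the actual blob and `size` its byte count (roughly 17.6 GB across the four shards, which `model.safetensors.index.json` maps back to parameter names). A sketch for verifying a downloaded shard against its pointer (path illustrative):

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream a large file through hashlib to avoid loading ~5 GB into RAM."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Compare against the `oid sha256:` recorded in the LFS pointer above.
digest = sha256_of("siglip-checkpoint-3000/model-00001-of-00004.safetensors")
print(digest == "af770f4edc62abdc4419b4f3fa839f004c99a0012dbc9f42b469ec02229eb859")
```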
 
siglip-checkpoint-3000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b509c0b70892d7832c63382f21da5a9a8484335ebf733111ccb95420a3ae377e
+size 16389
siglip-checkpoint-3000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1422676a5189b429fc76726b69beebd1cafcb57c60b51175bcf8e2dd7a4b42c
+size 16389
siglip-checkpoint-3000/rng_state_10.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:788334f80ef249d7d8ab27bd470f3e38d1b63f4d248ae8022de6f3860f372cad
+size 16404
siglip-checkpoint-3000/rng_state_11.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:745633849e3c0837e93ea5e1905e208db6c1d263a26f8993e085202df1c8e3e5
+size 16404
siglip-checkpoint-3000/rng_state_12.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22a95e7e38c79401af8bd022300cfeb077edd9b04e2e517f71b8356519c8debc
+size 16404
siglip-checkpoint-3000/rng_state_13.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa5af197f9bb13c639420c8de850c5a5effeae6c9c80e0ceb2ef62c5fd0f8ada
+size 16404
siglip-checkpoint-3000/rng_state_14.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2abeb83690f0fc393168070f9c2979924b2d630bceb344080b84796bf7cc8d79
+size 16404
siglip-checkpoint-3000/rng_state_15.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d661a98ca734e96bc657741176a70fbf4d947147cfb193013b360f4831459023
+size 16404
siglip-checkpoint-3000/rng_state_16.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0fb8f5e927e718cb497301319b5cf99e17ff479257fec0b0998523519639083
+size 16404
siglip-checkpoint-3000/rng_state_17.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d4c29ac137fe32393605b05101fdc1a948cc4c5a6a41b59eb2838e00e87f1e7
+size 16404
siglip-checkpoint-3000/rng_state_18.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75b9e549f46d048046f0f4ff1b8725f317db637845479652f26cab4a48f3e053
+size 16404
siglip-checkpoint-3000/rng_state_19.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4acf773dcff4a6020367796963848e90aa0ce5cb964def62bb1ec0d7179986b2
+size 16404
siglip-checkpoint-3000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:603cc37d78131046b8968d0c980a4fdf8f325337f9e572be908ac92f99617d70
+size 16389
siglip-checkpoint-3000/rng_state_20.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80019fc2b0d59a52e97df26e2d64b8d77a440d3361e4f973413daca5500adee1
+size 16404
siglip-checkpoint-3000/rng_state_21.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a2cc635fb456dc0d62c3486844b13cf4646dca6883b0e4b36de84c26e211dff
+size 16404
siglip-checkpoint-3000/rng_state_22.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d443d814a6c61ff1276fe28cb6132923ae88c6162ff05573b80013a63162e5f
+size 16404
siglip-checkpoint-3000/rng_state_23.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c7fa85bf8a98b9cc37710fd50db2cf2f11769d06e33df739291357de86761bf
+size 16404
siglip-checkpoint-3000/rng_state_24.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ce3c9d9b3e8b852e35df207e35b1afb68b69384861e1e4055ae29d07ff2567e
+size 16404
siglip-checkpoint-3000/rng_state_25.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:131f3d2996d8257d6252b9b9f6f4b7e93158798c5bfa81ccf5b3e65a28c3d028
+size 16404
siglip-checkpoint-3000/rng_state_26.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c70177627701b8605beb364b5361d3acaae1e98da5971316717cdd74c3e99605
+size 16404
siglip-checkpoint-3000/rng_state_27.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e093af7dd9344f21ee2b1951e4144e1470fc7e61b6ad91927d3a4df2e37c1fbd
+size 16404
siglip-checkpoint-3000/rng_state_28.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b23599f9f766a8bc2df1ac93833ef8f6355c20bf92e30fcb157f7522e476ae63
+size 16404
siglip-checkpoint-3000/rng_state_29.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f664ca9869260f8ce769a7a0124821d1991a75d00c38d70dc782c90b5eb9349
+size 16404
siglip-checkpoint-3000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34bbbd639bfe8bcd2d9f2daf6b39f9e5d08889e8daf293bc85d8282a77b53513
+size 16389
siglip-checkpoint-3000/rng_state_30.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc04bbd3d661147dec7565f6293e8b3d05a5f2d31305f0a5018d195dd8f38d4
+size 16404
siglip-checkpoint-3000/rng_state_31.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b663b2715b97c4af78e22c098ade9c17e81ca74ed0ca2cc8ba016da71cb50ebf
+size 16404
siglip-checkpoint-3000/rng_state_32.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:decd782f4fd28d4cec1a581e912ee6473b48534eb8f59f2b9220d202241d8828
+size 16404
siglip-checkpoint-3000/rng_state_33.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:930dde77965261035994a59c493f88d5f67369a88c18767463d8afd862fee453
+size 16404
siglip-checkpoint-3000/rng_state_34.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e169b76c7dcdfbc15455b91564a1be69a91bc820ff6844070acde542a9d8553b
+size 16404
siglip-checkpoint-3000/rng_state_35.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1036608f6cad9612658860d7f5f6014c12fcbb591755aa7606ddcdb0e19f5dc6
+size 16404
siglip-checkpoint-3000/rng_state_36.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc8a8c896ececf256aa9561e5547cce3f419038699275a84f3a2896734dc140c
+size 16404
siglip-checkpoint-3000/rng_state_37.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84d51dd4472fd59fd4ac52cecaba5b9acd72ee66d823e6467f555db7e68b630b
+size 16404
siglip-checkpoint-3000/rng_state_38.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bb6ecd9fe2ddcc9d6073f5d5c773a85211cdc9d53b638810486c9a33f38bfe3
+size 16404
siglip-checkpoint-3000/rng_state_39.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:317cef80a93743c329a58622c0f448cd453cad8b41b1ac1f0cf09d93cd7ba7e8
+size 16404
siglip-checkpoint-3000/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66362706606703a8b908ba5752ae96638144e80a683b7af634db624eb317e507
+size 16389
siglip-checkpoint-3000/rng_state_40.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b933627b7aa5cd9aed0c4922db633be4298b9fd599a382f1c99a3b3d43acbcc
+size 16404
siglip-checkpoint-3000/rng_state_41.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:855cc310b5a4ca7c6bc3fe2424c65b5296b41d5941091656a9082a9f40b32c7f
+size 16404
siglip-checkpoint-3000/rng_state_42.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f9e2750a7b67723d16569d4e39a26cab6a870d91fa9871c18b5ca25fa219780
+size 16404
siglip-checkpoint-3000/rng_state_43.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90b5c4bbffc392d0ea0ce1cb45376db24e9a918c62f9c21b5d4f1dc648d2fa84
+size 16404
siglip-checkpoint-3000/rng_state_44.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23283d2d75c8893f21addd4395a94aa4c22b16d3964f768250fa30baef56b83d
+size 16404
siglip-checkpoint-3000/rng_state_45.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e77a1cf603f3f55e73d4b0a77acc2886a70e86f6da8b425851e5797c71a3d142
+size 16404
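The `rng_state_{rank}.pth` files are per-process random-number-generator snapshots saved at step 3000 so that a resumed run reproduces the same data order and dropout masks; 46 of them appearing here implies at least 46 training ranks (the view is truncated at 50 files). A sketch for inspecting one — the dict layout shown in the comment is typical of Hugging Face `Trainer` checkpoints but is an assumption here:

```python
import torch

# Trainer RNG checkpoints usually hold a dict with "python", "numpy", "cpu",
# and "cuda" states; treat those keys as an assumption and print what exists.
# weights_only=False is needed because the file pickles non-tensor objects.
state = torch.load("siglip-checkpoint-3000/rng_state_0.pth", weights_only=False)
print(type(state))
if isinstance(state, dict):
    print(list(state.keys()))
```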