euhidaman committed · verified
Commit 5be3c56 · 1 Parent(s): 2ad18d3

BitMar 100M tokens - Epoch 1 - 99,686,013 tokens processed

Files changed (4)
  1. README.md +43 -0
  2. config.json +19 -0
  3. pytorch_model.bin +3 -0
  4. training_metadata.json +261 -0
README.md ADDED
@@ -0,0 +1,43 @@
+ ---
+ language: en
+ license: mit
+ tags:
+ - bitmar
+ - multimodal
+ - babylm
+ - cross-modal
+ datasets:
+ - babylm_multimodal
+ metrics:
+ - bleu
+ - cross_modal_similarity
+ ---
+
+ # BitMar 100M Token Model
+
+ This model is trained on a fixed budget of exactly 100 million tokens as part of the BabyLM challenge.
+
+ ## Training Details
+ - Total tokens: 100,000,000
+ - Epochs completed: 1
+ - Tokens processed: 99,686,013
+ - Cross-modal similarity: 0.3418
+
+ ## Model Architecture
+ - Text encoder: 4 layers, hidden size 128
+ - Vision encoder: DINOv2 features compressed to 128 dimensions
+ - Episodic memory: 32 slots
+
+ ## Usage
+ ```python
+ from transformers import AutoModel, AutoTokenizer
+
+ model = AutoModel.from_pretrained("euhidaman/bitmar-attention-multimodal")
+ tokenizer = AutoTokenizer.from_pretrained("euhidaman/bitmar-attention-multimodal")
+ ```
+
+
+ ## Training Status
+ - **Status**: In Progress (Epoch 1)
+ - **Tokens Processed**: 99,686,013
+ - **Best Cross-modal Similarity**: 0.3418
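A note on the usage snippet above: `bitmar` is not a model type bundled with the transformers library, so `AutoModel.from_pretrained` can only resolve it if the repository ships custom modeling code. A minimal loading sketch under that assumption (the `trust_remote_code` flag and the GPT-2 tokenizer fallback are assumptions, not confirmed by this commit; the tokenizer guess comes from `text_encoder_name: "gpt2"` and the GPT-2-sized vocab of 50,257 in the configs below):

```python
from transformers import AutoModel, AutoTokenizer

repo_id = "euhidaman/bitmar-attention-multimodal"

# Assumption: the repo provides custom modeling code that registers
# BitMarModel, so remote code must be trusted for AutoModel to resolve
# the unknown "bitmar" model_type.
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

# Assumption: this commit lists no tokenizer files, so fall back to GPT-2,
# which matches text_encoder_name="gpt2" and vocab_size=50257.
tokenizer = AutoTokenizer.from_pretrained("gpt2")

inputs = tokenizer("A dog runs across the field.", return_tensors="pt")
# Forward pass; the exact input signature depends on the custom BitMar code.
outputs = model(**inputs)
```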
config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BitMarModel"
+   ],
+   "model_type": "bitmar",
+   "vocab_size": 50257,
+   "text_encoder_dim": 128,
+   "text_encoder_layers": 4,
+   "text_encoder_heads": 4,
+   "vision_encoder_dim": 768,
+   "vision_latent_size": 128,
+   "fusion_hidden_size": 128,
+   "memory_size": 32,
+   "episode_dim": 128,
+   "max_seq_len": 256,
+   "dropout": 0.15,
+   "torch_dtype": "float32",
+   "transformers_version": "4.0.0"
+ }
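For reference, a minimal sketch that cross-checks this config against the architecture numbers quoted in the README (it assumes the file above has been saved locally as `config.json`):

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

# Numbers quoted in the README's "Model Architecture" section.
assert cfg["text_encoder_layers"] == 4    # text encoder: 4 layers
assert cfg["text_encoder_dim"] == 128     # hidden size 128
assert cfg["vision_latent_size"] == 128   # vision features compressed to 128
assert cfg["memory_size"] == 32           # 32 episodic memory slots

# 768-d vision features (a dimensionality consistent with a ViT-B-sized
# DINOv2 backbone) are compressed to the 128-d latent used for fusion.
print(cfg["vision_encoder_dim"], "->", cfg["vision_latent_size"])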
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a90cd9981271cc1f56d76c5ddecec018cc2f28c749cce233eb1cbaf9b35552e0
+ size 86128991
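This is a Git LFS pointer, not the weights themselves; the actual file is 86,128,991 bytes (~82 MB, roughly 21M parameters if the file holds pure float32 tensors). A sketch for verifying a downloaded copy against the pointer's oid and size (assumes the binary sits next to the script as `pytorch_model.bin`):

```python
import hashlib

EXPECTED_SHA256 = "a90cd9981271cc1f56d76c5ddecec018cc2f28c749cce233eb1cbaf9b35552e0"
EXPECTED_SIZE = 86128991  # bytes, from the "size" line of the pointer

sha = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:
    # Hash in 1 MiB chunks so the whole file never has to sit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: got {size}"
assert sha.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```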
training_metadata.json ADDED
@@ -0,0 +1,261 @@
+ {
+   "epoch": 0,
+   "global_step": 99498,
+   "tokens_processed": 99686013,
+   "target_tokens": 100000000,
+   "best_similarity": 0.34183505177497864,
+   "training_config": {
+     "model": {
+       "vocab_size": 50257,
+       "text_encoder_dim": 128,
+       "text_encoder_layers": 4,
+       "text_encoder_heads": 4,
+       "text_decoder_dim": 128,
+       "text_decoder_layers": 4,
+       "text_decoder_heads": 4,
+       "vision_encoder_dim": 768,
+       "vision_latent_size": 128,
+       "vision_hidden_size": 64,
+       "vision_compression_method": "learned_compression",
+       "vision_spatial_pooling": true,
+       "vision_pool_size": 2,
+       "fusion_hidden_size": 128,
+       "fusion_num_heads": 4,
+       "fusion_num_layers": 2,
+       "memory_size": 32,
+       "episode_dim": 128,
+       "memory_alpha": 0.2,
+       "direct_writing": true,
+       "memory_compression": true,
+       "enable_adaptive_training": true,
+       "max_seq_len": 256,
+       "dropout": 0.15
+     },
+     "token_constraints": {
+       "total_tokens": 100000000,
+       "caption_tokens": 50000000,
+       "text_tokens": 50000000,
+       "enforce_exact_count": true,
+       "uniform_sampling": true,
+       "alignment_priority": "perfect_alignment",
+       "preserve_image_caption_pairs": true,
+       "strict_alignment_validation": true
+     },
+     "vision_feature_reduction": {
+       "enabled": true,
+       "method": "learned_compression",
+       "target_dim": 64,
+       "spatial_pooling": true,
+       "pool_method": "attention",
+       "hidden_dim": 128,
+       "learnable": true,
+       "preserve_spatial_info": true
+     },
+     "data": {
+       "dataset_dir": "../babylm_dataset",
+       "text_encoder_name": "gpt2",
+       "max_seq_length": 256,
+       "count_tokens": true,
+       "target_caption_tokens": 50000000,
+       "target_text_tokens": 50000000,
+       "token_counting_method": "gpt2",
+       "batch_size": 64,
+       "num_workers": 6,
+       "pin_memory": true,
+       "persistent_workers": true,
+       "mix_ratio": 0.5,
+       "shuffle_datasets": true,
+       "ensure_alignment": true,
+       "validate_alignment": true,
+       "alignment_verification": "strict",
+       "never_break_pairs": true,
+       "alignment_check_frequency": 1000,
+       "use_validation": false,
+       "train_only": true
+     },
+     "attention_analysis": {
+       "track_top_k": 5,
+       "log_every_n_steps": 200,
+       "viz_every_n_epochs": 3,
+       "save_head_patterns": true,
+       "analyze_memory_attention": true,
+       "analyze_cross_modal": true,
+       "track_token_alignment": true
+     },
+     "adaptive_training": {
+       "enabled": true,
+       "similarity_window_size": 200,
+       "drop_threshold": 0.12,
+       "min_steps_between_interventions": 800,
+       "freeze_duration_steps": 1500,
+       "loss_rebalance_factor": 2.0,
+       "similarity_smoothing_alpha": 0.15
+     },
+     "training": {
+       "max_epochs": 10,
+       "accumulate_grad_batches": 2,
+       "gradient_clip_val": 0.3,
+       "val_check_interval": 1000,
+       "scheduler": "cosine_with_restarts",
+       "min_lr": 5e-05,
+       "warmup_steps": 1000,
+       "learning_rate": 0.0002,
+       "weight_decay": 0.02,
+       "optimizer": "adamw8bit",
+       "scheduler_config": {
+         "T_0": 1000,
+         "T_mult": 2,
+         "eta_min_ratio": 0.1
+       },
+       "cross_modal_loss_weight": 1.5,
+       "text_generation_loss_weight": 1.0,
+       "memory_regularization_weight": 0.1,
+       "alignment_consistency_weight": 0.5,
+       "track_token_usage": true,
+       "log_token_progress": true,
+       "stop_at_token_limit": false,
+       "validate_alignment_every_n_steps": 500,
+       "log_alignment_metrics": true,
+       "alignment_loss_scaling": "adaptive"
+     },
+     "wandb": {
+       "project": "bitmar-100M-attention-epochs",
+       "entity": "babylm-ntust",
+       "api_key": null,
+       "log_every_n_steps": 100,
+       "log_attention": true,
+       "log_memory": true,
+       "log_gradients": true,
+       "log_token_usage": true,
+       "log_cross_modal_similarity": true,
+       "log_alignment_quality": true,
+       "log_caption_image_matching": true,
+       "save_code": true,
+       "create_plots": true,
+       "plot_attention_heatmaps": true,
+       "plot_memory_usage": true,
+       "plot_token_distribution": true,
+       "plot_alignment_metrics": true,
+       "log_memory_evolution": true,
+       "plot_memory_evolution_heatmap": true,
+       "plot_memory_diversity": true,
+       "plot_memory_access_patterns": true,
+       "memory_visualization_frequency": 5000,
+       "memory_snapshot_frequency": 10000,
+       "track_memory_metrics": [
+         "memory_diversity_score",
+         "memory_specialization_score",
+         "memory_usage_entropy",
+         "cross_modal_memory_ratio",
+         "memory_slot_utilization",
+         "memory_update_frequency",
+         "memory_retrieval_accuracy"
+       ]
+     },
+     "evaluation": {
+       "metrics": [
+         "bleu",
+         "rouge",
+         "cross_modal_similarity",
+         "memory_efficiency"
+       ],
+       "generate_samples": true,
+       "num_samples": 20,
+       "max_generation_length": 32,
+       "temperature": 0.8,
+       "top_p": 0.9,
+       "evaluate_alignment": true,
+       "alignment_metrics": [
+         "cosine_similarity",
+         "retrieval_accuracy",
+         "caption_image_matching",
+         "cross_modal_retrieval"
+       ],
+       "alignment_threshold": 0.8,
+       "validate_pairs_during_eval": true
+     },
+     "output": {
+       "checkpoint_dir": "checkpoints_100M_dataset",
+       "log_dir": "logs_100M_dataset",
+       "attention_dir": "attention_100M_dataset",
+       "memory_dir": "memory_100M_dataset",
+       "results_dir": "results_100M_dataset",
+       "token_logs_dir": "token_logs_100M_dataset"
+     },
+     "memory_optimization": {
+       "use_gradient_checkpointing": true,
+       "use_fp16": true,
+       "use_int8_vision": false,
+       "empty_cache_frequency": 10,
+       "max_memory_slots_in_ram": 16,
+       "compress_episodic_memory": true,
+       "vision_feature_caching": false,
+       "vision_batch_processing": true,
+       "tie_word_embeddings": true,
+       "use_shared_attention": false
+     },
+     "performance_targets": {
+       "max_model_size_mb": 50,
+       "target_cross_modal_similarity": 0.75,
+       "target_text_generation_quality": 0.6,
+       "memory_efficiency_threshold": 0.8
+     },
+     "flops_tracking": {
+       "enabled": true,
+       "log_frequency": 100,
+       "save_statistics": true,
+       "estimate_theoretical": true,
+       "track_peak_performance": true,
+       "log_to_wandb": true,
+       "detailed_breakdown": true,
+       "memory_bandwidth_tracking": false,
+       "efficiency_analysis": true,
+       "track_components": [
+         "attention",
+         "feedforward",
+         "layer_norm",
+         "embeddings",
+         "vision_encoder",
+         "cross_modal_fusion"
+       ]
+     },
+     "token_tracking": {
+       "log_frequency": 1000,
+       "save_token_distribution": true,
+       "monitor_caption_text_ratio": true,
+       "enforce_token_limits": false,
+       "early_stopping_on_limit": false,
+       "track_alignment_quality": true,
+       "log_misaligned_samples": true,
+       "alignment_quality_threshold": 0.7,
+       "save_alignment_statistics": true,
+       "correlate_flops_with_tokens": true,
+       "log_computational_efficiency": true,
+       "track_throughput_vs_quality": true
+     },
+     "huggingface_hub": {
+       "enabled": true,
+       "repo_id": "euhidaman/bitmar-attention-multimodal",
+       "private": true,
+       "upload_after_epoch": true,
+       "upload_final_model": true,
+       "commit_message_template": "BitMar 100M tokens - Epoch {epoch} - {tokens_processed:,} tokens processed",
+       "create_model_card": true,
+       "model_card_template": "---\nlanguage: en\nlicense: mit\ntags:\n- bitmar\n- multimodal\n- babylm\n- cross-modal\ndatasets:\n- babylm_multimodal\nmetrics:\n- bleu\n- cross_modal_similarity\n---\n\n# BitMar 100M Token Model\n\nThis model was trained on exactly 100 million tokens as part of the BabyLM challenge.\n\n## Training Details\n- Total tokens: 100,000,000\n- Epochs completed: {epoch}\n- Tokens processed: {tokens_processed:,}\n- Cross-modal similarity: {best_similarity:.4f}\n\n## Model Architecture\n- Text encoder: {text_encoder_layers} layers, {text_encoder_dim} hidden size\n- Vision encoder: DiNOv2 features compressed to {vision_latent_size}\n- Episodic memory: {memory_size} slots\n\n## Usage\n```python\nfrom transformers import AutoModel, AutoTokenizer\n\nmodel = AutoModel.from_pretrained(\"{repo_id}\")\ntokenizer = AutoTokenizer.from_pretrained(\"{repo_id}\")\n```\n"
+     },
+     "attention_sinks": {
+       "enabled": true,
+       "attention_sink_size": 4,
+       "attention_sink_window_size": 1020,
+       "inject_to_text_encoder": true,
+       "inject_to_text_decoder": true,
+       "position_shift_enabled": true,
+       "cache_compression": true,
+       "adaptive_window_size": false,
+       "memory_efficient_attention": true,
+       "preserve_episodic_memory": true,
+       "preserve_quantization": true,
+       "preserve_cross_modal_fusion": true
+     }
+   }
+ }
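A short sketch of how the top-level counters in this file relate to each other (it assumes the file has been saved locally as `training_metadata.json`; the figures in the comments are straightforward arithmetic on the values above):

```python
import json

with open("training_metadata.json") as f:
    meta = json.load(f)

tokens = meta["tokens_processed"]   # 99,686,013
target = meta["target_tokens"]      # 100,000,000
steps = meta["global_step"]         # 99,498

print(f"budget used: {tokens / target:.2%}")      # ~99.69%
print(f"tokens per step: {tokens / steps:.0f}")   # ~1002

# With batch_size 64 and max_seq_length 256, one batch could carry up to
# 16,384 tokens, so ~1002 counted tokens per step suggests short sequences
# (e.g. captions) dominate, or that padding is excluded from the count.
```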