eacortes commited on
Commit
89e1b8a
·
verified ·
1 Parent(s): ad4501b

Upload 19 files

Browse files
Files changed (19) hide show
  1. README.md +312 -3
  2. config.json +51 -0
  3. configuration_modchembert.py +84 -0
  4. logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_bace_classification_epochs100_batch_size32_20250918_150149.log +343 -0
  5. logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_bbbp_epochs100_batch_size32_20250918_151842.log +343 -0
  6. logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_clintox_epochs100_batch_size32_20250918_170254.log +371 -0
  7. logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_hiv_epochs100_batch_size32_20250922_102753.log +325 -0
  8. logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_sider_epochs100_batch_size32_20250918_164215.log +351 -0
  9. logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_tox21_epochs100_batch_size32_20250918_153929.log +331 -0
  10. logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_bace_regression_epochs100_batch_size32_20250918_151852.log +325 -0
  11. logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_clearance_epochs100_batch_size32_20250918_165507.log +379 -0
  12. logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_delaney_epochs100_batch_size32_20250918_150151.log +363 -0
  13. logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_freesolv_epochs100_batch_size32_20250918_154545.log +379 -0
  14. logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_lipo_epochs100_batch_size32_20250918_155814.log +373 -0
  15. model.safetensors +3 -0
  16. modeling_modchembert.py +554 -0
  17. special_tokens_map.json +37 -0
  18. tokenizer.json +2554 -0
  19. tokenizer_config.json +53 -0
README.md CHANGED
@@ -1,3 +1,312 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ datasets:
4
+ - Derify/augmented_canonical_druglike_QED_Pfizer_15M
5
+ metrics:
6
+ - roc_auc
7
+ - rmse
8
+ library_name: transformers
9
+ tags:
10
+ - modernbert
11
+ - ModChemBERT
12
+ - cheminformatics
13
+ - chemical-language-model
14
+ - molecular-property-prediction
15
+ pipeline_tag: fill-mask
16
+ model-index:
17
+ - name: Derify/ModChemBERT-MLM
18
+ results:
19
+ - task:
20
+ type: text-classification
21
+ name: Classification (ROC AUC)
22
+ dataset:
23
+ name: BACE
24
+ type: BACE
25
+ metrics:
26
+ - type: roc_auc
27
+ value: 0.8065
28
+ - task:
29
+ type: text-classification
30
+ name: Classification (ROC AUC)
31
+ dataset:
32
+ name: BBBP
33
+ type: BBBP
34
+ metrics:
35
+ - type: roc_auc
36
+ value: 0.7222
37
+ - task:
38
+ type: text-classification
39
+ name: Classification (ROC AUC)
40
+ dataset:
41
+ name: CLINTOX
42
+ type: CLINTOX
43
+ metrics:
44
+ - type: roc_auc
45
+ value: 0.9709
46
+ - task:
47
+ type: text-classification
48
+ name: Classification (ROC AUC)
49
+ dataset:
50
+ name: HIV
51
+ type: HIV
52
+ metrics:
53
+ - type: roc_auc
54
+ value: 0.7800
55
+ - task:
56
+ type: text-classification
57
+ name: Classification (ROC AUC)
58
+ dataset:
59
+ name: SIDER
60
+ type: SIDER
61
+ metrics:
62
+ - type: roc_auc
63
+ value: 0.6419
64
+ - task:
65
+ type: text-classification
66
+ name: Classification (ROC AUC)
67
+ dataset:
68
+ name: TOX21
69
+ type: TOX21
70
+ metrics:
71
+ - type: roc_auc
72
+ value: 0.7400
73
+ - task:
74
+ type: regression
75
+ name: Regression (RMSE)
76
+ dataset:
77
+ name: BACE
78
+ type: BACE
79
+ metrics:
80
+ - type: rmse
81
+ value: 1.0893
82
+ - task:
83
+ type: regression
84
+ name: Regression (RMSE)
85
+ dataset:
86
+ name: CLEARANCE
87
+ type: CLEARANCE
88
+ metrics:
89
+ - type: rmse
90
+ value: 49.0005
91
+ - task:
92
+ type: regression
93
+ name: Regression (RMSE)
94
+ dataset:
95
+ name: ESOL
96
+ type: ESOL
97
+ metrics:
98
+ - type: rmse
99
+ value: 0.8456
100
+ - task:
101
+ type: regression
102
+ name: Regression (RMSE)
103
+ dataset:
104
+ name: FREESOLV
105
+ type: FREESOLV
106
+ metrics:
107
+ - type: rmse
108
+ value: 0.5491
109
+ - task:
110
+ type: regression
111
+ name: Regression (RMSE)
112
+ dataset:
113
+ name: LIPO
114
+ type: LIPO
115
+ metrics:
116
+ - type: rmse
117
+ value: 0.7147
118
+ ---
119
+
120
+ # ModChemBERT: ModernBERT as a Chemical Language Model
121
+ ModChemBERT is a ModernBERT-based chemical language model (CLM), trained on SMILES strings for masked language modeling (MLM) and downstream molecular property prediction (classification & regression).
122
+
123
+ ## Usage
124
+ ### Load Model
125
+ ```python
126
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
127
+
128
+ model_id = "Derify/ModChemBERT-MLM"
129
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
130
+ model = AutoModelForMaskedLM.from_pretrained(
131
+ model_id,
132
+ trust_remote_code=True,
133
+ dtype="float16",
134
+ device_map="auto",
135
+ )
136
+ ```
137
+
138
+ ### Fill-Mask Pipeline
139
+ ```python
140
+ from transformers import pipeline
141
+
142
+ fill = pipeline("fill-mask", model=model, tokenizer=tokenizer)
143
+ print(fill("c1ccccc1[MASK]"))
144
+ ```
145
+
146
+ ## Intended Use
147
+ * Primary: Research and development for molecular property prediction, experimentation with pooling strategies, and as a foundational model for downstream applications.
148
+ * Appropriate for: Binary / multi-class classification (e.g., toxicity, activity) and single-task or multi-task regression (e.g., solubility, clearance) after fine-tuning.
149
+ * Not intended for generating novel molecules.
150
+
151
+ ## Limitations
152
+ - Out-of-domain performance may degrade for: very long (>128 token) SMILES, inorganic / organometallic compounds, polymers, or charged / enumerated tautomers are not well represented in training.
153
+ - No guarantee of synthesizability, safety, or biological efficacy.
154
+
155
+ ## Ethical Considerations & Responsible Use
156
+ - Potential biases arise from training corpora skewed to drug-like space.
157
+ - Do not deploy in clinical or regulatory settings without rigorous, domain-specific validation.
158
+
159
+ ## Architecture
160
+ - Backbone: ModernBERT
161
+ - Hidden size: 768
162
+ - Intermediate size: 1152
163
+ - Encoder Layers: 22
164
+ - Attention heads: 12
165
+ - Max sequence length: 256 tokens (MLM primarily trained with 128-token sequences)
166
+ - Vocabulary: BPE tokenizer using [MolFormer's vocab](https://github.com/emapco/ModChemBERT/blob/main/modchembert/tokenizers/molformer/vocab.json) (2362 tokens)
167
+
168
+ ## Pooling (Classifier / Regressor Head)
169
+ Kallergis et al. [1] demonstrated that the CLM embedding method prior to the prediction head can significantly impact downstream performance.
170
+
171
+ Behrendt et al. [2] noted that the last few layers contain task-specific information and that pooling methods leveraging information from multiple layers can enhance model performance. Their results further demonstrated that the `max_seq_mha` pooling method was particularly effective in low-data regimes, which is often the case for molecular property prediction tasks.
172
+
173
+ Multiple pooling strategies are supported by ModChemBERT to explore their impact on downstream performance:
174
+ - `cls`: Last layer [CLS]
175
+ - `mean`: Mean over last hidden layer
176
+ - `max_cls`: Max over last k layers of [CLS]
177
+ - `cls_mha`: MHA with [CLS] as query
178
+ - `max_seq_mha`: MHA with max pooled sequence as KV and max pooled [CLS] as query
179
+ - `sum_mean`: Sum over all layers then mean tokens
180
+ - `sum_sum`: Sum over all layers then sum tokens
181
+ - `mean_mean`: Mean over all layers then mean tokens
182
+ - `mean_sum`: Mean over all layers then sum tokens
183
+ - `max_seq_mean`: Max over last k layers then mean tokens
184
+
185
+ ## Training Pipeline
186
+ <div align="center">
187
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/656892962693fa22e18b5331/bxNbpgMkU8m60ypyEJoWQ.png" alt="ModChemBERT Training Pipeline" width="650"/>
188
+ </div>
189
+
190
+ ### Rationale for MTR Stage
191
+ Following Sultan et al. [3], multi-task regression (physicochemical properties) biases the latent space toward ADME-related representations prior to narrow TAFT specialization. Sultan et al. observed that MLM + DAPT (MTR) outperforms MLM-only, MTR-only, and MTR + DAPT (MTR).
192
+
193
+ ### Checkpoint Averaging Motivation
194
+ Checkpoint averaging is inspired by ModernBERT [4], JaColBERTv2.5 [5], and Llama 3.1 [6], whose results show that model merging can enhance generalization or performance while mitigating overfitting to any single fine-tune or annealing checkpoint.
195
+
196
+ ## Datasets
197
+ - Pretraining: [Derify/augmented_canonical_druglike_QED_Pfizer_15M](https://huggingface.co/datasets/Derify/augmented_canonical_druglike_QED_Pfizer_15M)
198
+ - Domain Adaptive Pretraining (DAPT) & Task Adaptive Fine-tuning (TAFT): ADME + AstraZeneca datasets (10 tasks) with scaffold splits from DA4MT pipeline (see [domain-adaptation-molecular-transformers](https://github.com/emapco/ModChemBERT/tree/main/domain-adaptation-molecular-transformers))
199
+ - Benchmarking: ChemBERTa-3 [7] tasks (BACE, BBBP, TOX21, HIV, SIDER, CLINTOX for classification; ESOL, FREESOLV, LIPO, BACE, CLEARANCE for regression)
200
+
201
+ ## Benchmarking
202
+ Benchmarks were conducted with the ChemBERTa-3 framework using DeepChem scaffold splits. Each task was trained for 100 epochs with 3 random seeds.
203
+
204
+ ### Evaluation Methodology
205
+ - Classification Metric: ROC AUC.
206
+ - Regression Metric: RMSE.
207
+ - Aggregation: Mean ± standard deviation of the triplicate results.
208
+ - Input Constraints: SMILES truncated / filtered to ≤200 tokens, following the MolFormer paper's recommendation.
209
+
210
+ ### Results
211
+ <details><summary>Click to expand</summary>
212
+
213
+ #### Classification Datasets (ROC AUC - Higher is better)
214
+
215
+ | Model | BACE↑ | BBBP↑ | CLINTOX↑ | HIV↑ | SIDER↑ | TOX21↑ | AVG† |
216
+ | ---------------------------------------------------------------------------- | ----------------- | ----------------- | --------------------- | --------------------- | --------------------- | ----------------- | ------ |
217
+ | **Tasks** | 1 | 1 | 2 | 1 | 27 | 12 | |
218
+ | [ChemBERTa-100M-MLM](https://huggingface.co/DeepChem/ChemBERTa-100M-MLM)* | 0.781 ± 0.019 | 0.700 ± 0.027 | 0.979 ± 0.022 | 0.740 ± 0.013 | 0.611 ± 0.002 | 0.718 ± 0.011 | 0.7548 |
219
+ | [c3-MoLFormer-1.1B](https://huggingface.co/DeepChem/MoLFormer-c3-1.1B)* | 0.819 ± 0.019 | 0.735 ± 0.019 | 0.839 ± 0.013 | 0.762 ± 0.005 | 0.618 ± 0.005 | 0.723 ± 0.012 | 0.7493 |
220
+ | MoLFormer-LHPC* | **0.887 ± 0.004** | **0.908 ± 0.013** | 0.993 ± 0.004 | 0.750 ± 0.003 | 0.622 ± 0.007 | **0.791 ± 0.014** | 0.8252 |
221
+ | ------------------------- | ----------------- | ----------------- | ------------------- | ------------------- | ------------------- | ----------------- | ------ |
222
+ | [MLM](https://huggingface.co/Derify/ModChemBERT-MLM) | 0.8065 ± 0.0103 | 0.7222 ± 0.0150 | 0.9709 ± 0.0227 | ***0.7800 ± 0.0133*** | 0.6419 ± 0.0113 | 0.7400 ± 0.0044 | 0.7769 |
223
+ | [MLM + DAPT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT) | 0.8224 ± 0.0156 | 0.7402 ± 0.0095 | 0.9820 ± 0.0138 | 0.7702 ± 0.0020 | 0.6303 ± 0.0039 | 0.7360 ± 0.0036 | 0.7802 |
224
+ | [MLM + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-TAFT) | 0.7924 ± 0.0155 | 0.7282 ± 0.0058 | 0.9725 ± 0.0213 | 0.7770 ± 0.0047 | 0.6542 ± 0.0128 | *0.7646 ± 0.0039* | 0.7815 |
225
+ | [MLM + DAPT + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT-TAFT) | 0.8213 ± 0.0051 | 0.7356 ± 0.0094 | 0.9664 ± 0.0202 | 0.7750 ± 0.0048 | 0.6415 ± 0.0094 | 0.7263 ± 0.0036 | 0.7777 |
226
+ | [MLM + DAPT + TAFT OPT](https://huggingface.co/Derify/ModChemBERT) | *0.8346 ± 0.0045* | *0.7573 ± 0.0120* | ***0.9938 ± 0.0017*** | 0.7737 ± 0.0034 | ***0.6600 ± 0.0061*** | 0.7518 ± 0.0047 | 0.7952 |
227
+
228
+ #### Regression Datasets (RMSE - Lower is better)
229
+
230
+ | Model | BACE↓ | CLEARANCE↓ | ESOL↓ | FREESOLV↓ | LIPO↓ | AVG‡ |
231
+ | ---------------------------------------------------------------------------- | --------------------- | ---------------------- | --------------------- | --------------------- | --------------------- | ---------------- |
232
+ | **Tasks** | 1 | 1 | 1 | 1 | 1 | |
233
+ | [ChemBERTa-100M-MLM](https://huggingface.co/DeepChem/ChemBERTa-100M-MLM)* | 1.011 ± 0.038 | 51.582 ± 3.079 | 0.920 ± 0.011 | 0.536 ± 0.016 | 0.758 ± 0.013 | 0.8063 / 10.9614 |
234
+ | [c3-MoLFormer-1.1B](https://huggingface.co/DeepChem/MoLFormer-c3-1.1B)* | 1.094 ± 0.126 | 52.058 ± 2.767 | 0.829 ± 0.019 | 0.572 ± 0.023 | 0.728 ± 0.016 | 0.8058 / 11.0562 |
235
+ | MoLFormer-LHPC* | 1.201 ± 0.100 | 45.74 ± 2.637 | 0.848 ± 0.031 | 0.683 ± 0.040 | 0.895 ± 0.080 | 0.9068 / 9.8734 |
236
+ | ------------------------- | ------------------- | -------------------- | ------------------- | ------------------- | ------------------- | ---------------- |
237
+ | [MLM](https://huggingface.co/Derify/ModChemBERT-MLM) | 1.0893 ± 0.1319 | 49.0005 ± 1.2787 | 0.8456 ± 0.0406 | 0.5491 ± 0.0134 | 0.7147 ± 0.0062 | 0.7997 / 10.4398 |
238
+ | [MLM + DAPT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT) | 0.9931 ± 0.0258 | 45.4951 ± 0.7112 | 0.9319 ± 0.0153 | 0.6049 ± 0.0666 | 0.6874 ± 0.0040 | 0.8043 / 9.7425 |
239
+ | [MLM + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-TAFT) | 1.0304 ± 0.1146 | 47.8418 ± 0.4070 | ***0.7669 ± 0.0024*** | 0.5293 ± 0.0267 | 0.6708 ± 0.0074 | 0.7493 / 10.1678 |
240
+ | [MLM + DAPT + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT-TAFT) | 0.9713 ± 0.0224 | ***42.8010 ± 3.3475*** | 0.8169 ± 0.0268 | 0.5445 ± 0.0257 | 0.6820 ± 0.0028 | 0.7537 / 9.1631 |
241
+ | [MLM + DAPT + TAFT OPT](https://huggingface.co/Derify/ModChemBERT) | ***0.9665 ± 0.0250*** | 44.0137 ± 1.1110 | 0.8158 ± 0.0115 | ***0.4979 ± 0.0158*** | ***0.6505 ± 0.0126*** | 0.7327 / 9.3889 |
242
+
243
+ **Bold** indicates the best result in the column; *italic* indicates the best result among ModChemBERT checkpoints.<br/>
244
+ \* Published results from the ChemBERTa-3 [7] paper for optimized chemical language models using DeepChem scaffold splits.<br/>
245
+ † AVG column shows the mean score across all classification tasks.<br/>
246
+ ‡ AVG column shows the mean scores across all regression tasks without and with the clearance score.
247
+
248
+ </details>
249
+
250
+ ## Optimized ModChemBERT Hyperparameters
251
+
252
+ <details><summary>Click to expand</summary>
253
+
254
+ ### TAFT Datasets
255
+ Optimal parameters (per dataset) for the `MLM + DAPT + TAFT OPT` merged model:
256
+
257
+ | Dataset | Learning Rate | Batch Size | Warmup Ratio | Classifier Pooling | Last k Layers |
258
+ | ---------------------- | ------------- | ---------- | ------------ | ------------------ | ------------- |
259
+ | adme_microsom_stab_h | 3e-5 | 8 | 0.0 | max_seq_mean | 5 |
260
+ | adme_microsom_stab_r | 3e-5 | 16 | 0.2 | max_cls | 3 |
261
+ | adme_permeability | 3e-5 | 8 | 0.0 | max_cls | 3 |
262
+ | adme_ppb_h | 1e-5 | 32 | 0.1 | max_seq_mean | 5 |
263
+ | adme_ppb_r | 1e-5 | 32 | 0.0 | sum_mean | N/A |
264
+ | adme_solubility | 3e-5 | 32 | 0.0 | sum_mean | N/A |
265
+ | astrazeneca_CL | 3e-5 | 8 | 0.1 | max_seq_mha | 3 |
266
+ | astrazeneca_LogD74 | 1e-5 | 8 | 0.0 | max_seq_mean | 5 |
267
+ | astrazeneca_PPB | 1e-5 | 32 | 0.0 | max_cls | 3 |
268
+ | astrazeneca_Solubility | 1e-5 | 32 | 0.0 | max_seq_mean | 5 |
269
+
270
+ ### Benchmarking Datasets
271
+ Optimal parameters (per dataset) for the `MLM + DAPT + TAFT OPT` merged model:
272
+
273
+ | Dataset | Batch Size | Classifier Pooling | Last k Layers | Pooling Attention Dropout | Classifier Dropout | Embedding Dropout |
274
+ | ------------------- | ---------- | ------------------ | ------------- | ------------------------- | ------------------ | ----------------- |
275
+ | bace_classification | 32 | max_seq_mha | 3 | 0.0 | 0.0 | 0.0 |
276
+ | bbbp | 64 | max_cls | 3 | 0.1 | 0.0 | 0.0 |
277
+ | clintox | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
278
+ | hiv | 32 | max_seq_mha | 3 | 0.0 | 0.0 | 0.0 |
279
+ | sider | 32 | mean | N/A | 0.1 | 0.0 | 0.1 |
280
+ | tox21 | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
281
+ | bace_regression | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
282
+ | clearance | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
283
+ | esol | 64 | sum_mean | N/A | 0.1 | 0.0 | 0.1 |
284
+ | freesolv | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
285
+ | lipo | 32 | max_seq_mha | 3 | 0.1 | 0.1 | 0.1 |
286
+
287
+ </details>
288
+
289
+ ## Hardware
290
+ Training and experiments were performed on 2 NVIDIA RTX 3090 GPUs.
291
+
292
+ ## Citation
293
+ If you use ModChemBERT in your research, please cite the checkpoint and the following:
294
+ ```
295
+ @software{cortes-2025-modchembert,
296
+ author = {Emmanuel Cortes},
297
+ title = {ModChemBERT: ModernBERT as a Chemical Language Model},
298
+ year = {2025},
299
+ publisher = {GitHub},
300
+ howpublished = {GitHub repository},
301
+ url = {https://github.com/emapco/ModChemBERT}
302
+ }
303
+ ```
304
+
305
+ ## References
306
+ 1. Kallergis, Georgios, et al. "Domain adaptable language modeling of chemical compounds identifies potent pathoblockers for Pseudomonas aeruginosa." Communications Chemistry 8.1 (2025): 114.
307
+ 2. Behrendt, Maike, Stefan Sylvius Wagner, and Stefan Harmeling. "MaxPoolBERT: Enhancing BERT Classification via Layer-and Token-Wise Aggregation." arXiv preprint arXiv:2505.15696 (2025).
308
+ 3. Sultan, Afnan, et al. "Transformers for molecular property prediction: Domain adaptation efficiently improves performance." arXiv preprint arXiv:2503.03360 (2025).
309
+ 4. Warner, Benjamin, et al. "Smarter, better, faster, longer: A modern bidirectional encoder for fast, memory efficient, and long context finetuning and inference." arXiv preprint arXiv:2412.13663 (2024).
310
+ 5. Clavié, Benjamin. "JaColBERTv2.5: Optimising Multi-Vector Retrievers to Create State-of-the-Art Japanese Retrievers with Constrained Resources." Journal of Natural Language Processing 32.1 (2025): 176-218.
311
+ 6. Grattafiori, Aaron, et al. "The llama 3 herd of models." arXiv preprint arXiv:2407.21783 (2024).
312
+ 7. Singh, Riya, et al. "ChemBERTa-3: An Open Source Training Framework for Chemical Foundation Models." (2025).
config.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "ModChemBertForMaskedLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.1,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_modchembert.ModChemBertConfig",
9
+ "AutoModelForMaskedLM": "modeling_modchembert.ModChemBertForMaskedLM"
10
+ },
11
+ "bos_token_id": 0,
12
+ "classifier_activation": "gelu",
13
+ "classifier_bias": false,
14
+ "classifier_dropout": 0.0,
15
+ "classifier_pooling": "max_seq_mha",
16
+ "classifier_pooling_attention_dropout": 0.1,
17
+ "classifier_pooling_last_k": 3,
18
+ "classifier_pooling_num_attention_heads": 4,
19
+ "cls_token_id": 0,
20
+ "decoder_bias": true,
21
+ "deterministic_flash_attn": false,
22
+ "dtype": "float32",
23
+ "embedding_dropout": 0.1,
24
+ "eos_token_id": 1,
25
+ "global_attn_every_n_layers": 3,
26
+ "global_rope_theta": 160000.0,
27
+ "hidden_activation": "gelu",
28
+ "hidden_size": 768,
29
+ "initializer_cutoff_factor": 2.0,
30
+ "initializer_range": 0.02,
31
+ "intermediate_size": 1152,
32
+ "layer_norm_eps": 1e-05,
33
+ "local_attention": 8,
34
+ "local_rope_theta": 10000.0,
35
+ "max_position_embeddings": 256,
36
+ "mlp_bias": false,
37
+ "mlp_dropout": 0.1,
38
+ "model_type": "modchembert",
39
+ "norm_bias": false,
40
+ "norm_eps": 1e-05,
41
+ "num_attention_heads": 12,
42
+ "num_hidden_layers": 22,
43
+ "pad_token_id": 2,
44
+ "position_embedding_type": "absolute",
45
+ "repad_logits_with_grad": false,
46
+ "sep_token_id": 1,
47
+ "sparse_pred_ignore_index": -100,
48
+ "sparse_prediction": false,
49
+ "transformers_version": "4.56.1",
50
+ "vocab_size": 2362
51
+ }
configuration_modchembert.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 Emmanuel Cortes, All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Literal
16
+
17
+ from transformers.models.modernbert.configuration_modernbert import ModernBertConfig
18
+
19
+
20
class ModChemBertConfig(ModernBertConfig):
    """
    Configuration class for ModChemBert models.

    This configuration class extends ModernBertConfig with additional parameters specific to
    chemical molecule modeling and custom pooling strategies for classification/regression tasks.
    It accepts all arguments and keyword arguments from ModernBertConfig.

    Args:
        classifier_pooling (str, optional): Pooling strategy for sequence classification.
            Available options:
            - "cls": Use CLS token representation
            - "mean": Attention-weighted average pooling
            - "sum_mean": Sum all hidden states across layers, then mean pool over sequence (ChemLM approach)
            - "sum_sum": Sum all hidden states across layers, then sum pool over sequence
            - "mean_mean": Mean all hidden states across layers, then mean pool over sequence
            - "mean_sum": Mean all hidden states across layers, then sum pool over sequence
            - "max_cls": Element-wise max pooling over last k hidden states, then take CLS token
            - "cls_mha": Multi-head attention with CLS token as query and full sequence as keys/values
            - "max_seq_mha": Max pooling over last k states + multi-head attention with CLS as query
            - "max_seq_mean": Max pooling over last k hidden states, then mean pooling over sequence
            Defaults to "max_seq_mha".
        classifier_pooling_num_attention_heads (int, optional): Number of attention heads for multi-head attention
            pooling strategies (cls_mha, max_seq_mha). Defaults to 4.
        classifier_pooling_attention_dropout (float, optional): Dropout probability for multi-head attention
            pooling strategies (cls_mha, max_seq_mha). Defaults to 0.0.
        classifier_pooling_last_k (int, optional): Number of last hidden layers to use for max pooling
            strategies (max_cls, max_seq_mha, max_seq_mean). Defaults to 8.
        *args: Variable length argument list passed to ModernBertConfig.
        **kwargs: Arbitrary keyword arguments passed to ModernBertConfig.

    Note:
        This class inherits all configuration parameters from ModernBertConfig including
        hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, etc.
    """

    model_type = "modchembert"

    def __init__(
        self,
        *args,
        classifier_pooling: Literal[
            "cls",
            "mean",
            "sum_mean",
            "sum_sum",
            "mean_mean",
            "mean_sum",
            "max_cls",
            "cls_mha",
            "max_seq_mha",
            "max_seq_mean",
        ] = "max_seq_mha",
        classifier_pooling_num_attention_heads: int = 4,
        classifier_pooling_attention_dropout: float = 0.0,
        classifier_pooling_last_k: int = 8,
        **kwargs,
    ):
        # ModernBertConfig.__init__ validates classifier_pooling against its own
        # restricted set ({"cls", "mean"}) and raises ValueError otherwise.
        # Pass the always-valid "cls" to satisfy that check, then override the
        # attribute with the extended ModChemBert value below.
        super().__init__(*args, classifier_pooling="cls", **kwargs)
        self.classifier_pooling = classifier_pooling
        self.classifier_pooling_num_attention_heads = classifier_pooling_num_attention_heads
        self.classifier_pooling_attention_dropout = classifier_pooling_attention_dropout
        self.classifier_pooling_last_k = classifier_pooling_last_k
logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_bace_classification_epochs100_batch_size32_20250918_150149.log ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:01:49,271 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Running benchmark for dataset: bace_classification
2
+ 2025-09-18 15:01:49,271 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - dataset: bace_classification, tasks: ['Class'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 15:01:49,275 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bace_classification at 2025-09-18_15-01-49
4
+ 2025-09-18 15:01:55,193 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.6349 | Val mean-roc_auc_score: 0.6510
5
+ 2025-09-18 15:01:55,193 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
6
+ 2025-09-18 15:01:56,013 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6510
7
+ 2025-09-18 15:01:59,836 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4539 | Val mean-roc_auc_score: 0.7104
8
+ 2025-09-18 15:02:00,032 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
9
+ 2025-09-18 15:02:00,519 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7104
10
+ 2025-09-18 15:02:04,198 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3839 | Val mean-roc_auc_score: 0.7060
11
+ 2025-09-18 15:02:08,034 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.3224 | Val mean-roc_auc_score: 0.6953
12
+ 2025-09-18 15:02:11,480 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2681 | Val mean-roc_auc_score: 0.7271
13
+ 2025-09-18 15:02:11,649 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 190
14
+ 2025-09-18 15:02:12,136 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7271
15
+ 2025-09-18 15:02:13,304 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2656 | Val mean-roc_auc_score: 0.7285
16
+ 2025-09-18 15:02:13,768 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 228
17
+ 2025-09-18 15:02:14,282 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7285
18
+ 2025-09-18 15:02:17,979 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2171 | Val mean-roc_auc_score: 0.6990
19
+ 2025-09-18 15:02:21,699 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2412 | Val mean-roc_auc_score: 0.7600
20
+ 2025-09-18 15:02:21,930 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 304
21
+ 2025-09-18 15:02:23,050 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.7600
22
+ 2025-09-18 15:02:26,993 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1891 | Val mean-roc_auc_score: 0.6927
23
+ 2025-09-18 15:02:30,708 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1743 | Val mean-roc_auc_score: 0.7395
24
+ 2025-09-18 15:02:34,845 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2153 | Val mean-roc_auc_score: 0.6879
25
+ 2025-09-18 15:02:38,652 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1464 | Val mean-roc_auc_score: 0.6991
26
+ 2025-09-18 15:02:42,328 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1308 | Val mean-roc_auc_score: 0.7077
27
+ 2025-09-18 15:02:43,537 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.7111
28
+ 2025-09-18 15:02:47,086 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1151 | Val mean-roc_auc_score: 0.7047
29
+ 2025-09-18 15:02:50,839 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1338 | Val mean-roc_auc_score: 0.7127
30
+ 2025-09-18 15:02:54,961 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0913 | Val mean-roc_auc_score: 0.7159
31
+ 2025-09-18 15:02:58,675 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0761 | Val mean-roc_auc_score: 0.7213
32
+ 2025-09-18 15:03:02,528 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0987 | Val mean-roc_auc_score: 0.7098
33
+ 2025-09-18 15:03:06,465 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7295
34
+ 2025-09-18 15:03:10,284 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.6879
35
+ 2025-09-18 15:03:11,707 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0964 | Val mean-roc_auc_score: 0.7265
36
+ 2025-09-18 15:03:15,648 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0535 | Val mean-roc_auc_score: 0.7227
37
+ 2025-09-18 15:03:19,378 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0729 | Val mean-roc_auc_score: 0.7319
38
+ 2025-09-18 15:03:23,118 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0485 | Val mean-roc_auc_score: 0.7207
39
+ 2025-09-18 15:03:26,796 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0364 | Val mean-roc_auc_score: 0.7139
40
+ 2025-09-18 15:03:31,768 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0739 | Val mean-roc_auc_score: 0.7231
41
+ 2025-09-18 15:03:35,595 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7276
42
+ 2025-09-18 15:03:39,260 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0148 | Val mean-roc_auc_score: 0.7361
43
+ 2025-09-18 15:03:45,517 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0467 | Val mean-roc_auc_score: 0.7290
44
+ 2025-09-18 15:03:44,160 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0327 | Val mean-roc_auc_score: 0.7269
45
+ 2025-09-18 15:03:48,217 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0454 | Val mean-roc_auc_score: 0.7487
46
+ 2025-09-18 15:03:52,027 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0331 | Val mean-roc_auc_score: 0.7485
47
+ 2025-09-18 15:03:55,933 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0231 | Val mean-roc_auc_score: 0.7551
48
+ 2025-09-18 15:03:59,747 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0755 | Val mean-roc_auc_score: 0.7323
49
+ 2025-09-18 15:04:03,436 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0374 | Val mean-roc_auc_score: 0.7460
50
+ 2025-09-18 15:04:07,650 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7456
51
+ 2025-09-18 15:04:11,475 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0271 | Val mean-roc_auc_score: 0.7458
52
+ 2025-09-18 15:04:12,732 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0278 | Val mean-roc_auc_score: 0.7348
53
+ 2025-09-18 15:04:16,379 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0271 | Val mean-roc_auc_score: 0.7475
54
+ 2025-09-18 15:04:20,075 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0372 | Val mean-roc_auc_score: 0.7396
55
+ 2025-09-18 15:04:24,075 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0214 | Val mean-roc_auc_score: 0.7459
56
+ 2025-09-18 15:04:27,737 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0215 | Val mean-roc_auc_score: 0.7563
57
+ 2025-09-18 15:04:31,299 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0485 | Val mean-roc_auc_score: 0.7183
58
+ 2025-09-18 15:04:34,890 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0426 | Val mean-roc_auc_score: 0.7329
59
+ 2025-09-18 15:04:38,450 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0178 | Val mean-roc_auc_score: 0.7101
60
+ 2025-09-18 15:04:42,344 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.7239
61
+ 2025-09-18 15:04:43,608 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0286 | Val mean-roc_auc_score: 0.7349
62
+ 2025-09-18 15:04:47,071 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0183 | Val mean-roc_auc_score: 0.7160
63
+ 2025-09-18 15:04:50,640 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.7248
64
+ 2025-09-18 15:04:54,239 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.7233
65
+ 2025-09-18 15:04:58,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0421 | Val mean-roc_auc_score: 0.7340
66
+ 2025-09-18 15:05:02,862 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0636 | Val mean-roc_auc_score: 0.7197
67
+ 2025-09-18 15:05:06,412 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.7165
68
+ 2025-09-18 15:05:09,914 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0187 | Val mean-roc_auc_score: 0.7049
69
+ 2025-09-18 15:05:10,894 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.7028
70
+ 2025-09-18 15:05:15,058 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.7124
71
+ 2025-09-18 15:05:18,575 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.7075
72
+ 2025-09-18 15:05:22,079 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.7129
73
+ 2025-09-18 15:05:25,711 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.7167
74
+ 2025-09-18 15:05:29,193 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0228 | Val mean-roc_auc_score: 0.7250
75
+ 2025-09-18 15:05:33,024 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0271 | Val mean-roc_auc_score: 0.7161
76
+ 2025-09-18 15:05:36,919 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7469
77
+ 2025-09-18 15:05:40,413 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0374 | Val mean-roc_auc_score: 0.7325
78
+ 2025-09-18 15:05:41,383 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0278 | Val mean-roc_auc_score: 0.7159
79
+ 2025-09-18 15:05:45,047 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0129 | Val mean-roc_auc_score: 0.7350
80
+ 2025-09-18 15:05:48,939 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.7293
81
+ 2025-09-18 15:05:52,495 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.7365
82
+ 2025-09-18 15:05:55,927 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0124 | Val mean-roc_auc_score: 0.7509
83
+ 2025-09-18 15:05:59,635 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.7498
84
+ 2025-09-18 15:06:03,197 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0161 | Val mean-roc_auc_score: 0.7444
85
+ 2025-09-18 15:06:07,043 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0155 | Val mean-roc_auc_score: 0.7320
86
+ 2025-09-18 15:06:10,770 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.7491
87
+ 2025-09-18 15:06:11,739 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.7407
88
+ 2025-09-18 15:06:15,228 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.7409
89
+ 2025-09-18 15:06:18,763 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.7450
90
+ 2025-09-18 15:06:23,064 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.7383
91
+ 2025-09-18 15:06:26,545 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.7425
92
+ 2025-09-18 15:06:30,922 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.1016 | Val mean-roc_auc_score: 0.7471
93
+ 2025-09-18 15:06:34,551 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0210 | Val mean-roc_auc_score: 0.7213
94
+ 2025-09-18 15:06:38,123 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0168 | Val mean-roc_auc_score: 0.7229
95
+ 2025-09-18 15:06:42,025 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7339
96
+ 2025-09-18 15:06:43,099 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7321
97
+ 2025-09-18 15:06:47,002 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.7311
98
+ 2025-09-18 15:06:50,820 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.7321
99
+ 2025-09-18 15:06:54,316 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.7281
100
+ 2025-09-18 15:06:58,110 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0208 | Val mean-roc_auc_score: 0.7422
101
+ 2025-09-18 15:07:01,600 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0161 | Val mean-roc_auc_score: 0.7350
102
+ 2025-09-18 15:07:05,217 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.7440
103
+ 2025-09-18 15:07:08,765 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.7375
104
+ 2025-09-18 15:07:12,375 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7471
105
+ 2025-09-18 15:07:13,657 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7429
106
+ 2025-09-18 15:07:17,248 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.7331
107
+ 2025-09-18 15:07:20,814 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7389
108
+ 2025-09-18 15:07:24,587 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7235
109
+ 2025-09-18 15:07:28,015 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.7220
110
+ 2025-09-18 15:07:31,749 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0280 | Val mean-roc_auc_score: 0.7155
111
+ 2025-09-18 15:07:35,352 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0373 | Val mean-roc_auc_score: 0.7207
112
+ 2025-09-18 15:07:38,811 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.7235
113
+ 2025-09-18 15:07:42,271 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.7283
114
+ 2025-09-18 15:07:42,593 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7926
115
+ 2025-09-18 15:07:42,854 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bace_classification at 2025-09-18_15-07-42
116
+ 2025-09-18 15:07:43,485 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.7303 | Val mean-roc_auc_score: 0.6524
117
+ 2025-09-18 15:07:43,485 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
118
+ 2025-09-18 15:07:44,315 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6524
119
+ 2025-09-18 15:07:47,845 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4770 | Val mean-roc_auc_score: 0.7217
120
+ 2025-09-18 15:07:48,004 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
121
+ 2025-09-18 15:07:48,485 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7217
122
+ 2025-09-18 15:07:52,167 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3772 | Val mean-roc_auc_score: 0.7325
123
+ 2025-09-18 15:07:52,329 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
124
+ 2025-09-18 15:07:52,806 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7325
125
+ 2025-09-18 15:07:56,455 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.3339 | Val mean-roc_auc_score: 0.7463
126
+ 2025-09-18 15:07:56,628 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 152
127
+ 2025-09-18 15:07:57,129 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7463
128
+ 2025-09-18 15:08:00,944 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2681 | Val mean-roc_auc_score: 0.7095
129
+ 2025-09-18 15:08:04,625 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2612 | Val mean-roc_auc_score: 0.7310
130
+ 2025-09-18 15:08:08,576 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2319 | Val mean-roc_auc_score: 0.7627
131
+ 2025-09-18 15:08:08,742 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 266
132
+ 2025-09-18 15:08:09,244 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.7627
133
+ 2025-09-18 15:08:12,746 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1816 | Val mean-roc_auc_score: 0.7506
134
+ 2025-09-18 15:08:13,724 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1965 | Val mean-roc_auc_score: 0.7420
135
+ 2025-09-18 15:08:17,354 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1859 | Val mean-roc_auc_score: 0.7114
136
+ 2025-09-18 15:08:21,031 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1649 | Val mean-roc_auc_score: 0.7253
137
+ 2025-09-18 15:08:25,259 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1678 | Val mean-roc_auc_score: 0.7302
138
+ 2025-09-18 15:08:28,830 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1340 | Val mean-roc_auc_score: 0.7034
139
+ 2025-09-18 15:08:32,264 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1387 | Val mean-roc_auc_score: 0.7080
140
+ 2025-09-18 15:08:35,537 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1168 | Val mean-roc_auc_score: 0.7126
141
+ 2025-09-18 15:08:38,946 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1660 | Val mean-roc_auc_score: 0.7265
142
+ 2025-09-18 15:08:42,564 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1118 | Val mean-roc_auc_score: 0.7217
143
+ 2025-09-18 15:08:43,596 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0995 | Val mean-roc_auc_score: 0.7204
144
+ 2025-09-18 15:08:46,981 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1080 | Val mean-roc_auc_score: 0.7265
145
+ 2025-09-18 15:08:50,290 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0975 | Val mean-roc_auc_score: 0.7443
146
+ 2025-09-18 15:08:53,576 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0748 | Val mean-roc_auc_score: 0.7405
147
+ 2025-09-18 15:08:57,236 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0556 | Val mean-roc_auc_score: 0.7150
148
+ 2025-09-18 15:09:00,754 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0500 | Val mean-roc_auc_score: 0.7164
149
+ 2025-09-18 15:09:04,088 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0859 | Val mean-roc_auc_score: 0.7163
150
+ 2025-09-18 15:09:07,435 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0555 | Val mean-roc_auc_score: 0.7240
151
+ 2025-09-18 15:09:10,821 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0310 | Val mean-roc_auc_score: 0.7273
152
+ 2025-09-18 15:09:12,859 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0439 | Val mean-roc_auc_score: 0.7302
153
+ 2025-09-18 15:09:16,121 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0646 | Val mean-roc_auc_score: 0.7467
154
+ 2025-09-18 15:09:19,535 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0496 | Val mean-roc_auc_score: 0.7522
155
+ 2025-09-18 15:09:22,956 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0368 | Val mean-roc_auc_score: 0.7348
156
+ 2025-09-18 15:09:26,180 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0691 | Val mean-roc_auc_score: 0.7437
157
+ 2025-09-18 15:09:29,816 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0320 | Val mean-roc_auc_score: 0.7592
158
+ 2025-09-18 15:09:33,214 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0452 | Val mean-roc_auc_score: 0.7495
159
+ 2025-09-18 15:09:36,584 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0748 | Val mean-roc_auc_score: 0.7348
160
+ 2025-09-18 15:09:39,994 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0500 | Val mean-roc_auc_score: 0.7398
161
+ 2025-09-18 15:09:46,185 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0378 | Val mean-roc_auc_score: 0.7261
162
+ 2025-09-18 15:09:45,042 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.7425
163
+ 2025-09-18 15:09:48,562 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0184 | Val mean-roc_auc_score: 0.7350
164
+ 2025-09-18 15:09:52,090 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0157 | Val mean-roc_auc_score: 0.7373
165
+ 2025-09-18 15:09:55,548 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.7362
166
+ 2025-09-18 15:09:59,143 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0148 | Val mean-roc_auc_score: 0.7222
167
+ 2025-09-18 15:10:03,000 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0391 | Val mean-roc_auc_score: 0.7445
168
+ 2025-09-18 15:10:06,519 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0423 | Val mean-roc_auc_score: 0.7311
169
+ 2025-09-18 15:10:09,810 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0360 | Val mean-roc_auc_score: 0.6991
170
+ 2025-09-18 15:10:13,234 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7149
171
+ 2025-09-18 15:10:14,089 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0125 | Val mean-roc_auc_score: 0.7153
172
+ 2025-09-18 15:10:17,685 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0087 | Val mean-roc_auc_score: 0.7191
173
+ 2025-09-18 15:10:21,246 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0308 | Val mean-roc_auc_score: 0.7208
174
+ 2025-09-18 15:10:24,861 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0271 | Val mean-roc_auc_score: 0.7163
175
+ 2025-09-18 15:10:28,435 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0310 | Val mean-roc_auc_score: 0.7311
176
+ 2025-09-18 15:10:31,923 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0329 | Val mean-roc_auc_score: 0.7261
177
+ 2025-09-18 15:10:35,801 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7109
178
+ 2025-09-18 15:10:40,221 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0128 | Val mean-roc_auc_score: 0.7203
179
+ 2025-09-18 15:10:41,166 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.7234
180
+ 2025-09-18 15:10:44,674 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.7201
181
+ 2025-09-18 15:10:48,220 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.7285
182
+ 2025-09-18 15:10:52,099 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.7236
183
+ 2025-09-18 15:10:55,729 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7285
184
+ 2025-09-18 15:10:59,287 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.7266
185
+ 2025-09-18 15:11:02,893 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.7209
186
+ 2025-09-18 15:11:06,270 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.7186
187
+ 2025-09-18 15:11:10,060 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0180 | Val mean-roc_auc_score: 0.7077
188
+ 2025-09-18 15:11:10,856 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.7188
189
+ 2025-09-18 15:11:14,133 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0204 | Val mean-roc_auc_score: 0.7311
190
+ 2025-09-18 15:11:17,450 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.7174
191
+ 2025-09-18 15:11:20,780 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0137 | Val mean-roc_auc_score: 0.7173
192
+ 2025-09-18 15:11:24,368 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.7270
193
+ 2025-09-18 15:11:27,664 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0167 | Val mean-roc_auc_score: 0.7297
194
+ 2025-09-18 15:11:31,065 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.7351
195
+ 2025-09-18 15:11:34,420 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0132 | Val mean-roc_auc_score: 0.7226
196
+ 2025-09-18 15:11:37,849 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.7293
197
+ 2025-09-18 15:11:41,445 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.7082
198
+ 2025-09-18 15:11:42,385 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0153 | Val mean-roc_auc_score: 0.7203
199
+ 2025-09-18 15:11:45,835 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.7263
200
+ 2025-09-18 15:11:49,337 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0121 | Val mean-roc_auc_score: 0.7190
201
+ 2025-09-18 15:11:53,061 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0241 | Val mean-roc_auc_score: 0.7248
202
+ 2025-09-18 15:11:56,974 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.7163
203
+ 2025-09-18 15:12:00,476 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.7192
204
+ 2025-09-18 15:12:04,787 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0159 | Val mean-roc_auc_score: 0.7199
205
+ 2025-09-18 15:12:08,383 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0151 | Val mean-roc_auc_score: 0.7245
206
+ 2025-09-18 15:12:12,018 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0209 | Val mean-roc_auc_score: 0.7051
207
+ 2025-09-18 15:12:13,336 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0381 | Val mean-roc_auc_score: 0.7106
208
+ 2025-09-18 15:12:16,794 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0382 | Val mean-roc_auc_score: 0.7148
209
+ 2025-09-18 15:12:20,097 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.7048
210
+ 2025-09-18 15:12:23,384 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0182 | Val mean-roc_auc_score: 0.7070
211
+ 2025-09-18 15:12:26,623 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7072
212
+ 2025-09-18 15:12:30,234 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7079
213
+ 2025-09-18 15:12:33,547 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7016
214
+ 2025-09-18 15:12:36,787 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.7011
215
+ 2025-09-18 15:12:40,038 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0165 | Val mean-roc_auc_score: 0.7046
216
+ 2025-09-18 15:12:43,283 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.7084
217
+ 2025-09-18 15:12:44,304 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0229 | Val mean-roc_auc_score: 0.6846
218
+ 2025-09-18 15:12:47,607 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.6935
219
+ 2025-09-18 15:12:50,862 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.6994
220
+ 2025-09-18 15:12:54,077 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0207 | Val mean-roc_auc_score: 0.7148
221
+ 2025-09-18 15:12:57,369 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0182 | Val mean-roc_auc_score: 0.7158
222
+ 2025-09-18 15:13:00,977 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.7016
223
+ 2025-09-18 15:13:04,345 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.6941
224
+ 2025-09-18 15:13:07,646 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.6957
225
+ 2025-09-18 15:13:10,996 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0125 | Val mean-roc_auc_score: 0.7058
226
+ 2025-09-18 15:13:11,331 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8096
227
+ 2025-09-18 15:13:11,587 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bace_classification at 2025-09-18_15-13-11
228
+ 2025-09-18 15:13:11,989 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.6776 | Val mean-roc_auc_score: 0.6787
229
+ 2025-09-18 15:13:11,989 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
230
+ 2025-09-18 15:13:12,577 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6787
231
+ 2025-09-18 15:13:16,116 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4605 | Val mean-roc_auc_score: 0.6951
232
+ 2025-09-18 15:13:16,285 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
233
+ 2025-09-18 15:13:16,937 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.6951
234
+ 2025-09-18 15:13:20,289 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4308 | Val mean-roc_auc_score: 0.7196
235
+ 2025-09-18 15:13:20,460 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
236
+ 2025-09-18 15:13:20,947 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7196
237
+ 2025-09-18 15:13:24,267 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.3174 | Val mean-roc_auc_score: 0.7378
238
+ 2025-09-18 15:13:24,443 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 152
239
+ 2025-09-18 15:13:24,933 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7378
240
+ 2025-09-18 15:13:28,279 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2944 | Val mean-roc_auc_score: 0.7407
241
+ 2025-09-18 15:13:28,450 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 190
242
+ 2025-09-18 15:13:28,933 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7407
243
+ 2025-09-18 15:13:32,364 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2734 | Val mean-roc_auc_score: 0.7238
244
+ 2025-09-18 15:13:35,982 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2220 | Val mean-roc_auc_score: 0.7353
245
+ 2025-09-18 15:13:39,320 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2021 | Val mean-roc_auc_score: 0.6961
246
+ 2025-09-18 15:13:42,820 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1669 | Val mean-roc_auc_score: 0.7447
247
+ 2025-09-18 15:13:42,990 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 342
248
+ 2025-09-18 15:13:43,495 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.7447
249
+ 2025-09-18 15:13:44,799 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1628 | Val mean-roc_auc_score: 0.7172
250
+ 2025-09-18 15:13:48,355 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1441 | Val mean-roc_auc_score: 0.7530
251
+ 2025-09-18 15:13:48,780 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 418
252
+ 2025-09-18 15:13:49,283 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.7530
253
+ 2025-09-18 15:13:53,034 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1291 | Val mean-roc_auc_score: 0.7241
254
+ 2025-09-18 15:13:56,424 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1127 | Val mean-roc_auc_score: 0.7285
255
+ 2025-09-18 15:13:59,900 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1128 | Val mean-roc_auc_score: 0.7361
256
+ 2025-09-18 15:14:03,376 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0905 | Val mean-roc_auc_score: 0.7328
257
+ 2025-09-18 15:14:06,914 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.7179
258
+ 2025-09-18 15:14:10,655 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7168
259
+ 2025-09-18 15:14:11,599 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0958 | Val mean-roc_auc_score: 0.7290
260
+ 2025-09-18 15:14:15,357 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1264 | Val mean-roc_auc_score: 0.6874
261
+ 2025-09-18 15:14:18,923 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0863 | Val mean-roc_auc_score: 0.7040
262
+ 2025-09-18 15:14:22,648 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0699 | Val mean-roc_auc_score: 0.7283
263
+ 2025-09-18 15:14:26,540 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1293 | Val mean-roc_auc_score: 0.7181
264
+ 2025-09-18 15:14:30,104 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0551 | Val mean-roc_auc_score: 0.7114
265
+ 2025-09-18 15:14:33,761 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0524 | Val mean-roc_auc_score: 0.7102
266
+ 2025-09-18 15:14:37,545 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0493 | Val mean-roc_auc_score: 0.7084
267
+ 2025-09-18 15:14:41,319 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0526 | Val mean-roc_auc_score: 0.7011
268
+ 2025-09-18 15:14:43,487 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0409 | Val mean-roc_auc_score: 0.7040
269
+ 2025-09-18 15:14:47,086 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0707 | Val mean-roc_auc_score: 0.7271
270
+ 2025-09-18 15:14:50,363 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0211 | Val mean-roc_auc_score: 0.6969
271
+ 2025-09-18 15:14:53,830 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0382 | Val mean-roc_auc_score: 0.6942
272
+ 2025-09-18 15:14:57,232 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1086 | Val mean-roc_auc_score: 0.7281
273
+ 2025-09-18 15:15:00,907 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0579 | Val mean-roc_auc_score: 0.7227
274
+ 2025-09-18 15:15:04,258 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0360 | Val mean-roc_auc_score: 0.7155
275
+ 2025-09-18 15:15:07,723 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0253 | Val mean-roc_auc_score: 0.7051
276
+ 2025-09-18 15:15:11,096 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0208 | Val mean-roc_auc_score: 0.7002
277
+ 2025-09-18 15:15:12,377 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0405 | Val mean-roc_auc_score: 0.7083
278
+ 2025-09-18 15:15:16,053 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0228 | Val mean-roc_auc_score: 0.7162
279
+ 2025-09-18 15:15:19,434 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0259 | Val mean-roc_auc_score: 0.6978
280
+ 2025-09-18 15:15:22,767 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0185 | Val mean-roc_auc_score: 0.7014
281
+ 2025-09-18 15:15:26,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0270 | Val mean-roc_auc_score: 0.7144
282
+ 2025-09-18 15:15:29,637 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0364 | Val mean-roc_auc_score: 0.7063
283
+ 2025-09-18 15:15:33,367 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0282 | Val mean-roc_auc_score: 0.7164
284
+ 2025-09-18 15:15:36,921 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0234 | Val mean-roc_auc_score: 0.7158
285
+ 2025-09-18 15:15:40,478 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0197 | Val mean-roc_auc_score: 0.7188
286
+ 2025-09-18 15:15:44,005 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0699 | Val mean-roc_auc_score: 0.7120
287
+ 2025-09-18 15:15:44,903 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0407 | Val mean-roc_auc_score: 0.7275
288
+ 2025-09-18 15:15:48,515 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0481 | Val mean-roc_auc_score: 0.7361
289
+ 2025-09-18 15:15:52,249 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0270 | Val mean-roc_auc_score: 0.7264
290
+ 2025-09-18 15:15:55,741 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0458 | Val mean-roc_auc_score: 0.7168
291
+ 2025-09-18 15:15:59,099 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0247 | Val mean-roc_auc_score: 0.7205
292
+ 2025-09-18 15:16:02,629 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0212 | Val mean-roc_auc_score: 0.7333
293
+ 2025-09-18 15:16:06,489 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0161 | Val mean-roc_auc_score: 0.7291
294
+ 2025-09-18 15:16:10,916 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.7312
295
+ 2025-09-18 15:16:11,952 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0206 | Val mean-roc_auc_score: 0.7277
296
+ 2025-09-18 15:16:15,529 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0163 | Val mean-roc_auc_score: 0.7244
297
+ 2025-09-18 15:16:19,227 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.7198
298
+ 2025-09-18 15:16:23,158 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.7233
299
+ 2025-09-18 15:16:26,663 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0327 | Val mean-roc_auc_score: 0.7260
300
+ 2025-09-18 15:16:30,418 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0479 | Val mean-roc_auc_score: 0.7483
301
+ 2025-09-18 15:16:34,154 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0228 | Val mean-roc_auc_score: 0.7284
302
+ 2025-09-18 15:16:37,457 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0315 | Val mean-roc_auc_score: 0.7235
303
+ 2025-09-18 15:16:41,264 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0222 | Val mean-roc_auc_score: 0.7482
304
+ 2025-09-18 15:16:42,180 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0341 | Val mean-roc_auc_score: 0.7354
305
+ 2025-09-18 15:16:45,629 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.7409
306
+ 2025-09-18 15:16:49,292 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.7371
307
+ 2025-09-18 15:16:52,843 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0250 | Val mean-roc_auc_score: 0.7401
308
+ 2025-09-18 15:16:56,537 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0258 | Val mean-roc_auc_score: 0.7371
309
+ 2025-09-18 15:17:00,003 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.7269
310
+ 2025-09-18 15:17:03,592 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0152 | Val mean-roc_auc_score: 0.7309
311
+ 2025-09-18 15:17:06,937 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0175 | Val mean-roc_auc_score: 0.7251
312
+ 2025-09-18 15:17:10,259 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.7293
313
+ 2025-09-18 15:17:13,883 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.7242
314
+ 2025-09-18 15:17:14,821 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7253
315
+ 2025-09-18 15:17:18,157 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.7248
316
+ 2025-09-18 15:17:21,551 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7241
317
+ 2025-09-18 15:17:25,074 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.7192
318
+ 2025-09-18 15:17:28,798 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.7246
319
+ 2025-09-18 15:17:32,112 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7238
320
+ 2025-09-18 15:17:36,322 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.7193
321
+ 2025-09-18 15:17:39,977 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0229 | Val mean-roc_auc_score: 0.7277
322
+ 2025-09-18 15:17:43,358 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0280 | Val mean-roc_auc_score: 0.7174
323
+ 2025-09-18 15:17:44,472 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0124 | Val mean-roc_auc_score: 0.7142
324
+ 2025-09-18 15:17:47,766 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0133 | Val mean-roc_auc_score: 0.7154
325
+ 2025-09-18 15:17:50,997 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0121 | Val mean-roc_auc_score: 0.7278
326
+ 2025-09-18 15:17:54,315 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.7212
327
+ 2025-09-18 15:17:57,708 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.7233
328
+ 2025-09-18 15:18:01,294 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7229
329
+ 2025-09-18 15:18:04,804 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7264
330
+ 2025-09-18 15:18:08,237 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.7293
331
+ 2025-09-18 15:18:11,675 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0154 | Val mean-roc_auc_score: 0.7308
332
+ 2025-09-18 15:18:12,600 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.7122
333
+ 2025-09-18 15:18:16,350 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.7207
334
+ 2025-09-18 15:18:20,173 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.7245
335
+ 2025-09-18 15:18:23,477 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7261
336
+ 2025-09-18 15:18:26,794 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.7246
337
+ 2025-09-18 15:18:30,136 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.7246
338
+ 2025-09-18 15:18:33,685 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.7245
339
+ 2025-09-18 15:18:37,165 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.7246
340
+ 2025-09-18 15:18:40,655 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7219
341
+ 2025-09-18 15:18:44,029 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.7178
342
+ 2025-09-18 15:18:46,973 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8173
343
+ 2025-09-18 15:18:42,119 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.8065, Std Dev: 0.0103
logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_bbbp_epochs100_batch_size32_20250918_151842.log ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:18:42,120 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Running benchmark for dataset: bbbp
2
+ 2025-09-18 15:18:42,120 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - dataset: bbbp, tasks: ['p_np'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 15:18:42,137 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bbbp at 2025-09-18_15-18-42
4
+ 2025-09-18 15:18:45,767 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.3053 | Val mean-roc_auc_score: 0.9943
5
+ 2025-09-18 15:18:45,768 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
6
+ 2025-09-18 15:18:46,511 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9943
7
+ 2025-09-18 15:18:50,991 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.9912
8
+ 2025-09-18 15:18:55,468 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1334 | Val mean-roc_auc_score: 0.9946
9
+ 2025-09-18 15:18:55,629 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 156
10
+ 2025-09-18 15:18:56,127 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9946
11
+ 2025-09-18 15:19:00,656 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0811 | Val mean-roc_auc_score: 0.9947
12
+ 2025-09-18 15:19:00,820 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 208
13
+ 2025-09-18 15:19:01,294 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9947
14
+ 2025-09-18 15:19:05,646 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0637 | Val mean-roc_auc_score: 0.9916
15
+ 2025-09-18 15:19:10,083 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0544 | Val mean-roc_auc_score: 0.9928
16
+ 2025-09-18 15:19:12,386 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0370 | Val mean-roc_auc_score: 0.9935
17
+ 2025-09-18 15:19:16,931 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0287 | Val mean-roc_auc_score: 0.9893
18
+ 2025-09-18 15:19:21,561 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0490 | Val mean-roc_auc_score: 0.9933
19
+ 2025-09-18 15:19:26,106 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0248 | Val mean-roc_auc_score: 0.9915
20
+ 2025-09-18 15:19:30,725 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.9900
21
+ 2025-09-18 15:19:35,722 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9915
22
+ 2025-09-18 15:19:40,287 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9904
23
+ 2025-09-18 15:19:42,132 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9906
24
+ 2025-09-18 15:19:46,338 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9862
25
+ 2025-09-18 15:19:50,639 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.9796
26
+ 2025-09-18 15:19:55,513 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0329 | Val mean-roc_auc_score: 0.9866
27
+ 2025-09-18 15:19:59,915 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.9872
28
+ 2025-09-18 15:20:04,362 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9878
29
+ 2025-09-18 15:20:09,476 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9862
30
+ 2025-09-18 15:20:13,891 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9875
31
+ 2025-09-18 15:20:15,926 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9877
32
+ 2025-09-18 15:20:20,012 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9875
33
+ 2025-09-18 15:20:24,229 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9876
34
+ 2025-09-18 15:20:28,493 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9879
35
+ 2025-09-18 15:20:32,685 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0154 | Val mean-roc_auc_score: 0.9890
36
+ 2025-09-18 15:20:37,443 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9883
37
+ 2025-09-18 15:20:41,844 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9883
38
+ 2025-09-18 15:20:43,631 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.9884
39
+ 2025-09-18 15:20:48,130 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9890
40
+ 2025-09-18 15:20:52,574 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9887
41
+ 2025-09-18 15:20:57,262 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9886
42
+ 2025-09-18 15:21:01,659 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9885
43
+ 2025-09-18 15:21:06,036 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9888
44
+ 2025-09-18 15:21:10,359 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9882
45
+ 2025-09-18 15:21:17,174 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9871
46
+ 2025-09-18 15:21:16,761 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0334 | Val mean-roc_auc_score: 0.9849
47
+ 2025-09-18 15:21:21,269 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0347 | Val mean-roc_auc_score: 0.9851
48
+ 2025-09-18 15:21:26,654 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0128 | Val mean-roc_auc_score: 0.9881
49
+ 2025-09-18 15:21:31,105 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9863
50
+ 2025-09-18 15:21:35,727 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9875
51
+ 2025-09-18 15:21:40,480 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9883
52
+ 2025-09-18 15:21:42,434 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9888
53
+ 2025-09-18 15:21:46,849 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9884
54
+ 2025-09-18 15:21:51,050 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9889
55
+ 2025-09-18 15:21:55,278 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9890
56
+ 2025-09-18 15:21:59,835 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9893
57
+ 2025-09-18 15:22:03,965 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9894
58
+ 2025-09-18 15:22:08,176 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9892
59
+ 2025-09-18 15:22:12,498 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9888
60
+ 2025-09-18 15:22:14,156 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9889
61
+ 2025-09-18 15:22:18,759 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9890
62
+ 2025-09-18 15:22:23,165 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9892
63
+ 2025-09-18 15:22:27,399 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9890
64
+ 2025-09-18 15:22:31,822 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0164 | Val mean-roc_auc_score: 0.9836
65
+ 2025-09-18 15:22:35,958 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9886
66
+ 2025-09-18 15:22:40,838 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9878
67
+ 2025-09-18 15:22:43,672 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9879
68
+ 2025-09-18 15:22:47,955 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9879
69
+ 2025-09-18 15:22:52,292 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9868
70
+ 2025-09-18 15:22:56,875 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9877
71
+ 2025-09-18 15:23:01,316 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9887
72
+ 2025-09-18 15:23:05,535 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9889
73
+ 2025-09-18 15:23:09,690 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9889
74
+ 2025-09-18 15:23:13,974 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9886
75
+ 2025-09-18 15:23:15,740 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9888
76
+ 2025-09-18 15:23:20,461 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9889
77
+ 2025-09-18 15:23:24,641 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9890
78
+ 2025-09-18 15:23:28,728 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9890
79
+ 2025-09-18 15:23:33,072 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9888
80
+ 2025-09-18 15:23:37,421 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9886
81
+ 2025-09-18 15:23:42,050 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9884
82
+ 2025-09-18 15:23:43,689 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9884
83
+ 2025-09-18 15:23:48,286 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9883
84
+ 2025-09-18 15:23:52,781 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9886
85
+ 2025-09-18 15:23:57,166 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9884
86
+ 2025-09-18 15:24:02,856 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9888
87
+ 2025-09-18 15:24:07,327 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9886
88
+ 2025-09-18 15:24:11,674 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9884
89
+ 2025-09-18 15:24:13,594 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9884
90
+ 2025-09-18 15:24:17,881 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9886
91
+ 2025-09-18 15:24:22,310 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9883
92
+ 2025-09-18 15:24:26,552 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9884
93
+ 2025-09-18 15:24:30,702 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9886
94
+ 2025-09-18 15:24:34,853 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9881
95
+ 2025-09-18 15:24:39,353 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9884
96
+ 2025-09-18 15:24:44,080 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9885
97
+ 2025-09-18 15:24:45,981 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9883
98
+ 2025-09-18 15:24:50,137 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9882
99
+ 2025-09-18 15:24:54,502 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9883
100
+ 2025-09-18 15:24:58,767 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9885
101
+ 2025-09-18 15:25:03,534 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9887
102
+ 2025-09-18 15:25:07,819 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9885
103
+ 2025-09-18 15:25:12,024 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9888
104
+ 2025-09-18 15:25:13,969 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9893
105
+ 2025-09-18 15:25:18,581 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9888
106
+ 2025-09-18 15:25:23,887 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9888
107
+ 2025-09-18 15:25:28,163 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9880
108
+ 2025-09-18 15:25:32,500 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9881
109
+ 2025-09-18 15:25:36,626 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9882
110
+ 2025-09-18 15:25:36,996 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7434
111
+ 2025-09-18 15:25:37,307 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bbbp at 2025-09-18_15-25-37
112
+ 2025-09-18 15:25:40,820 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2788 | Val mean-roc_auc_score: 0.9881
113
+ 2025-09-18 15:25:40,820 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
114
+ 2025-09-18 15:25:41,552 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9881
115
+ 2025-09-18 15:25:43,633 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1875 | Val mean-roc_auc_score: 0.9923
116
+ 2025-09-18 15:25:43,798 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 104
117
+ 2025-09-18 15:25:44,294 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9923
118
+ 2025-09-18 15:25:48,847 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1238 | Val mean-roc_auc_score: 0.9945
119
+ 2025-09-18 15:25:49,015 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 156
120
+ 2025-09-18 15:25:49,503 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9945
121
+ 2025-09-18 15:25:53,787 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0806 | Val mean-roc_auc_score: 0.9907
122
+ 2025-09-18 15:25:58,038 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0622 | Val mean-roc_auc_score: 0.9908
123
+ 2025-09-18 15:26:02,488 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.9929
124
+ 2025-09-18 15:26:07,251 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0406 | Val mean-roc_auc_score: 0.9924
125
+ 2025-09-18 15:26:11,518 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0204 | Val mean-roc_auc_score: 0.9949
126
+ 2025-09-18 15:26:11,689 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 416
127
+ 2025-09-18 15:26:12,213 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.9949
128
+ 2025-09-18 15:26:14,383 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0192 | Val mean-roc_auc_score: 0.9959
129
+ 2025-09-18 15:26:14,552 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 468
130
+ 2025-09-18 15:26:15,050 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.9959
131
+ 2025-09-18 15:26:19,324 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0207 | Val mean-roc_auc_score: 0.9961
132
+ 2025-09-18 15:26:19,487 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 520
133
+ 2025-09-18 15:26:19,993 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9961
134
+ 2025-09-18 15:26:24,322 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.9967
135
+ 2025-09-18 15:26:24,772 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 572
136
+ 2025-09-18 15:26:25,316 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9967
137
+ 2025-09-18 15:26:29,828 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.9969
138
+ 2025-09-18 15:26:29,999 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 624
139
+ 2025-09-18 15:26:30,503 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9969
140
+ 2025-09-18 15:26:35,198 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9967
141
+ 2025-09-18 15:26:39,467 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9965
142
+ 2025-09-18 15:26:43,816 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0119 | Val mean-roc_auc_score: 0.9959
143
+ 2025-09-18 15:26:45,585 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.9962
144
+ 2025-09-18 15:26:50,298 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9958
145
+ 2025-09-18 15:26:54,583 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0238 | Val mean-roc_auc_score: 0.9951
146
+ 2025-09-18 15:26:58,797 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0203 | Val mean-roc_auc_score: 0.9946
147
+ 2025-09-18 15:27:03,860 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0305 | Val mean-roc_auc_score: 0.9945
148
+ 2025-09-18 15:27:08,110 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0436 | Val mean-roc_auc_score: 0.9955
149
+ 2025-09-18 15:27:12,742 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.9976
150
+ 2025-09-18 15:27:12,883 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1144
151
+ 2025-09-18 15:27:13,384 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 22 with val mean-roc_auc_score: 0.9976
152
+ 2025-09-18 15:27:15,256 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9978
153
+ 2025-09-18 15:27:15,427 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1196
154
+ 2025-09-18 15:27:15,915 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 23 with val mean-roc_auc_score: 0.9978
155
+ 2025-09-18 15:27:20,212 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9975
156
+ 2025-09-18 15:27:24,631 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9972
157
+ 2025-09-18 15:27:29,084 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9972
158
+ 2025-09-18 15:27:33,907 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9970
159
+ 2025-09-18 15:27:38,401 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9971
160
+ 2025-09-18 15:27:42,792 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9980
161
+ 2025-09-18 15:27:42,931 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1508
162
+ 2025-09-18 15:27:43,504 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 29 with val mean-roc_auc_score: 0.9980
163
+ 2025-09-18 15:27:45,525 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.9972
164
+ 2025-09-18 15:27:49,953 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9974
165
+ 2025-09-18 15:27:54,704 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9972
166
+ 2025-09-18 15:27:59,299 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9972
167
+ 2025-09-18 15:28:03,557 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9971
168
+ 2025-09-18 15:28:08,139 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9970
169
+ 2025-09-18 15:28:12,569 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9972
170
+ 2025-09-18 15:28:14,989 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9970
171
+ 2025-09-18 15:28:19,274 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9971
172
+ 2025-09-18 15:28:24,573 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9973
173
+ 2025-09-18 15:28:29,228 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9971
174
+ 2025-09-18 15:28:33,643 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9971
175
+ 2025-09-18 15:28:38,312 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9970
176
+ 2025-09-18 15:28:42,651 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9969
177
+ 2025-09-18 15:28:44,554 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9970
178
+ 2025-09-18 15:28:48,962 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9970
179
+ 2025-09-18 15:28:53,277 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9969
180
+ 2025-09-18 15:28:58,048 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9971
181
+ 2025-09-18 15:29:02,528 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9970
182
+ 2025-09-18 15:29:06,997 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9970
183
+ 2025-09-18 15:29:11,626 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9971
184
+ 2025-09-18 15:29:13,642 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9970
185
+ 2025-09-18 15:29:18,434 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0157 | Val mean-roc_auc_score: 0.9943
186
+ 2025-09-18 15:29:22,663 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0121 | Val mean-roc_auc_score: 0.9966
187
+ 2025-09-18 15:29:26,830 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.9973
188
+ 2025-09-18 15:29:30,982 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9971
189
+ 2025-09-18 15:29:35,287 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9971
190
+ 2025-09-18 15:29:39,921 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9972
191
+ 2025-09-18 15:29:44,945 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9972
192
+ 2025-09-18 15:29:46,722 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9973
193
+ 2025-09-18 15:29:51,122 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9971
194
+ 2025-09-18 15:29:55,445 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9970
195
+ 2025-09-18 15:30:00,266 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9969
196
+ 2025-09-18 15:30:04,715 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9962
197
+ 2025-09-18 15:30:09,225 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9965
198
+ 2025-09-18 15:30:13,772 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9965
199
+ 2025-09-18 15:30:15,498 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9966
200
+ 2025-09-18 15:30:20,095 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9965
201
+ 2025-09-18 15:30:24,325 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9966
202
+ 2025-09-18 15:30:28,443 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9965
203
+ 2025-09-18 15:30:32,676 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9965
204
+ 2025-09-18 15:30:37,069 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9966
205
+ 2025-09-18 15:30:41,560 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9965
206
+ 2025-09-18 15:30:43,232 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9966
207
+ 2025-09-18 15:30:47,577 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9966
208
+ 2025-09-18 15:30:51,836 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9967
209
+ 2025-09-18 15:30:56,386 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9965
210
+ 2025-09-18 15:31:02,349 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9967
211
+ 2025-09-18 15:31:06,500 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9966
212
+ 2025-09-18 15:31:10,916 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9964
213
+ 2025-09-18 15:31:15,260 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9966
214
+ 2025-09-18 15:31:17,221 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9966
215
+ 2025-09-18 15:31:21,861 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9966
216
+ 2025-09-18 15:31:26,198 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9965
217
+ 2025-09-18 15:31:30,792 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9966
218
+ 2025-09-18 15:31:35,313 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9963
219
+ 2025-09-18 15:31:39,758 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9960
220
+ 2025-09-18 15:31:44,429 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9962
221
+ 2025-09-18 15:31:46,350 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9961
222
+ 2025-09-18 15:31:50,849 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9962
223
+ 2025-09-18 15:31:55,208 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9966
224
+ 2025-09-18 15:31:59,319 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9967
225
+ 2025-09-18 15:32:03,952 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9966
226
+ 2025-09-18 15:32:08,190 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9963
227
+ 2025-09-18 15:32:12,562 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9962
228
+ 2025-09-18 15:32:14,383 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9964
229
+ 2025-09-18 15:32:18,743 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9965
230
+ 2025-09-18 15:32:24,342 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9965
231
+ 2025-09-18 15:32:28,705 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9965
232
+ 2025-09-18 15:32:33,251 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9966
233
+ 2025-09-18 15:32:37,507 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9966
234
+ 2025-09-18 15:32:37,856 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7122
235
+ 2025-09-18 15:32:38,169 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bbbp at 2025-09-18_15-32-38
236
+ 2025-09-18 15:32:41,946 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2776 | Val mean-roc_auc_score: 0.9941
237
+ 2025-09-18 15:32:41,946 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
238
+ 2025-09-18 15:32:42,551 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9941
239
+ 2025-09-18 15:32:44,736 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4297 | Val mean-roc_auc_score: 0.9950
240
+ 2025-09-18 15:32:44,899 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 104
241
+ 2025-09-18 15:32:45,378 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9950
242
+ 2025-09-18 15:32:49,713 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1118 | Val mean-roc_auc_score: 0.9930
243
+ 2025-09-18 15:32:53,900 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0859 | Val mean-roc_auc_score: 0.9938
244
+ 2025-09-18 15:32:58,245 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.9907
245
+ 2025-09-18 15:33:02,695 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0690 | Val mean-roc_auc_score: 0.9918
246
+ 2025-09-18 15:33:07,315 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0451 | Val mean-roc_auc_score: 0.9938
247
+ 2025-09-18 15:33:11,908 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0337 | Val mean-roc_auc_score: 0.9939
248
+ 2025-09-18 15:33:13,923 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0230 | Val mean-roc_auc_score: 0.9940
249
+ 2025-09-18 15:33:18,332 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0175 | Val mean-roc_auc_score: 0.9947
250
+ 2025-09-18 15:33:22,730 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0234 | Val mean-roc_auc_score: 0.9938
251
+ 2025-09-18 15:33:27,349 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0233 | Val mean-roc_auc_score: 0.9970
252
+ 2025-09-18 15:33:27,485 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 624
253
+ 2025-09-18 15:33:27,974 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9970
254
+ 2025-09-18 15:33:32,467 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0249 | Val mean-roc_auc_score: 0.9964
255
+ 2025-09-18 15:33:36,953 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0314 | Val mean-roc_auc_score: 0.9958
256
+ 2025-09-18 15:33:41,263 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0320 | Val mean-roc_auc_score: 0.9918
257
+ 2025-09-18 15:33:43,099 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0232 | Val mean-roc_auc_score: 0.9946
258
+ 2025-09-18 15:33:47,966 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.9938
259
+ 2025-09-18 15:33:52,076 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.9941
260
+ 2025-09-18 15:33:56,175 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.9947
261
+ 2025-09-18 15:34:01,271 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9944
262
+ 2025-09-18 15:34:05,328 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9944
263
+ 2025-09-18 15:34:09,767 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9945
264
+ 2025-09-18 15:34:14,027 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9942
265
+ 2025-09-18 15:34:15,771 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9944
266
+ 2025-09-18 15:34:20,229 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9944
267
+ 2025-09-18 15:34:24,764 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9943
268
+ 2025-09-18 15:34:29,530 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9943
269
+ 2025-09-18 15:34:33,833 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9943
270
+ 2025-09-18 15:34:37,898 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9943
271
+ 2025-09-18 15:34:42,042 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9938
272
+ 2025-09-18 15:34:43,716 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9937
273
+ 2025-09-18 15:34:48,309 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9939
274
+ 2025-09-18 15:34:52,519 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9936
275
+ 2025-09-18 15:34:56,722 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9940
276
+ 2025-09-18 15:35:00,936 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0142 | Val mean-roc_auc_score: 0.9938
277
+ 2025-09-18 15:35:05,230 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.9940
278
+ 2025-09-18 15:35:10,102 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0104 | Val mean-roc_auc_score: 0.9943
279
+ 2025-09-18 15:35:14,321 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0270 | Val mean-roc_auc_score: 0.9938
280
+ 2025-09-18 15:35:16,769 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0119 | Val mean-roc_auc_score: 0.9927
281
+ 2025-09-18 15:35:20,918 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9939
282
+ 2025-09-18 15:35:25,035 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9939
283
+ 2025-09-18 15:35:29,575 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9937
284
+ 2025-09-18 15:35:33,690 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9936
285
+ 2025-09-18 15:35:38,239 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9935
286
+ 2025-09-18 15:35:42,648 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9937
287
+ 2025-09-18 15:35:44,647 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9938
288
+ 2025-09-18 15:35:49,393 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9935
289
+ 2025-09-18 15:35:53,861 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9938
290
+ 2025-09-18 15:35:58,266 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9936
291
+ 2025-09-18 15:36:02,651 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9937
292
+ 2025-09-18 15:36:07,114 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9935
293
+ 2025-09-18 15:36:12,138 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9934
294
+ 2025-09-18 15:36:14,127 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9933
295
+ 2025-09-18 15:36:18,502 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9937
296
+ 2025-09-18 15:36:22,872 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9918
297
+ 2025-09-18 15:36:27,267 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9925
298
+ 2025-09-18 15:36:31,982 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9933
299
+ 2025-09-18 15:36:36,981 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9934
300
+ 2025-09-18 15:36:41,159 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9933
301
+ 2025-09-18 15:36:45,397 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.9935
302
+ 2025-09-18 15:36:47,344 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9937
303
+ 2025-09-18 15:36:51,993 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9931
304
+ 2025-09-18 15:36:56,453 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9933
305
+ 2025-09-18 15:37:00,659 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9933
306
+ 2025-09-18 15:37:04,901 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9932
307
+ 2025-09-18 15:37:09,846 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9933
308
+ 2025-09-18 15:37:14,442 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9930
309
+ 2025-09-18 15:37:16,246 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9937
310
+ 2025-09-18 15:37:20,311 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9932
311
+ 2025-09-18 15:37:24,570 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9937
312
+ 2025-09-18 15:37:28,766 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0412 | Val mean-roc_auc_score: 0.9755
313
+ 2025-09-18 15:37:33,218 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.9932
314
+ 2025-09-18 15:37:37,244 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0257 | Val mean-roc_auc_score: 0.9949
315
+ 2025-09-18 15:37:41,683 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9956
316
+ 2025-09-18 15:37:48,757 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0225 | Val mean-roc_auc_score: 0.9958
317
+ 2025-09-18 15:37:47,936 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.9957
318
+ 2025-09-18 15:37:53,405 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9957
319
+ 2025-09-18 15:37:57,740 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9957
320
+ 2025-09-18 15:38:02,189 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9956
321
+ 2025-09-18 15:38:06,670 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9956
322
+ 2025-09-18 15:38:11,195 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9957
323
+ 2025-09-18 15:38:16,045 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9957
324
+ 2025-09-18 15:38:18,035 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9957
325
+ 2025-09-18 15:38:22,409 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9956
326
+ 2025-09-18 15:38:26,687 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9956
327
+ 2025-09-18 15:38:30,876 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9955
328
+ 2025-09-18 15:38:35,712 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9955
329
+ 2025-09-18 15:38:39,978 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9954
330
+ 2025-09-18 15:38:44,352 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9954
331
+ 2025-09-18 15:38:46,030 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9955
332
+ 2025-09-18 15:38:50,346 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9954
333
+ 2025-09-18 15:38:55,022 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9955
334
+ 2025-09-18 15:38:59,368 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9955
335
+ 2025-09-18 15:39:03,775 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9951
336
+ 2025-09-18 15:39:08,239 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9954
337
+ 2025-09-18 15:39:12,645 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9953
338
+ 2025-09-18 15:39:15,886 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9952
339
+ 2025-09-18 15:39:20,121 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9955
340
+ 2025-09-18 15:39:24,378 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9954
341
+ 2025-09-18 15:39:28,732 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9955
342
+ 2025-09-18 15:39:29,096 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7110
343
+ 2025-09-18 15:39:29,415 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7222, Std Dev: 0.0150
logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_clintox_epochs100_batch_size32_20250918_170254.log ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 17:02:54,705 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Running benchmark for dataset: clintox
2
+ 2025-09-18 17:02:54,705 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - dataset: clintox, tasks: ['FDA_APPROVED', 'CT_TOX'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 17:02:54,713 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset clintox at 2025-09-18_17-02-54
4
+ 2025-09-18 17:02:58,564 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1765 | Val mean-roc_auc_score: 0.9423
5
+ 2025-09-18 17:02:58,565 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
6
+ 2025-09-18 17:02:59,109 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9423
7
+ 2025-09-18 17:03:03,791 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0515 | Val mean-roc_auc_score: 0.9793
8
+ 2025-09-18 17:03:03,967 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
9
+ 2025-09-18 17:03:04,581 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9793
10
+ 2025-09-18 17:03:07,199 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0217 | Val mean-roc_auc_score: 0.9768
11
+ 2025-09-18 17:03:11,829 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0287 | Val mean-roc_auc_score: 0.9861
12
+ 2025-09-18 17:03:12,016 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 148
13
+ 2025-09-18 17:03:12,575 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9861
14
+ 2025-09-18 17:03:17,467 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0296 | Val mean-roc_auc_score: 0.9823
15
+ 2025-09-18 17:03:22,352 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0249 | Val mean-roc_auc_score: 0.9814
16
+ 2025-09-18 17:03:27,317 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0163 | Val mean-roc_auc_score: 0.9788
17
+ 2025-09-18 17:03:31,879 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0176 | Val mean-roc_auc_score: 0.9855
18
+ 2025-09-18 17:03:36,703 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0236 | Val mean-roc_auc_score: 0.9770
19
+ 2025-09-18 17:03:39,394 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.9822
20
+ 2025-09-18 17:03:44,303 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9881
21
+ 2025-09-18 17:03:44,893 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 407
22
+ 2025-09-18 17:03:45,452 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9881
23
+ 2025-09-18 17:03:50,449 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0131 | Val mean-roc_auc_score: 0.9895
24
+ 2025-09-18 17:03:50,657 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 444
25
+ 2025-09-18 17:03:51,234 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9895
26
+ 2025-09-18 17:03:56,346 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9863
27
+ 2025-09-18 17:04:01,143 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9873
28
+ 2025-09-18 17:04:05,972 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.9909
29
+ 2025-09-18 17:04:06,154 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 555
30
+ 2025-09-18 17:04:06,712 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val mean-roc_auc_score: 0.9909
31
+ 2025-09-18 17:04:08,884 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9872
32
+ 2025-09-18 17:04:14,646 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9810
33
+ 2025-09-18 17:04:19,142 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0113 | Val mean-roc_auc_score: 0.9882
34
+ 2025-09-18 17:04:24,050 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9867
35
+ 2025-09-18 17:04:28,908 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0306 | Val mean-roc_auc_score: 0.9829
36
+ 2025-09-18 17:04:33,576 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0285 | Val mean-roc_auc_score: 0.9799
37
+ 2025-09-18 17:04:41,445 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.9840
38
+ 2025-09-18 17:04:40,645 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9812
39
+ 2025-09-18 17:04:45,216 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0302 | Val mean-roc_auc_score: 0.9855
40
+ 2025-09-18 17:04:50,145 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9839
41
+ 2025-09-18 17:04:54,748 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9834
42
+ 2025-09-18 17:05:00,783 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.9834
43
+ 2025-09-18 17:05:05,829 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0087 | Val mean-roc_auc_score: 0.9834
44
+ 2025-09-18 17:05:08,195 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9890
45
+ 2025-09-18 17:05:13,097 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.9879
46
+ 2025-09-18 17:05:18,142 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9891
47
+ 2025-09-18 17:05:23,656 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9891
48
+ 2025-09-18 17:05:28,412 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9891
49
+ 2025-09-18 17:05:33,118 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9912
50
+ 2025-09-18 17:05:33,264 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1258
51
+ 2025-09-18 17:05:33,842 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 34 with val mean-roc_auc_score: 0.9912
52
+ 2025-09-18 17:05:41,371 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9913
53
+ 2025-09-18 17:05:36,247 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1295
54
+ 2025-09-18 17:05:36,803 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 35 with val mean-roc_auc_score: 0.9913
55
+ 2025-09-18 17:05:41,374 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9886
56
+ 2025-09-18 17:05:46,534 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9912
57
+ 2025-09-18 17:05:51,462 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9875
58
+ 2025-09-18 17:05:56,117 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9912
59
+ 2025-09-18 17:06:00,976 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9886
60
+ 2025-09-18 17:06:05,564 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9899
61
+ 2025-09-18 17:06:08,458 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9910
62
+ 2025-09-18 17:06:13,455 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9908
63
+ 2025-09-18 17:06:18,158 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9890
64
+ 2025-09-18 17:06:22,715 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9912
65
+ 2025-09-18 17:06:27,006 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9907
66
+ 2025-09-18 17:06:31,900 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9892
67
+ 2025-09-18 17:06:36,453 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9863
68
+ 2025-09-18 17:06:38,458 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9788
69
+ 2025-09-18 17:06:42,971 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0152 | Val mean-roc_auc_score: 0.9824
70
+ 2025-09-18 17:06:47,365 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0245 | Val mean-roc_auc_score: 0.9910
71
+ 2025-09-18 17:06:52,478 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9909
72
+ 2025-09-18 17:06:57,181 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9915
73
+ 2025-09-18 17:06:57,347 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1961
74
+ 2025-09-18 17:06:57,953 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 53 with val mean-roc_auc_score: 0.9915
75
+ 2025-09-18 17:07:02,576 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9926
76
+ 2025-09-18 17:07:02,765 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1998
77
+ 2025-09-18 17:07:03,364 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 54 with val mean-roc_auc_score: 0.9926
78
+ 2025-09-18 17:07:07,445 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9926
79
+ 2025-09-18 17:07:12,364 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.9915
80
+ 2025-09-18 17:07:17,254 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9932
81
+ 2025-09-18 17:07:17,432 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2109
82
+ 2025-09-18 17:07:18,011 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 57 with val mean-roc_auc_score: 0.9932
83
+ 2025-09-18 17:07:23,080 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9932
84
+ 2025-09-18 17:07:28,154 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9921
85
+ 2025-09-18 17:07:33,017 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9932
86
+ 2025-09-18 17:07:37,777 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9904
87
+ 2025-09-18 17:07:40,448 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9938
88
+ 2025-09-18 17:07:40,603 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2294
89
+ 2025-09-18 17:07:41,228 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 62 with val mean-roc_auc_score: 0.9938
90
+ 2025-09-18 17:07:46,513 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9926
91
+ 2025-09-18 17:07:51,672 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9932
92
+ 2025-09-18 17:07:56,672 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9915
93
+ 2025-09-18 17:08:01,551 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9915
94
+ 2025-09-18 17:08:07,030 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9881
95
+ 2025-09-18 17:08:09,211 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9903
96
+ 2025-09-18 17:08:14,109 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9898
97
+ 2025-09-18 17:08:19,086 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9909
98
+ 2025-09-18 17:08:23,571 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9892
99
+ 2025-09-18 17:08:28,621 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9920
100
+ 2025-09-18 17:08:33,048 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0001 | Val mean-roc_auc_score: 0.9893
101
+ 2025-09-18 17:08:37,663 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9906
102
+ 2025-09-18 17:08:39,814 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9925
103
+ 2025-09-18 17:08:43,828 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9920
104
+ 2025-09-18 17:08:48,955 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9909
105
+ 2025-09-18 17:08:53,829 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9909
106
+ 2025-09-18 17:08:58,446 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9909
107
+ 2025-09-18 17:09:03,036 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9928
108
+ 2025-09-18 17:09:07,528 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9875
109
+ 2025-09-18 17:09:10,874 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9892
110
+ 2025-09-18 17:09:15,348 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9897
111
+ 2025-09-18 17:09:19,466 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9897
112
+ 2025-09-18 17:09:24,085 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9909
113
+ 2025-09-18 17:09:29,066 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.9960
114
+ 2025-09-18 17:09:29,627 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 3182
115
+ 2025-09-18 17:09:30,209 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 86 with val mean-roc_auc_score: 0.9960
116
+ 2025-09-18 17:09:34,870 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9942
117
+ 2025-09-18 17:09:37,007 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.9951
118
+ 2025-09-18 17:09:41,858 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9948
119
+ 2025-09-18 17:09:46,486 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9948
120
+ 2025-09-18 17:09:50,807 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9943
121
+ 2025-09-18 17:09:55,737 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9938
122
+ 2025-09-18 17:10:00,089 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9943
123
+ 2025-09-18 17:10:04,660 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9943
124
+ 2025-09-18 17:10:06,741 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9937
125
+ 2025-09-18 17:10:11,281 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9937
126
+ 2025-09-18 17:10:16,281 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9938
127
+ 2025-09-18 17:10:20,697 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9954
128
+ 2025-09-18 17:10:25,108 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9937
129
+ 2025-09-18 17:10:29,779 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9937
130
+ 2025-09-18 17:10:30,238 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9928
131
+ 2025-09-18 17:10:30,678 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset clintox at 2025-09-18_17-10-30
132
+ 2025-09-18 17:10:34,175 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2314 | Val mean-roc_auc_score: 0.9598
133
+ 2025-09-18 17:10:34,175 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
134
+ 2025-09-18 17:10:34,744 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9598
135
+ 2025-09-18 17:10:36,970 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.9739
136
+ 2025-09-18 17:10:37,183 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
137
+ 2025-09-18 17:10:37,757 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9739
138
+ 2025-09-18 17:10:42,531 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0291 | Val mean-roc_auc_score: 0.9738
139
+ 2025-09-18 17:10:47,200 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0296 | Val mean-roc_auc_score: 0.9725
140
+ 2025-09-18 17:10:51,353 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0332 | Val mean-roc_auc_score: 0.9756
141
+ 2025-09-18 17:10:51,548 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 185
142
+ 2025-09-18 17:10:52,143 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9756
143
+ 2025-09-18 17:10:56,125 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0164 | Val mean-roc_auc_score: 0.9826
144
+ 2025-09-18 17:10:56,728 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 222
145
+ 2025-09-18 17:10:57,417 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9826
146
+ 2025-09-18 17:11:02,006 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0219 | Val mean-roc_auc_score: 0.9794
147
+ 2025-09-18 17:11:06,714 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0178 | Val mean-roc_auc_score: 0.9800
148
+ 2025-09-18 17:11:09,147 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0148 | Val mean-roc_auc_score: 0.9817
149
+ 2025-09-18 17:11:13,790 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9860
150
+ 2025-09-18 17:11:14,022 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 370
151
+ 2025-09-18 17:11:14,645 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9860
152
+ 2025-09-18 17:11:18,924 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.9879
153
+ 2025-09-18 17:11:19,524 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 407
154
+ 2025-09-18 17:11:20,113 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9879
155
+ 2025-09-18 17:11:24,593 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9879
156
+ 2025-09-18 17:11:29,226 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0272 | Val mean-roc_auc_score: 0.9832
157
+ 2025-09-18 17:11:34,119 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0302 | Val mean-roc_auc_score: 0.9877
158
+ 2025-09-18 17:11:36,618 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0134 | Val mean-roc_auc_score: 0.9879
159
+ 2025-09-18 17:11:41,385 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9875
160
+ 2025-09-18 17:11:46,230 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9902
161
+ 2025-09-18 17:11:46,414 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 629
162
+ 2025-09-18 17:11:47,082 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val mean-roc_auc_score: 0.9902
163
+ 2025-09-18 17:11:51,592 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9887
164
+ 2025-09-18 17:11:56,316 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0155 | Val mean-roc_auc_score: 0.9867
165
+ 2025-09-18 17:12:01,627 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9909
166
+ 2025-09-18 17:12:01,817 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 740
167
+ 2025-09-18 17:12:02,409 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val mean-roc_auc_score: 0.9909
168
+ 2025-09-18 17:12:07,741 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.9891
169
+ 2025-09-18 17:12:11,129 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0271 | Val mean-roc_auc_score: 0.9869
170
+ 2025-09-18 17:12:15,712 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0162 | Val mean-roc_auc_score: 0.9775
171
+ 2025-09-18 17:12:20,322 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9841
172
+ 2025-09-18 17:12:25,313 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9867
173
+ 2025-09-18 17:12:30,319 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9862
174
+ 2025-09-18 17:12:36,370 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9891
175
+ 2025-09-18 17:12:38,162 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9864
176
+ 2025-09-18 17:12:43,042 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9903
177
+ 2025-09-18 17:12:48,166 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.9503
178
+ 2025-09-18 17:12:53,038 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9829
179
+ 2025-09-18 17:12:58,031 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.9886
180
+ 2025-09-18 17:13:02,309 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9850
181
+ 2025-09-18 17:13:06,878 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0152 | Val mean-roc_auc_score: 0.9845
182
+ 2025-09-18 17:13:09,378 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.9895
183
+ 2025-09-18 17:13:14,109 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0203 | Val mean-roc_auc_score: 0.9786
184
+ 2025-09-18 17:13:18,840 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.9835
185
+ 2025-09-18 17:13:23,482 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9830
186
+ 2025-09-18 17:13:27,837 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9863
187
+ 2025-09-18 17:13:32,872 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9836
188
+ 2025-09-18 17:13:37,584 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9858
189
+ 2025-09-18 17:13:40,050 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.9841
190
+ 2025-09-18 17:13:44,159 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9841
191
+ 2025-09-18 17:13:48,678 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9841
192
+ 2025-09-18 17:13:53,379 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9829
193
+ 2025-09-18 17:13:58,090 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0004 | Val mean-roc_auc_score: 0.9834
194
+ 2025-09-18 17:14:03,368 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9802
195
+ 2025-09-18 17:14:07,695 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9810
196
+ 2025-09-18 17:14:09,288 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9820
197
+ 2025-09-18 17:14:14,092 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9844
198
+ 2025-09-18 17:14:18,847 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0399 | Val mean-roc_auc_score: 0.9902
199
+ 2025-09-18 17:14:23,732 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.9856
200
+ 2025-09-18 17:14:27,951 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.9876
201
+ 2025-09-18 17:14:32,565 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9868
202
+ 2025-09-18 17:14:37,891 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9869
203
+ 2025-09-18 17:14:40,110 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9857
204
+ 2025-09-18 17:14:45,093 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9867
205
+ 2025-09-18 17:14:49,301 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9874
206
+ 2025-09-18 17:14:53,599 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9875
207
+ 2025-09-18 17:14:58,370 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9867
208
+ 2025-09-18 17:15:03,241 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9870
209
+ 2025-09-18 17:15:08,189 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9867
210
+ 2025-09-18 17:15:10,191 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9872
211
+ 2025-09-18 17:15:14,671 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9884
212
+ 2025-09-18 17:15:19,416 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9873
213
+ 2025-09-18 17:15:24,105 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9860
214
+ 2025-09-18 17:15:29,231 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9860
215
+ 2025-09-18 17:15:33,280 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9860
216
+ 2025-09-18 17:15:37,691 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9846
217
+ 2025-09-18 17:15:39,849 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9878
218
+ 2025-09-18 17:15:44,666 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9856
219
+ 2025-09-18 17:15:49,989 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9849
220
+ 2025-09-18 17:15:54,244 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9849
221
+ 2025-09-18 17:15:58,563 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9849
222
+ 2025-09-18 17:16:03,229 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9844
223
+ 2025-09-18 17:16:08,120 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9870
224
+ 2025-09-18 17:16:10,272 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9868
225
+ 2025-09-18 17:16:14,775 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9852
226
+ 2025-09-18 17:16:19,719 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9893
227
+ 2025-09-18 17:16:24,390 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9887
228
+ 2025-09-18 17:16:28,780 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9887
229
+ 2025-09-18 17:16:34,815 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9887
230
+ 2025-09-18 17:16:39,036 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9881
231
+ 2025-09-18 17:16:40,772 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9887
232
+ 2025-09-18 17:16:45,658 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9893
233
+ 2025-09-18 17:16:50,023 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9893
234
+ 2025-09-18 17:16:54,458 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9901
235
+ 2025-09-18 17:16:58,844 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9904
236
+ 2025-09-18 17:17:03,740 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9893
237
+ 2025-09-18 17:17:08,633 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9887
238
+ 2025-09-18 17:17:11,110 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9887
239
+ 2025-09-18 17:17:16,595 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9887
240
+ 2025-09-18 17:17:21,205 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9881
241
+ 2025-09-18 17:17:26,308 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9881
242
+ 2025-09-18 17:17:31,056 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9881
243
+ 2025-09-18 17:17:35,690 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9886
244
+ 2025-09-18 17:17:37,940 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9881
245
+ 2025-09-18 17:17:42,131 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9884
246
+ 2025-09-18 17:17:47,297 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9870
247
+ 2025-09-18 17:17:52,700 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9884
248
+ 2025-09-18 17:17:53,059 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9396
249
+ 2025-09-18 17:17:53,477 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset clintox at 2025-09-18_17-17-53
250
+ 2025-09-18 17:17:57,442 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1900 | Val mean-roc_auc_score: 0.9428
251
+ 2025-09-18 17:17:57,442 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
252
+ 2025-09-18 17:17:58,226 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9428
253
+ 2025-09-18 17:18:02,438 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0517 | Val mean-roc_auc_score: 0.9784
254
+ 2025-09-18 17:18:02,605 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
255
+ 2025-09-18 17:18:03,182 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9784
256
+ 2025-09-18 17:18:07,507 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0415 | Val mean-roc_auc_score: 0.9825
257
+ 2025-09-18 17:18:07,685 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 111
258
+ 2025-09-18 17:18:08,262 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9825
259
+ 2025-09-18 17:18:09,967 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0361 | Val mean-roc_auc_score: 0.9877
260
+ 2025-09-18 17:18:10,157 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 148
261
+ 2025-09-18 17:18:10,699 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9877
262
+ 2025-09-18 17:18:14,887 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0277 | Val mean-roc_auc_score: 0.9875
263
+ 2025-09-18 17:18:19,626 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0430 | Val mean-roc_auc_score: 0.9933
264
+ 2025-09-18 17:18:20,225 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 222
265
+ 2025-09-18 17:18:20,804 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9933
266
+ 2025-09-18 17:18:25,265 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0287 | Val mean-roc_auc_score: 0.9905
267
+ 2025-09-18 17:18:30,172 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0163 | Val mean-roc_auc_score: 0.9887
268
+ 2025-09-18 17:18:35,039 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0185 | Val mean-roc_auc_score: 0.9932
269
+ 2025-09-18 17:18:39,430 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0159 | Val mean-roc_auc_score: 0.9944
270
+ 2025-09-18 17:18:39,584 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 370
271
+ 2025-09-18 17:18:37,684 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9944
272
+ 2025-09-18 17:18:41,849 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9943
273
+ 2025-09-18 17:18:46,635 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.9953
274
+ 2025-09-18 17:18:46,815 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 444
275
+ 2025-09-18 17:18:47,366 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9953
276
+ 2025-09-18 17:18:51,730 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9922
277
+ 2025-09-18 17:18:56,135 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0213 | Val mean-roc_auc_score: 0.9948
278
+ 2025-09-18 17:19:00,951 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0216 | Val mean-roc_auc_score: 0.9953
279
+ 2025-09-18 17:19:05,849 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9942
280
+ 2025-09-18 17:19:08,320 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0104 | Val mean-roc_auc_score: 0.9938
281
+ 2025-09-18 17:19:12,602 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9854
282
+ 2025-09-18 17:19:16,824 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0247 | Val mean-roc_auc_score: 0.9894
283
+ 2025-09-18 17:19:21,594 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0113 | Val mean-roc_auc_score: 0.9917
284
+ 2025-09-18 17:19:26,443 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9943
285
+ 2025-09-18 17:19:31,554 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.9948
286
+ 2025-09-18 17:19:35,402 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9938
287
+ 2025-09-18 17:19:37,687 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9948
288
+ 2025-09-18 17:19:42,393 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9946
289
+ 2025-09-18 17:19:46,721 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9933
290
+ 2025-09-18 17:19:52,289 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9954
291
+ 2025-09-18 17:19:52,448 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 999
292
+ 2025-09-18 17:19:53,446 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val mean-roc_auc_score: 0.9954
293
+ 2025-09-18 17:19:58,011 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9927
294
+ 2025-09-18 17:20:02,254 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9939
295
+ 2025-09-18 17:20:06,417 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9933
296
+ 2025-09-18 17:20:08,744 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0247 | Val mean-roc_auc_score: 0.9914
297
+ 2025-09-18 17:20:13,828 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0131 | Val mean-roc_auc_score: 0.9949
298
+ 2025-09-18 17:20:17,877 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.9932
299
+ 2025-09-18 17:20:22,049 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.9956
300
+ 2025-09-18 17:20:22,246 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1258
301
+ 2025-09-18 17:20:22,810 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 34 with val mean-roc_auc_score: 0.9956
302
+ 2025-09-18 17:20:27,563 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9966
303
+ 2025-09-18 17:20:27,745 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1295
304
+ 2025-09-18 17:20:28,342 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 35 with val mean-roc_auc_score: 0.9966
305
+ 2025-09-18 17:20:33,102 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0121 | Val mean-roc_auc_score: 0.9956
306
+ 2025-09-18 17:20:38,200 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9938
307
+ 2025-09-18 17:20:39,988 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9943
308
+ 2025-09-18 17:20:43,990 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9949
309
+ 2025-09-18 17:20:48,767 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9944
310
+ 2025-09-18 17:20:53,235 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9949
311
+ 2025-09-18 17:20:57,537 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9933
312
+ 2025-09-18 17:21:02,314 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9938
313
+ 2025-09-18 17:21:07,030 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9922
314
+ 2025-09-18 17:21:08,924 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9957
315
+ 2025-09-18 17:21:13,064 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.9915
316
+ 2025-09-18 17:21:17,708 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.9936
317
+ 2025-09-18 17:21:22,348 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9909
318
+ 2025-09-18 17:21:26,805 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9909
319
+ 2025-09-18 17:21:30,980 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9921
320
+ 2025-09-18 17:21:35,230 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9898
321
+ 2025-09-18 17:21:42,903 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9913
322
+ 2025-09-18 17:21:41,941 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9910
323
+ 2025-09-18 17:21:46,117 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9910
324
+ 2025-09-18 17:21:50,989 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9899
325
+ 2025-09-18 17:21:55,286 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9902
326
+ 2025-09-18 17:22:00,112 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.9939
327
+ 2025-09-18 17:22:05,284 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9932
328
+ 2025-09-18 17:22:08,054 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9949
329
+ 2025-09-18 17:22:13,002 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9955
330
+ 2025-09-18 17:22:17,311 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9933
331
+ 2025-09-18 17:22:22,393 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9928
332
+ 2025-09-18 17:22:27,118 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9939
333
+ 2025-09-18 17:22:32,096 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9944
334
+ 2025-09-18 17:22:36,885 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0270 | Val mean-roc_auc_score: 0.9925
335
+ 2025-09-18 17:22:39,058 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0165 | Val mean-roc_auc_score: 0.9943
336
+ 2025-09-18 17:22:43,731 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9953
337
+ 2025-09-18 17:22:48,542 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.9953
338
+ 2025-09-18 17:22:53,673 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9960
339
+ 2025-09-18 17:22:58,332 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9948
340
+ 2025-09-18 17:23:02,736 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9960
341
+ 2025-09-18 17:23:07,791 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9953
342
+ 2025-09-18 17:23:10,151 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0001 | Val mean-roc_auc_score: 0.9953
343
+ 2025-09-18 17:23:15,223 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9953
344
+ 2025-09-18 17:23:19,691 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9945
345
+ 2025-09-18 17:23:23,887 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9945
346
+ 2025-09-18 17:23:29,057 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9950
347
+ 2025-09-18 17:23:33,834 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.9940
348
+ 2025-09-18 17:23:38,044 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9965
349
+ 2025-09-18 17:23:39,457 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9943
350
+ 2025-09-18 17:23:44,108 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9954
351
+ 2025-09-18 17:23:49,862 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9943
352
+ 2025-09-18 17:23:54,502 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9938
353
+ 2025-09-18 17:23:59,353 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9943
354
+ 2025-09-18 17:24:03,708 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9938
355
+ 2025-09-18 17:24:07,807 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9932
356
+ 2025-09-18 17:24:09,989 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9926
357
+ 2025-09-18 17:24:14,474 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9932
358
+ 2025-09-18 17:24:19,144 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9938
359
+ 2025-09-18 17:24:23,717 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9938
360
+ 2025-09-18 17:24:28,245 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9927
361
+ 2025-09-18 17:24:32,662 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9932
362
+ 2025-09-18 17:24:37,305 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9932
363
+ 2025-09-18 17:24:39,498 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9932
364
+ 2025-09-18 17:24:43,874 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9938
365
+ 2025-09-18 17:24:47,997 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9938
366
+ 2025-09-18 17:24:52,543 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9922
367
+ 2025-09-18 17:24:56,937 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9932
368
+ 2025-09-18 17:25:01,766 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9927
369
+ 2025-09-18 17:25:06,153 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9943
370
+ 2025-09-18 17:25:06,567 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9803
371
+ 2025-09-18 17:25:06,983 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.9709, Std Dev: 0.0227
logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_hiv_epochs100_batch_size32_20250922_102753.log ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-22 10:27:53,331 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Running benchmark for dataset: hiv
2
+ 2025-09-22 10:27:53,332 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - dataset: hiv, tasks: ['HIV_active'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-22 10:27:53,337 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset hiv at 2025-09-22_10-27-53
4
+ 2025-09-22 10:29:30,748 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1279 | Val mean-roc_auc_score: 0.8207
5
+ 2025-09-22 10:29:30,748 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
6
+ 2025-09-22 10:29:31,270 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.8207
7
+ 2025-09-22 10:31:14,830 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1042 | Val mean-roc_auc_score: 0.8409
8
+ 2025-09-22 10:31:14,975 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 2054
9
+ 2025-09-22 10:31:15,514 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.8409
10
+ 2025-09-22 10:32:58,284 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1088 | Val mean-roc_auc_score: 0.8423
11
+ 2025-09-22 10:32:58,428 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 3081
12
+ 2025-09-22 10:32:58,998 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.8423
13
+ 2025-09-22 10:34:42,838 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.8402
14
+ 2025-09-22 10:36:26,003 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0558 | Val mean-roc_auc_score: 0.8162
15
+ 2025-09-22 10:38:10,706 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0401 | Val mean-roc_auc_score: 0.8302
16
+ 2025-09-22 10:39:53,737 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0390 | Val mean-roc_auc_score: 0.8347
17
+ 2025-09-22 10:41:36,525 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0309 | Val mean-roc_auc_score: 0.8476
18
+ 2025-09-22 10:41:36,670 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 8216
19
+ 2025-09-22 10:41:37,205 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.8476
20
+ 2025-09-22 10:43:19,858 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0336 | Val mean-roc_auc_score: 0.8278
21
+ 2025-09-22 10:45:03,546 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0239 | Val mean-roc_auc_score: 0.8269
22
+ 2025-09-22 10:46:47,041 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.8248
23
+ 2025-09-22 10:48:31,075 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0180 | Val mean-roc_auc_score: 0.8239
24
+ 2025-09-22 10:50:13,924 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.8219
25
+ 2025-09-22 10:51:58,238 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0185 | Val mean-roc_auc_score: 0.8145
26
+ 2025-09-22 10:53:40,819 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.8173
27
+ 2025-09-22 10:55:26,170 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0166 | Val mean-roc_auc_score: 0.8156
28
+ 2025-09-22 10:57:09,366 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0224 | Val mean-roc_auc_score: 0.8160
29
+ 2025-09-22 10:58:52,447 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0176 | Val mean-roc_auc_score: 0.8190
30
+ 2025-09-22 11:00:37,614 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.8234
31
+ 2025-09-22 11:02:21,246 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.8278
32
+ 2025-09-22 11:04:07,152 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.8167
33
+ 2025-09-22 11:05:52,097 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0150 | Val mean-roc_auc_score: 0.8219
34
+ 2025-09-22 11:07:35,782 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.8239
35
+ 2025-09-22 11:09:18,868 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.8170
36
+ 2025-09-22 11:11:02,546 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.8175
37
+ 2025-09-22 11:12:44,613 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.8211
38
+ 2025-09-22 11:14:28,727 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.8277
39
+ 2025-09-22 11:16:10,882 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.8200
40
+ 2025-09-22 11:17:55,259 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.8169
41
+ 2025-09-22 11:19:37,674 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.8162
42
+ 2025-09-22 11:21:21,746 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.8171
43
+ 2025-09-22 11:23:04,573 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.8185
44
+ 2025-09-22 11:24:47,735 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.8238
45
+ 2025-09-22 11:26:31,054 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8235
46
+ 2025-09-22 11:28:14,670 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.8273
47
+ 2025-09-22 11:29:59,720 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.8255
48
+ 2025-09-22 11:31:45,533 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.8210
49
+ 2025-09-22 11:33:29,489 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8217
50
+ 2025-09-22 11:35:12,648 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0110 | Val mean-roc_auc_score: 0.8211
51
+ 2025-09-22 11:36:56,640 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.8246
52
+ 2025-09-22 11:38:39,944 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.8256
53
+ 2025-09-22 11:40:25,650 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.8231
54
+ 2025-09-22 11:42:08,396 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.8242
55
+ 2025-09-22 11:43:52,184 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8260
56
+ 2025-09-22 11:45:34,337 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8254
57
+ 2025-09-22 11:47:19,737 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.8222
58
+ 2025-09-22 11:49:02,780 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8268
59
+ 2025-09-22 11:50:45,874 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.8258
60
+ 2025-09-22 11:52:30,250 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8257
61
+ 2025-09-22 11:54:13,158 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.8306
62
+ 2025-09-22 11:55:57,593 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8281
63
+ 2025-09-22 11:57:40,665 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0337 | Val mean-roc_auc_score: 0.8320
64
+ 2025-09-22 11:59:24,796 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8319
65
+ 2025-09-22 12:01:08,015 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.8305
66
+ 2025-09-22 12:02:51,878 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8277
67
+ 2025-09-22 12:04:36,775 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.8265
68
+ 2025-09-22 12:06:20,938 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8232
69
+ 2025-09-22 12:08:02,564 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8257
70
+ 2025-09-22 12:09:47,175 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8268
71
+ 2025-09-22 12:11:28,759 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.8265
72
+ 2025-09-22 12:13:13,113 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8291
73
+ 2025-09-22 12:14:56,446 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8285
74
+ 2025-09-22 12:16:39,253 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.8293
75
+ 2025-09-22 12:18:22,432 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8282
76
+ 2025-09-22 12:20:06,222 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.8288
77
+ 2025-09-22 12:21:50,194 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8279
78
+ 2025-09-22 12:23:32,851 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8292
79
+ 2025-09-22 12:25:16,450 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8294
80
+ 2025-09-22 12:27:00,416 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8296
81
+ 2025-09-22 12:28:44,202 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8297
82
+ 2025-09-22 12:30:27,444 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8302
83
+ 2025-09-22 12:32:11,122 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8291
84
+ 2025-09-22 12:33:52,806 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8310
85
+ 2025-09-22 12:35:36,295 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8299
86
+ 2025-09-22 12:37:20,532 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.8313
87
+ 2025-09-22 12:39:04,587 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.8303
88
+ 2025-09-22 12:40:47,589 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8307
89
+ 2025-09-22 12:42:30,244 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.8280
90
+ 2025-09-22 12:44:14,518 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8282
91
+ 2025-09-22 12:45:57,727 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8274
92
+ 2025-09-22 12:47:41,561 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8295
93
+ 2025-09-22 12:49:24,781 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.8295
94
+ 2025-09-22 12:51:08,354 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.8284
95
+ 2025-09-22 12:52:52,238 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.8276
96
+ 2025-09-22 12:54:35,443 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.8259
97
+ 2025-09-22 12:56:17,105 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8301
98
+ 2025-09-22 12:58:04,811 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8312
99
+ 2025-09-22 12:59:46,273 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8284
100
+ 2025-09-22 13:01:29,429 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.8304
101
+ 2025-09-22 13:03:11,073 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8304
102
+ 2025-09-22 13:04:54,486 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.8288
103
+ 2025-09-22 13:06:37,619 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.8283
104
+ 2025-09-22 13:08:19,539 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8305
105
+ 2025-09-22 13:10:02,703 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.8303
106
+ 2025-09-22 13:11:45,486 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8309
107
+ 2025-09-22 13:13:30,001 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8286
108
+ 2025-09-22 13:15:12,551 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.8297
109
+ 2025-09-22 13:16:56,314 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8287
110
+ 2025-09-22 13:18:39,115 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8314
111
+ 2025-09-22 13:20:22,607 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8305
112
+ 2025-09-22 13:20:28,154 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7874
113
+ 2025-09-22 13:20:28,667 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset hiv at 2025-09-22_13-20-28
114
+ 2025-09-22 13:22:02,986 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1169 | Val mean-roc_auc_score: 0.8282
115
+ 2025-09-22 13:22:02,986 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
116
+ 2025-09-22 13:22:03,509 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.8282
117
+ 2025-09-22 13:23:47,390 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1088 | Val mean-roc_auc_score: 0.8225
118
+ 2025-09-22 13:25:30,628 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0721 | Val mean-roc_auc_score: 0.8218
119
+ 2025-09-22 13:27:13,790 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1104 | Val mean-roc_auc_score: 0.8116
120
+ 2025-09-22 13:28:58,144 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0647 | Val mean-roc_auc_score: 0.7769
121
+ 2025-09-22 13:30:41,712 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0431 | Val mean-roc_auc_score: 0.7958
122
+ 2025-09-22 13:32:24,282 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0381 | Val mean-roc_auc_score: 0.7877
123
+ 2025-09-22 13:34:07,821 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0184 | Val mean-roc_auc_score: 0.7764
124
+ 2025-09-22 13:35:51,220 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0251 | Val mean-roc_auc_score: 0.7698
125
+ 2025-09-22 13:37:34,538 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0232 | Val mean-roc_auc_score: 0.7705
126
+ 2025-09-22 13:39:17,938 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.7791
127
+ 2025-09-22 13:41:01,523 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.7895
128
+ 2025-09-22 13:42:43,615 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7765
129
+ 2025-09-22 13:44:26,730 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0165 | Val mean-roc_auc_score: 0.7781
130
+ 2025-09-22 13:46:10,341 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.7793
131
+ 2025-09-22 13:47:52,409 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.7737
132
+ 2025-09-22 13:49:36,930 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0166 | Val mean-roc_auc_score: 0.7955
133
+ 2025-09-22 13:51:19,330 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.7833
134
+ 2025-09-22 13:53:02,689 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7771
135
+ 2025-09-22 13:54:45,468 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7801
136
+ 2025-09-22 13:56:29,447 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.7787
137
+ 2025-09-22 13:58:11,831 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7748
138
+ 2025-09-22 13:59:54,815 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.7825
139
+ 2025-09-22 14:01:37,786 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.7836
140
+ 2025-09-22 14:03:21,626 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7792
141
+ 2025-09-22 14:05:03,882 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.7781
142
+ 2025-09-22 14:06:47,682 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7794
143
+ 2025-09-22 14:08:30,572 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0123 | Val mean-roc_auc_score: 0.7884
144
+ 2025-09-22 14:10:13,713 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.7867
145
+ 2025-09-22 14:11:56,962 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7817
146
+ 2025-09-22 14:13:39,239 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.7868
147
+ 2025-09-22 14:15:22,975 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7940
148
+ 2025-09-22 14:17:04,561 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.7928
149
+ 2025-09-22 14:18:49,262 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7820
150
+ 2025-09-22 14:20:31,623 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.7874
151
+ 2025-09-22 14:22:15,298 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.7850
152
+ 2025-09-22 14:23:59,699 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.7799
153
+ 2025-09-22 14:25:44,096 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.7785
154
+ 2025-09-22 14:27:26,730 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7800
155
+ 2025-09-22 14:29:09,636 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7716
156
+ 2025-09-22 14:30:52,661 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.7824
157
+ 2025-09-22 14:32:35,639 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.7774
158
+ 2025-09-22 14:34:18,111 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.7816
159
+ 2025-09-22 14:36:02,062 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7812
160
+ 2025-09-22 14:37:44,496 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7851
161
+ 2025-09-22 14:39:26,924 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.7871
162
+ 2025-09-22 14:41:10,884 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7874
163
+ 2025-09-22 14:42:53,313 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.7861
164
+ 2025-09-22 14:44:36,961 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.7852
165
+ 2025-09-22 14:46:19,474 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.7886
166
+ 2025-09-22 14:48:03,362 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7911
167
+ 2025-09-22 14:49:14,309 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7872
168
+ 2025-09-22 14:50:18,609 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.7918
169
+ 2025-09-22 14:51:22,850 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.7844
170
+ 2025-09-22 14:52:26,692 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0087 | Val mean-roc_auc_score: 0.7844
171
+ 2025-09-22 14:53:31,061 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7872
172
+ 2025-09-22 14:54:35,283 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.7843
173
+ 2025-09-22 14:55:39,041 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.7855
174
+ 2025-09-22 14:56:43,131 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7878
175
+ 2025-09-22 14:57:46,800 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7849
176
+ 2025-09-22 14:58:51,148 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.7873
177
+ 2025-09-22 14:59:55,132 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.7839
178
+ 2025-09-22 15:00:58,854 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0000 | Val mean-roc_auc_score: 0.7899
179
+ 2025-09-22 15:02:03,140 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7929
180
+ 2025-09-22 15:03:06,887 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.7893
181
+ 2025-09-22 15:04:11,008 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7846
182
+ 2025-09-22 15:05:15,254 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.7843
183
+ 2025-09-22 15:06:18,992 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7863
184
+ 2025-09-22 15:07:23,287 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7975
185
+ 2025-09-22 15:08:26,840 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.7976
186
+ 2025-09-22 15:09:30,540 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7969
187
+ 2025-09-22 15:10:34,835 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.7958
188
+ 2025-09-22 15:11:38,504 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.7945
189
+ 2025-09-22 15:12:42,954 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.7943
190
+ 2025-09-22 15:13:48,082 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.7926
191
+ 2025-09-22 15:14:52,405 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.7906
192
+ 2025-09-22 15:15:56,828 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7942
193
+ 2025-09-22 15:17:00,752 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7940
194
+ 2025-09-22 15:18:04,863 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.7929
195
+ 2025-09-22 15:19:08,889 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7893
196
+ 2025-09-22 15:20:13,258 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7899
197
+ 2025-09-22 15:21:17,849 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7892
198
+ 2025-09-22 15:22:21,775 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7880
199
+ 2025-09-22 15:23:25,956 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7890
200
+ 2025-09-22 15:24:29,892 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.7922
201
+ 2025-09-22 15:25:34,157 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.7921
202
+ 2025-09-22 15:26:38,484 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.7880
203
+ 2025-09-22 15:27:42,328 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7921
204
+ 2025-09-22 15:28:46,516 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7922
205
+ 2025-09-22 15:29:50,326 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.7901
206
+ 2025-09-22 15:30:54,562 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.7901
207
+ 2025-09-22 15:31:58,693 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7880
208
+ 2025-09-22 15:33:02,429 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.7890
209
+ 2025-09-22 15:34:06,780 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.7931
210
+ 2025-09-22 15:35:10,530 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7928
211
+ 2025-09-22 15:36:14,753 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7917
212
+ 2025-09-22 15:37:18,763 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.7929
213
+ 2025-09-22 15:38:22,367 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7919
214
+ 2025-09-22 15:39:26,395 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7942
215
+ 2025-09-22 15:40:30,048 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7946
216
+ 2025-09-22 15:40:33,572 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7913
217
+ 2025-09-22 15:40:34,221 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset hiv at 2025-09-22_15-40-34
218
+ 2025-09-22 15:41:30,427 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1042 | Val mean-roc_auc_score: 0.7999
219
+ 2025-09-22 15:41:30,427 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
220
+ 2025-09-22 15:41:30,944 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7999
221
+ 2025-09-22 15:42:35,032 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1024 | Val mean-roc_auc_score: 0.8058
222
+ 2025-09-22 15:42:35,170 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 2054
223
+ 2025-09-22 15:42:35,690 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.8058
224
+ 2025-09-22 15:43:39,506 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1173 | Val mean-roc_auc_score: 0.7990
225
+ 2025-09-22 15:44:43,276 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0386 | Val mean-roc_auc_score: 0.8505
226
+ 2025-09-22 15:44:43,420 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 4108
227
+ 2025-09-22 15:44:43,933 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.8505
228
+ 2025-09-22 15:45:48,287 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0533 | Val mean-roc_auc_score: 0.8304
229
+ 2025-09-22 15:46:51,994 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0494 | Val mean-roc_auc_score: 0.8083
230
+ 2025-09-22 15:47:56,183 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0388 | Val mean-roc_auc_score: 0.8358
231
+ 2025-09-22 15:49:00,011 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0396 | Val mean-roc_auc_score: 0.8189
232
+ 2025-09-22 15:50:04,080 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0267 | Val mean-roc_auc_score: 0.8203
233
+ 2025-09-22 15:51:07,729 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0235 | Val mean-roc_auc_score: 0.8295
234
+ 2025-09-22 15:52:12,639 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0205 | Val mean-roc_auc_score: 0.8006
235
+ 2025-09-22 15:53:17,186 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.7950
236
+ 2025-09-22 15:54:21,080 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0247 | Val mean-roc_auc_score: 0.8084
237
+ 2025-09-22 15:55:25,234 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.7820
238
+ 2025-09-22 15:56:28,819 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.7968
239
+ 2025-09-22 15:57:33,029 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0190 | Val mean-roc_auc_score: 0.7935
240
+ 2025-09-22 15:58:37,049 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.7996
241
+ 2025-09-22 15:59:40,821 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.8090
242
+ 2025-09-22 16:00:45,588 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.8062
243
+ 2025-09-22 16:01:49,234 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.8045
244
+ 2025-09-22 16:02:53,383 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.8065
245
+ 2025-09-22 16:03:57,535 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0130 | Val mean-roc_auc_score: 0.8056
246
+ 2025-09-22 16:05:01,420 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0156 | Val mean-roc_auc_score: 0.8048
247
+ 2025-09-22 16:06:05,987 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0125 | Val mean-roc_auc_score: 0.8035
248
+ 2025-09-22 16:07:09,605 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.8061
249
+ 2025-09-22 16:08:14,055 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.7993
250
+ 2025-09-22 16:09:18,389 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.7996
251
+ 2025-09-22 16:10:21,848 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.7954
252
+ 2025-09-22 16:11:25,779 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.7994
253
+ 2025-09-22 16:12:29,592 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.7956
254
+ 2025-09-22 16:13:34,310 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.8004
255
+ 2025-09-22 16:14:39,067 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.7935
256
+ 2025-09-22 16:15:43,209 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0124 | Val mean-roc_auc_score: 0.8004
257
+ 2025-09-22 16:16:47,782 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7913
258
+ 2025-09-22 16:17:51,386 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7866
259
+ 2025-09-22 16:18:55,722 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7992
260
+ 2025-09-22 16:20:01,154 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.7967
261
+ 2025-09-22 16:21:04,932 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.7979
262
+ 2025-09-22 16:22:09,341 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.7992
263
+ 2025-09-22 16:23:13,353 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7903
264
+ 2025-09-22 16:24:17,758 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0172 | Val mean-roc_auc_score: 0.8036
265
+ 2025-09-22 16:25:22,034 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.8035
266
+ 2025-09-22 16:26:25,946 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.8065
267
+ 2025-09-22 16:27:30,189 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.8025
268
+ 2025-09-22 16:28:33,929 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8004
269
+ 2025-09-22 16:29:38,139 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.8090
270
+ 2025-09-22 16:30:42,574 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.8043
271
+ 2025-09-22 16:31:46,133 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.8045
272
+ 2025-09-22 16:32:50,601 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8002
273
+ 2025-09-22 16:33:54,497 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.8001
274
+ 2025-09-22 16:34:58,704 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7921
275
+ 2025-09-22 16:36:03,241 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.7965
276
+ 2025-09-22 16:37:06,832 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.8009
277
+ 2025-09-22 16:38:11,081 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.8005
278
+ 2025-09-22 16:39:14,659 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.8011
279
+ 2025-09-22 16:40:19,382 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7943
280
+ 2025-09-22 16:41:23,590 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7983
281
+ 2025-09-22 16:42:27,186 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.7964
282
+ 2025-09-22 16:43:31,507 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.7937
283
+ 2025-09-22 16:44:35,308 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8001
284
+ 2025-09-22 16:45:39,653 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.8022
285
+ 2025-09-22 16:46:43,892 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8018
286
+ 2025-09-22 16:47:48,031 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.8003
287
+ 2025-09-22 16:48:52,339 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.7974
288
+ 2025-09-22 16:49:56,103 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.7991
289
+ 2025-09-22 16:51:00,290 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.7957
290
+ 2025-09-22 16:52:04,577 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.7930
291
+ 2025-09-22 16:53:08,624 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8002
292
+ 2025-09-22 16:54:12,964 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.7986
293
+ 2025-09-22 16:55:16,637 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.7956
294
+ 2025-09-22 16:56:20,935 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.7934
295
+ 2025-09-22 16:57:25,237 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7924
296
+ 2025-09-22 16:58:29,080 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.7858
297
+ 2025-09-22 16:59:33,386 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7945
298
+ 2025-09-22 17:00:38,215 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.7999
299
+ 2025-09-22 17:01:42,584 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7998
300
+ 2025-09-22 17:02:46,955 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7972
301
+ 2025-09-22 17:03:50,407 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.7970
302
+ 2025-09-22 17:04:54,641 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7949
303
+ 2025-09-22 17:05:58,611 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7988
304
+ 2025-09-22 17:07:03,311 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.7968
305
+ 2025-09-22 17:08:07,977 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7941
306
+ 2025-09-22 17:09:11,682 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7952
307
+ 2025-09-22 17:10:16,069 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.7975
308
+ 2025-09-22 17:11:19,899 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7918
309
+ 2025-09-22 17:12:23,136 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.7917
310
+ 2025-09-22 17:13:25,600 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8012
311
+ 2025-09-22 17:14:27,393 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.7975
312
+ 2025-09-22 17:15:29,861 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.7993
313
+ 2025-09-22 17:16:31,566 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.8035
314
+ 2025-09-22 17:17:33,872 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8029
315
+ 2025-09-22 17:18:36,325 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.7961
316
+ 2025-09-22 17:19:38,175 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.7974
317
+ 2025-09-22 17:20:40,679 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.7986
318
+ 2025-09-22 17:21:42,301 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7997
319
+ 2025-09-22 17:22:44,705 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.7978
320
+ 2025-09-22 17:23:47,153 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.7934
321
+ 2025-09-22 17:24:48,860 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.7931
322
+ 2025-09-22 17:25:51,268 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7939
323
+ 2025-09-22 17:26:53,144 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7977
324
+ 2025-09-22 17:26:56,552 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7614
325
+ 2025-09-22 17:26:57,359 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7800, Std Dev: 0.0133
logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_sider_epochs100_batch_size32_20250918_164215.log ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 16:42:15,144 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Running benchmark for dataset: sider
2
+ 2025-09-18 16:42:15,144 - logs_modchembert_sider_epochs100_batch_size32 - INFO - dataset: sider, tasks: ['Hepatobiliary disorders', 'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders', 'Investigations', 'Musculoskeletal and connective tissue disorders', 'Gastrointestinal disorders', 'Social circumstances', 'Immune system disorders', 'Reproductive system and breast disorders', 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)', 'General disorders and administration site conditions', 'Endocrine disorders', 'Surgical and medical procedures', 'Vascular disorders', 'Blood and lymphatic system disorders', 'Skin and subcutaneous tissue disorders', 'Congenital, familial and genetic disorders', 'Infections and infestations', 'Respiratory, thoracic and mediastinal disorders', 'Psychiatric disorders', 'Renal and urinary disorders', 'Pregnancy, puerperium and perinatal conditions', 'Ear and labyrinth disorders', 'Cardiac disorders', 'Nervous system disorders', 'Injury, poisoning and procedural complications'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 16:42:15,150 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset sider at 2025-09-18_16-42-15
4
+ 2025-09-18 16:42:18,099 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5321 | Val mean-roc_auc_score: 0.5313
5
+ 2025-09-18 16:42:18,099 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
6
+ 2025-09-18 16:42:18,914 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5313
7
+ 2025-09-18 16:42:22,758 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5107 | Val mean-roc_auc_score: 0.5564
8
+ 2025-09-18 16:42:22,917 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
9
+ 2025-09-18 16:42:23,398 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5564
10
+ 2025-09-18 16:42:26,898 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.5000 | Val mean-roc_auc_score: 0.5510
11
+ 2025-09-18 16:42:30,734 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4821 | Val mean-roc_auc_score: 0.5662
12
+ 2025-09-18 16:42:30,898 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
13
+ 2025-09-18 16:42:31,377 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5662
14
+ 2025-09-18 16:42:34,848 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4643 | Val mean-roc_auc_score: 0.5321
15
+ 2025-09-18 16:42:35,813 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4469 | Val mean-roc_auc_score: 0.5598
16
+ 2025-09-18 16:42:39,657 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4018 | Val mean-roc_auc_score: 0.5642
17
+ 2025-09-18 16:42:43,948 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3625 | Val mean-roc_auc_score: 0.5913
18
+ 2025-09-18 16:42:44,119 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 280
19
+ 2025-09-18 16:42:44,696 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.5913
20
+ 2025-09-18 16:42:49,365 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3458 | Val mean-roc_auc_score: 0.5925
21
+ 2025-09-18 16:42:49,558 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 315
22
+ 2025-09-18 16:42:50,123 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.5925
23
+ 2025-09-18 16:42:55,228 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3018 | Val mean-roc_auc_score: 0.5965
24
+ 2025-09-18 16:42:55,436 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 350
25
+ 2025-09-18 16:42:56,011 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.5965
26
+ 2025-09-18 16:43:00,240 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2839 | Val mean-roc_auc_score: 0.5928
27
+ 2025-09-18 16:43:04,973 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2672 | Val mean-roc_auc_score: 0.5794
28
+ 2025-09-18 16:43:06,258 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2464 | Val mean-roc_auc_score: 0.5822
29
+ 2025-09-18 16:43:10,423 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2339 | Val mean-roc_auc_score: 0.5841
30
+ 2025-09-18 16:43:14,630 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2275 | Val mean-roc_auc_score: 0.5956
31
+ 2025-09-18 16:43:18,894 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2089 | Val mean-roc_auc_score: 0.6005
32
+ 2025-09-18 16:43:19,571 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 560
33
+ 2025-09-18 16:43:20,078 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val mean-roc_auc_score: 0.6005
34
+ 2025-09-18 16:43:23,810 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1982 | Val mean-roc_auc_score: 0.5766
35
+ 2025-09-18 16:43:27,477 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1938 | Val mean-roc_auc_score: 0.5855
36
+ 2025-09-18 16:43:31,065 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1839 | Val mean-roc_auc_score: 0.5883
37
+ 2025-09-18 16:43:34,597 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1821 | Val mean-roc_auc_score: 0.5851
38
+ 2025-09-18 16:43:35,844 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1723 | Val mean-roc_auc_score: 0.5888
39
+ 2025-09-18 16:43:40,202 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1643 | Val mean-roc_auc_score: 0.5835
40
+ 2025-09-18 16:43:43,808 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1703 | Val mean-roc_auc_score: 0.5873
41
+ 2025-09-18 16:43:47,387 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1527 | Val mean-roc_auc_score: 0.5789
42
+ 2025-09-18 16:43:50,970 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1500 | Val mean-roc_auc_score: 0.5768
43
+ 2025-09-18 16:43:54,768 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1508 | Val mean-roc_auc_score: 0.5772
44
+ 2025-09-18 16:43:58,665 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1464 | Val mean-roc_auc_score: 0.5849
45
+ 2025-09-18 16:44:02,317 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1366 | Val mean-roc_auc_score: 0.5963
46
+ 2025-09-18 16:44:04,413 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1458 | Val mean-roc_auc_score: 0.5818
47
+ 2025-09-18 16:44:07,966 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1411 | Val mean-roc_auc_score: 0.5791
48
+ 2025-09-18 16:44:11,448 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1330 | Val mean-roc_auc_score: 0.6001
49
+ 2025-09-18 16:44:15,291 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1344 | Val mean-roc_auc_score: 0.5836
50
+ 2025-09-18 16:44:18,850 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1259 | Val mean-roc_auc_score: 0.5843
51
+ 2025-09-18 16:44:22,468 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.5766
52
+ 2025-09-18 16:44:26,095 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1169 | Val mean-roc_auc_score: 0.5840
53
+ 2025-09-18 16:44:29,601 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1134 | Val mean-roc_auc_score: 0.5863
54
+ 2025-09-18 16:44:33,696 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.5794
55
+ 2025-09-18 16:44:34,753 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1156 | Val mean-roc_auc_score: 0.5896
56
+ 2025-09-18 16:44:38,386 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.5888
57
+ 2025-09-18 16:44:41,931 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.5841
58
+ 2025-09-18 16:44:45,406 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1116 | Val mean-roc_auc_score: 0.5794
59
+ 2025-09-18 16:44:49,428 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1080 | Val mean-roc_auc_score: 0.5910
60
+ 2025-09-18 16:44:53,152 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1156 | Val mean-roc_auc_score: 0.5883
61
+ 2025-09-18 16:44:57,198 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1031 | Val mean-roc_auc_score: 0.5919
62
+ 2025-09-18 16:45:01,112 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.1054 | Val mean-roc_auc_score: 0.5859
63
+ 2025-09-18 16:45:04,668 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.5861
64
+ 2025-09-18 16:45:06,538 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0987 | Val mean-roc_auc_score: 0.5908
65
+ 2025-09-18 16:45:10,647 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0991 | Val mean-roc_auc_score: 0.5924
66
+ 2025-09-18 16:45:14,464 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.1099 | Val mean-roc_auc_score: 0.5838
67
+ 2025-09-18 16:45:17,991 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0969 | Val mean-roc_auc_score: 0.5794
68
+ 2025-09-18 16:45:21,507 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0955 | Val mean-roc_auc_score: 0.5704
69
+ 2025-09-18 16:45:25,614 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0992 | Val mean-roc_auc_score: 0.5851
70
+ 2025-09-18 16:45:28,971 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0915 | Val mean-roc_auc_score: 0.5910
71
+ 2025-09-18 16:45:32,477 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0946 | Val mean-roc_auc_score: 0.5796
72
+ 2025-09-18 16:45:35,994 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0912 | Val mean-roc_auc_score: 0.5867
73
+ 2025-09-18 16:45:37,094 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0920 | Val mean-roc_auc_score: 0.5811
74
+ 2025-09-18 16:45:41,610 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0911 | Val mean-roc_auc_score: 0.5934
75
+ 2025-09-18 16:45:46,187 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0948 | Val mean-roc_auc_score: 0.5832
76
+ 2025-09-18 16:45:49,741 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0888 | Val mean-roc_auc_score: 0.5796
77
+ 2025-09-18 16:45:53,243 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0911 | Val mean-roc_auc_score: 0.5826
78
+ 2025-09-18 16:45:56,735 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0911 | Val mean-roc_auc_score: 0.5822
79
+ 2025-09-18 16:46:00,566 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0866 | Val mean-roc_auc_score: 0.5716
80
+ 2025-09-18 16:46:03,904 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0914 | Val mean-roc_auc_score: 0.5819
81
+ 2025-09-18 16:46:05,008 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0871 | Val mean-roc_auc_score: 0.5872
82
+ 2025-09-18 16:46:08,507 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0857 | Val mean-roc_auc_score: 0.5838
83
+ 2025-09-18 16:46:11,811 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0922 | Val mean-roc_auc_score: 0.5831
84
+ 2025-09-18 16:46:15,661 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.5779
85
+ 2025-09-18 16:46:18,986 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0826 | Val mean-roc_auc_score: 0.5744
86
+ 2025-09-18 16:46:22,784 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0865 | Val mean-roc_auc_score: 0.5812
87
+ 2025-09-18 16:46:26,484 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.5829
88
+ 2025-09-18 16:46:30,117 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.5755
89
+ 2025-09-18 16:46:34,076 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0840 | Val mean-roc_auc_score: 0.5879
90
+ 2025-09-18 16:46:34,941 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.5893
91
+ 2025-09-18 16:46:38,334 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.5849
92
+ 2025-09-18 16:46:41,815 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0806 | Val mean-roc_auc_score: 0.5699
93
+ 2025-09-18 16:46:45,472 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.5794
94
+ 2025-09-18 16:46:49,491 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0808 | Val mean-roc_auc_score: 0.5843
95
+ 2025-09-18 16:46:52,893 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.5831
96
+ 2025-09-18 16:46:56,421 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.5854
97
+ 2025-09-18 16:46:59,875 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.5787
98
+ 2025-09-18 16:47:03,332 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0790 | Val mean-roc_auc_score: 0.5941
99
+ 2025-09-18 16:47:04,935 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0790 | Val mean-roc_auc_score: 0.5973
100
+ 2025-09-18 16:47:08,444 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0867 | Val mean-roc_auc_score: 0.5890
101
+ 2025-09-18 16:47:12,150 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.5926
102
+ 2025-09-18 16:47:15,994 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5922
103
+ 2025-09-18 16:47:20,439 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0809 | Val mean-roc_auc_score: 0.5903
104
+ 2025-09-18 16:47:24,324 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5903
105
+ 2025-09-18 16:47:27,704 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.5894
106
+ 2025-09-18 16:47:31,075 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0771 | Val mean-roc_auc_score: 0.5876
107
+ 2025-09-18 16:47:34,298 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.5933
108
+ 2025-09-18 16:47:35,190 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0759 | Val mean-roc_auc_score: 0.5982
109
+ 2025-09-18 16:47:39,452 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0762 | Val mean-roc_auc_score: 0.5728
110
+ 2025-09-18 16:47:42,872 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.5815
111
+ 2025-09-18 16:47:46,561 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.5852
112
+ 2025-09-18 16:47:50,077 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0800 | Val mean-roc_auc_score: 0.5801
113
+ 2025-09-18 16:47:54,067 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5793
114
+ 2025-09-18 16:47:59,291 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.5805
115
+ 2025-09-18 16:48:04,060 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0734 | Val mean-roc_auc_score: 0.5837
116
+ 2025-09-18 16:48:06,336 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.5855
117
+ 2025-09-18 16:48:10,845 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.5892
118
+ 2025-09-18 16:48:11,332 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6372
119
+ 2025-09-18 16:48:11,874 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset sider at 2025-09-18_16-48-11
120
+ 2025-09-18 16:48:15,737 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5393 | Val mean-roc_auc_score: 0.5414
121
+ 2025-09-18 16:48:15,737 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
122
+ 2025-09-18 16:48:16,608 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5414
123
+ 2025-09-18 16:48:20,997 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5071 | Val mean-roc_auc_score: 0.5496
124
+ 2025-09-18 16:48:21,191 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
125
+ 2025-09-18 16:48:21,822 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5496
126
+ 2025-09-18 16:48:26,349 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.5000 | Val mean-roc_auc_score: 0.5730
127
+ 2025-09-18 16:48:26,532 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
128
+ 2025-09-18 16:48:27,045 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5730
129
+ 2025-09-18 16:48:31,310 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4821 | Val mean-roc_auc_score: 0.5610
130
+ 2025-09-18 16:48:36,537 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4571 | Val mean-roc_auc_score: 0.5953
131
+ 2025-09-18 16:48:36,712 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 175
132
+ 2025-09-18 16:48:34,773 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.5953
133
+ 2025-09-18 16:48:39,592 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4313 | Val mean-roc_auc_score: 0.5944
134
+ 2025-09-18 16:48:45,095 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4036 | Val mean-roc_auc_score: 0.5753
135
+ 2025-09-18 16:48:49,924 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3696 | Val mean-roc_auc_score: 0.6055
136
+ 2025-09-18 16:48:50,138 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 280
137
+ 2025-09-18 16:48:50,724 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.6055
138
+ 2025-09-18 16:48:55,002 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3458 | Val mean-roc_auc_score: 0.6008
139
+ 2025-09-18 16:48:59,612 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3179 | Val mean-roc_auc_score: 0.6060
140
+ 2025-09-18 16:48:59,808 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 350
141
+ 2025-09-18 16:49:00,326 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.6060
142
+ 2025-09-18 16:49:04,649 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2946 | Val mean-roc_auc_score: 0.6001
143
+ 2025-09-18 16:49:07,410 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2781 | Val mean-roc_auc_score: 0.6057
144
+ 2025-09-18 16:49:11,940 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2589 | Val mean-roc_auc_score: 0.5947
145
+ 2025-09-18 16:49:16,182 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2500 | Val mean-roc_auc_score: 0.5904
146
+ 2025-09-18 16:49:20,595 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2400 | Val mean-roc_auc_score: 0.5919
147
+ 2025-09-18 16:49:25,283 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2277 | Val mean-roc_auc_score: 0.5881
148
+ 2025-09-18 16:49:30,336 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.2188 | Val mean-roc_auc_score: 0.5994
149
+ 2025-09-18 16:49:34,952 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.2104 | Val mean-roc_auc_score: 0.6169
150
+ 2025-09-18 16:49:35,099 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 630
151
+ 2025-09-18 16:49:35,684 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 18 with val mean-roc_auc_score: 0.6169
152
+ 2025-09-18 16:49:37,829 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1964 | Val mean-roc_auc_score: 0.6047
153
+ 2025-09-18 16:49:42,195 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1929 | Val mean-roc_auc_score: 0.6016
154
+ 2025-09-18 16:49:46,858 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1830 | Val mean-roc_auc_score: 0.6086
155
+ 2025-09-18 16:49:51,927 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1768 | Val mean-roc_auc_score: 0.5910
156
+ 2025-09-18 16:49:56,467 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1766 | Val mean-roc_auc_score: 0.6082
157
+ 2025-09-18 16:50:00,806 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1661 | Val mean-roc_auc_score: 0.6013
158
+ 2025-09-18 16:50:05,368 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1634 | Val mean-roc_auc_score: 0.6018
159
+ 2025-09-18 16:50:07,541 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1641 | Val mean-roc_auc_score: 0.6008
160
+ 2025-09-18 16:50:13,000 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1545 | Val mean-roc_auc_score: 0.5954
161
+ 2025-09-18 16:50:17,795 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1518 | Val mean-roc_auc_score: 0.6041
162
+ 2025-09-18 16:50:23,567 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1500 | Val mean-roc_auc_score: 0.6020
163
+ 2025-09-18 16:50:28,289 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1429 | Val mean-roc_auc_score: 0.5957
164
+ 2025-09-18 16:50:33,113 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1411 | Val mean-roc_auc_score: 0.5982
165
+ 2025-09-18 16:50:36,086 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1375 | Val mean-roc_auc_score: 0.6033
166
+ 2025-09-18 16:50:40,863 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1321 | Val mean-roc_auc_score: 0.6026
167
+ 2025-09-18 16:50:45,560 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1304 | Val mean-roc_auc_score: 0.5904
168
+ 2025-09-18 16:50:49,771 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1269 | Val mean-roc_auc_score: 0.5994
169
+ 2025-09-18 16:50:54,365 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1277 | Val mean-roc_auc_score: 0.6009
170
+ 2025-09-18 16:50:58,888 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1321 | Val mean-roc_auc_score: 0.5966
171
+ 2025-09-18 16:51:03,256 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1286 | Val mean-roc_auc_score: 0.5933
172
+ 2025-09-18 16:51:05,073 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6030
173
+ 2025-09-18 16:51:09,025 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.6018
174
+ 2025-09-18 16:51:13,560 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1138 | Val mean-roc_auc_score: 0.5960
175
+ 2025-09-18 16:51:18,381 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1129 | Val mean-roc_auc_score: 0.5978
176
+ 2025-09-18 16:51:22,961 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1156 | Val mean-roc_auc_score: 0.6002
177
+ 2025-09-18 16:51:27,366 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1116 | Val mean-roc_auc_score: 0.5986
178
+ 2025-09-18 16:51:32,163 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.1116 | Val mean-roc_auc_score: 0.6066
179
+ 2025-09-18 16:51:37,036 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1086 | Val mean-roc_auc_score: 0.5944
180
+ 2025-09-18 16:51:39,793 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.1049 | Val mean-roc_auc_score: 0.5919
181
+ 2025-09-18 16:51:44,579 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.6037
182
+ 2025-09-18 16:51:49,428 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.1083 | Val mean-roc_auc_score: 0.6019
183
+ 2025-09-18 16:51:54,076 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.1013 | Val mean-roc_auc_score: 0.5926
184
+ 2025-09-18 16:51:58,593 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.5997
185
+ 2025-09-18 16:52:03,605 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.1039 | Val mean-roc_auc_score: 0.5986
186
+ 2025-09-18 16:52:05,451 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.1013 | Val mean-roc_auc_score: 0.6024
187
+ 2025-09-18 16:52:09,833 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0987 | Val mean-roc_auc_score: 0.5969
188
+ 2025-09-18 16:52:14,154 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0956 | Val mean-roc_auc_score: 0.5977
189
+ 2025-09-18 16:52:18,673 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0951 | Val mean-roc_auc_score: 0.5974
190
+ 2025-09-18 16:52:23,465 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0955 | Val mean-roc_auc_score: 0.5895
191
+ 2025-09-18 16:52:28,849 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0958 | Val mean-roc_auc_score: 0.5980
192
+ 2025-09-18 16:52:33,255 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0955 | Val mean-roc_auc_score: 0.5933
193
+ 2025-09-18 16:52:40,284 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0946 | Val mean-roc_auc_score: 0.5993
194
+ 2025-09-18 16:52:39,186 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0933 | Val mean-roc_auc_score: 0.6002
195
+ 2025-09-18 16:52:44,067 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0982 | Val mean-roc_auc_score: 0.6015
196
+ 2025-09-18 16:52:48,133 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0961 | Val mean-roc_auc_score: 0.5953
197
+ 2025-09-18 16:52:52,444 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0897 | Val mean-roc_auc_score: 0.6040
198
+ 2025-09-18 16:52:56,877 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0884 | Val mean-roc_auc_score: 0.5974
199
+ 2025-09-18 16:53:01,499 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0918 | Val mean-roc_auc_score: 0.5969
200
+ 2025-09-18 16:53:06,413 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6016
201
+ 2025-09-18 16:53:08,328 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6101
202
+ 2025-09-18 16:53:12,887 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0885 | Val mean-roc_auc_score: 0.5988
203
+ 2025-09-18 16:53:17,500 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0857 | Val mean-roc_auc_score: 0.6037
204
+ 2025-09-18 16:53:22,277 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0857 | Val mean-roc_auc_score: 0.6033
205
+ 2025-09-18 16:53:27,586 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0867 | Val mean-roc_auc_score: 0.6040
206
+ 2025-09-18 16:53:32,420 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0862 | Val mean-roc_auc_score: 0.5989
207
+ 2025-09-18 16:53:37,148 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.5990
208
+ 2025-09-18 16:53:39,079 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.5994
209
+ 2025-09-18 16:53:43,565 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0866 | Val mean-roc_auc_score: 0.6007
210
+ 2025-09-18 16:53:48,533 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.6012
211
+ 2025-09-18 16:53:52,929 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0833 | Val mean-roc_auc_score: 0.5981
212
+ 2025-09-18 16:53:57,144 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0821 | Val mean-roc_auc_score: 0.6005
213
+ 2025-09-18 16:54:00,960 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0862 | Val mean-roc_auc_score: 0.6004
214
+ 2025-09-18 16:54:05,352 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0826 | Val mean-roc_auc_score: 0.6028
215
+ 2025-09-18 16:54:07,759 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0804 | Val mean-roc_auc_score: 0.6027
216
+ 2025-09-18 16:54:12,119 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0742 | Val mean-roc_auc_score: 0.5971
217
+ 2025-09-18 16:54:16,597 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.5978
218
+ 2025-09-18 16:54:21,265 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.5980
219
+ 2025-09-18 16:54:26,705 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0852 | Val mean-roc_auc_score: 0.6107
220
+ 2025-09-18 16:54:31,447 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.6065
221
+ 2025-09-18 16:54:35,936 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0786 | Val mean-roc_auc_score: 0.6023
222
+ 2025-09-18 16:54:38,238 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0792 | Val mean-roc_auc_score: 0.5955
223
+ 2025-09-18 16:54:42,831 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.6010
224
+ 2025-09-18 16:54:47,335 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.6055
225
+ 2025-09-18 16:54:52,243 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0773 | Val mean-roc_auc_score: 0.5973
226
+ 2025-09-18 16:54:57,106 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5977
227
+ 2025-09-18 16:55:02,058 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.6006
228
+ 2025-09-18 16:55:06,889 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0775 | Val mean-roc_auc_score: 0.6018
229
+ 2025-09-18 16:55:09,000 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.6025
230
+ 2025-09-18 16:55:14,215 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.5980
231
+ 2025-09-18 16:55:18,766 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0755 | Val mean-roc_auc_score: 0.5981
232
+ 2025-09-18 16:55:23,292 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.5979
233
+ 2025-09-18 16:55:27,834 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0790 | Val mean-roc_auc_score: 0.6059
234
+ 2025-09-18 16:55:28,208 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6575
235
+ 2025-09-18 16:55:28,709 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset sider at 2025-09-18_16-55-28
236
+ 2025-09-18 16:55:32,789 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5321 | Val mean-roc_auc_score: 0.5521
237
+ 2025-09-18 16:55:32,789 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
238
+ 2025-09-18 16:55:33,471 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5521
239
+ 2025-09-18 16:55:35,506 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5036 | Val mean-roc_auc_score: 0.5698
240
+ 2025-09-18 16:55:35,704 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
241
+ 2025-09-18 16:55:36,274 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5698
242
+ 2025-09-18 16:55:40,908 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4969 | Val mean-roc_auc_score: 0.5917
243
+ 2025-09-18 16:55:41,079 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
244
+ 2025-09-18 16:55:41,604 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5917
245
+ 2025-09-18 16:55:46,193 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4786 | Val mean-roc_auc_score: 0.5737
246
+ 2025-09-18 16:55:50,421 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4464 | Val mean-roc_auc_score: 0.5893
247
+ 2025-09-18 16:55:54,871 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4188 | Val mean-roc_auc_score: 0.5902
248
+ 2025-09-18 16:56:00,118 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3839 | Val mean-roc_auc_score: 0.5610
249
+ 2025-09-18 16:56:04,404 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3643 | Val mean-roc_auc_score: 0.5748
250
+ 2025-09-18 16:56:06,506 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3292 | Val mean-roc_auc_score: 0.5761
251
+ 2025-09-18 16:56:10,971 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3071 | Val mean-roc_auc_score: 0.5757
252
+ 2025-09-18 16:56:15,545 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.3000 | Val mean-roc_auc_score: 0.5948
253
+ 2025-09-18 16:56:16,155 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 385
254
+ 2025-09-18 16:56:16,722 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.5948
255
+ 2025-09-18 16:56:21,174 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2672 | Val mean-roc_auc_score: 0.5876
256
+ 2025-09-18 16:56:25,437 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2554 | Val mean-roc_auc_score: 0.5770
257
+ 2025-09-18 16:56:30,153 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2446 | Val mean-roc_auc_score: 0.5869
258
+ 2025-09-18 16:56:34,920 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2362 | Val mean-roc_auc_score: 0.5929
259
+ 2025-09-18 16:56:37,051 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2357 | Val mean-roc_auc_score: 0.5873
260
+ 2025-09-18 16:56:42,492 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.2188 | Val mean-roc_auc_score: 0.5849
261
+ 2025-09-18 16:56:46,925 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.2073 | Val mean-roc_auc_score: 0.5800
262
+ 2025-09-18 16:56:51,820 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1955 | Val mean-roc_auc_score: 0.5806
263
+ 2025-09-18 16:56:56,500 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1875 | Val mean-roc_auc_score: 0.5891
264
+ 2025-09-18 16:57:01,588 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1866 | Val mean-roc_auc_score: 0.5690
265
+ 2025-09-18 16:57:06,778 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1786 | Val mean-roc_auc_score: 0.5873
266
+ 2025-09-18 16:57:09,173 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1750 | Val mean-roc_auc_score: 0.5781
267
+ 2025-09-18 16:57:14,079 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1670 | Val mean-roc_auc_score: 0.5899
268
+ 2025-09-18 16:57:18,980 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1616 | Val mean-roc_auc_score: 0.6006
269
+ 2025-09-18 16:57:19,129 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 875
270
+ 2025-09-18 16:57:19,682 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 25 with val mean-roc_auc_score: 0.6006
271
+ 2025-09-18 16:57:24,522 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1656 | Val mean-roc_auc_score: 0.5998
272
+ 2025-09-18 16:57:29,869 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1509 | Val mean-roc_auc_score: 0.5958
273
+ 2025-09-18 16:57:34,564 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1589 | Val mean-roc_auc_score: 0.6025
274
+ 2025-09-18 16:57:34,747 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 980
275
+ 2025-09-18 16:57:35,305 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val mean-roc_auc_score: 0.6025
276
+ 2025-09-18 16:57:38,639 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1552 | Val mean-roc_auc_score: 0.5949
277
+ 2025-09-18 16:57:43,648 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1464 | Val mean-roc_auc_score: 0.5918
278
+ 2025-09-18 16:57:48,296 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1411 | Val mean-roc_auc_score: 0.5836
279
+ 2025-09-18 16:57:53,775 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1484 | Val mean-roc_auc_score: 0.5795
280
+ 2025-09-18 16:57:58,687 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1339 | Val mean-roc_auc_score: 0.5906
281
+ 2025-09-18 16:58:03,660 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1295 | Val mean-roc_auc_score: 0.5813
282
+ 2025-09-18 16:58:05,718 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1331 | Val mean-roc_auc_score: 0.5848
283
+ 2025-09-18 16:58:10,336 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1277 | Val mean-roc_auc_score: 0.5911
284
+ 2025-09-18 16:58:15,638 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1232 | Val mean-roc_auc_score: 0.5739
285
+ 2025-09-18 16:58:20,064 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1214 | Val mean-roc_auc_score: 0.5733
286
+ 2025-09-18 16:58:24,513 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.5729
287
+ 2025-09-18 16:58:28,937 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1179 | Val mean-roc_auc_score: 0.5772
288
+ 2025-09-18 16:58:33,435 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1196 | Val mean-roc_auc_score: 0.5876
289
+ 2025-09-18 16:58:35,950 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1196 | Val mean-roc_auc_score: 0.5825
290
+ 2025-09-18 16:58:40,235 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1125 | Val mean-roc_auc_score: 0.5822
291
+ 2025-09-18 16:58:44,458 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.5818
292
+ 2025-09-18 16:58:49,285 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.1116 | Val mean-roc_auc_score: 0.5910
293
+ 2025-09-18 16:58:54,054 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1148 | Val mean-roc_auc_score: 0.5861
294
+ 2025-09-18 16:58:59,095 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.5864
295
+ 2025-09-18 16:59:03,774 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.5871
296
+ 2025-09-18 16:59:06,262 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.1083 | Val mean-roc_auc_score: 0.5919
297
+ 2025-09-18 16:59:11,187 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.1027 | Val mean-roc_auc_score: 0.5870
298
+ 2025-09-18 16:59:16,088 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.5908
299
+ 2025-09-18 16:59:21,467 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.5844
300
+ 2025-09-18 16:59:26,252 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.1009 | Val mean-roc_auc_score: 0.5861
301
+ 2025-09-18 16:59:31,055 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0987 | Val mean-roc_auc_score: 0.5922
302
+ 2025-09-18 16:59:35,440 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0969 | Val mean-roc_auc_score: 0.5844
303
+ 2025-09-18 16:59:37,867 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0964 | Val mean-roc_auc_score: 0.5849
304
+ 2025-09-18 16:59:43,139 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0978 | Val mean-roc_auc_score: 0.5897
305
+ 2025-09-18 16:59:48,889 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.1016 | Val mean-roc_auc_score: 0.5858
306
+ 2025-09-18 16:59:53,385 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0978 | Val mean-roc_auc_score: 0.5824
307
+ 2025-09-18 16:59:57,909 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0942 | Val mean-roc_auc_score: 0.5837
308
+ 2025-09-18 17:00:02,294 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0942 | Val mean-roc_auc_score: 0.5887
309
+ 2025-09-18 17:00:07,306 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0973 | Val mean-roc_auc_score: 0.5906
310
+ 2025-09-18 17:00:09,415 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0906 | Val mean-roc_auc_score: 0.5854
311
+ 2025-09-18 17:00:13,872 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0920 | Val mean-roc_auc_score: 0.5910
312
+ 2025-09-18 17:00:18,179 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0929 | Val mean-roc_auc_score: 0.5942
313
+ 2025-09-18 17:00:22,695 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0926 | Val mean-roc_auc_score: 0.5881
314
+ 2025-09-18 17:00:27,875 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0906 | Val mean-roc_auc_score: 0.5911
315
+ 2025-09-18 17:00:32,387 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0906 | Val mean-roc_auc_score: 0.5911
316
+ 2025-09-18 17:00:36,862 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.5825
317
+ 2025-09-18 17:00:38,835 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0884 | Val mean-roc_auc_score: 0.5936
318
+ 2025-09-18 17:00:43,383 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.5903
319
+ 2025-09-18 17:00:48,778 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0910 | Val mean-roc_auc_score: 0.5909
320
+ 2025-09-18 17:00:53,407 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0866 | Val mean-roc_auc_score: 0.5984
321
+ 2025-09-18 17:00:58,220 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.5857
322
+ 2025-09-18 17:01:02,961 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0856 | Val mean-roc_auc_score: 0.5860
323
+ 2025-09-18 17:01:07,967 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.5879
324
+ 2025-09-18 17:01:11,059 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.5903
325
+ 2025-09-18 17:01:16,079 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0818 | Val mean-roc_auc_score: 0.5882
326
+ 2025-09-18 17:01:21,418 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0835 | Val mean-roc_auc_score: 0.5917
327
+ 2025-09-18 17:01:26,395 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.5859
328
+ 2025-09-18 17:01:31,084 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.5866
329
+ 2025-09-18 17:01:36,346 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0821 | Val mean-roc_auc_score: 0.5973
330
+ 2025-09-18 17:01:38,876 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0887 | Val mean-roc_auc_score: 0.5962
331
+ 2025-09-18 17:01:43,190 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0804 | Val mean-roc_auc_score: 0.5919
332
+ 2025-09-18 17:01:47,841 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6031
333
+ 2025-09-18 17:01:47,998 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 2975
334
+ 2025-09-18 17:01:48,659 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 85 with val mean-roc_auc_score: 0.6031
335
+ 2025-09-18 17:01:53,754 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0836 | Val mean-roc_auc_score: 0.5904
336
+ 2025-09-18 17:01:59,237 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.5838
337
+ 2025-09-18 17:02:03,887 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.5958
338
+ 2025-09-18 17:02:06,076 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0807 | Val mean-roc_auc_score: 0.5914
339
+ 2025-09-18 17:02:11,068 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.5929
340
+ 2025-09-18 17:02:15,376 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5842
341
+ 2025-09-18 17:02:20,193 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.5902
342
+ 2025-09-18 17:02:24,462 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.5964
343
+ 2025-09-18 17:02:28,829 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5886
344
+ 2025-09-18 17:02:33,139 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0819 | Val mean-roc_auc_score: 0.5929
345
+ 2025-09-18 17:02:37,394 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0790 | Val mean-roc_auc_score: 0.5946
346
+ 2025-09-18 17:02:39,958 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5941
347
+ 2025-09-18 17:02:44,525 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0766 | Val mean-roc_auc_score: 0.5867
348
+ 2025-09-18 17:02:49,143 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0786 | Val mean-roc_auc_score: 0.5869
349
+ 2025-09-18 17:02:53,718 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.5858
350
+ 2025-09-18 17:02:54,188 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6311
351
+ 2025-09-18 17:02:54,703 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.6419, Std Dev: 0.0113
logs_modchembert_classification_ModChemBERT-MLM/modchembert_deepchem_splits_run_tox21_epochs100_batch_size32_20250918_153929.log ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:39:29,417 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Running benchmark for dataset: tox21
2
+ 2025-09-18 15:39:29,417 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - dataset: tox21, tasks: ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 15:39:29,424 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset tox21 at 2025-09-18_15-39-29
4
+ 2025-09-18 15:39:40,639 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1784 | Val mean-roc_auc_score: 0.7319
5
+ 2025-09-18 15:39:40,639 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
6
+ 2025-09-18 15:39:41,229 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7319
7
+ 2025-09-18 15:39:52,243 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1671 | Val mean-roc_auc_score: 0.7497
8
+ 2025-09-18 15:39:52,375 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
9
+ 2025-09-18 15:39:52,881 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7497
10
+ 2025-09-18 15:40:06,106 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1534 | Val mean-roc_auc_score: 0.7629
11
+ 2025-09-18 15:40:06,274 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
12
+ 2025-09-18 15:40:06,834 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7629
13
+ 2025-09-18 15:40:18,009 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1518 | Val mean-roc_auc_score: 0.7666
14
+ 2025-09-18 15:40:18,173 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 784
15
+ 2025-09-18 15:40:18,660 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7666
16
+ 2025-09-18 15:40:31,946 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1445 | Val mean-roc_auc_score: 0.7652
17
+ 2025-09-18 15:40:45,401 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1414 | Val mean-roc_auc_score: 0.7568
18
+ 2025-09-18 15:40:56,249 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1389 | Val mean-roc_auc_score: 0.7487
19
+ 2025-09-18 15:41:08,833 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1241 | Val mean-roc_auc_score: 0.7472
20
+ 2025-09-18 15:41:19,228 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1270 | Val mean-roc_auc_score: 0.7474
21
+ 2025-09-18 15:41:31,579 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1135 | Val mean-roc_auc_score: 0.7396
22
+ 2025-09-18 15:41:45,412 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1099 | Val mean-roc_auc_score: 0.7319
23
+ 2025-09-18 15:41:56,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1028 | Val mean-roc_auc_score: 0.7281
24
+ 2025-09-18 15:42:08,453 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1003 | Val mean-roc_auc_score: 0.7349
25
+ 2025-09-18 15:42:19,043 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1009 | Val mean-roc_auc_score: 0.7381
26
+ 2025-09-18 15:42:32,008 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0984 | Val mean-roc_auc_score: 0.7251
27
+ 2025-09-18 15:42:45,409 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0877 | Val mean-roc_auc_score: 0.7315
28
+ 2025-09-18 15:42:56,451 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0981 | Val mean-roc_auc_score: 0.7236
29
+ 2025-09-18 15:43:09,572 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0915 | Val mean-roc_auc_score: 0.7292
30
+ 2025-09-18 15:43:22,551 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0885 | Val mean-roc_auc_score: 0.7256
31
+ 2025-09-18 15:43:36,008 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0855 | Val mean-roc_auc_score: 0.7275
32
+ 2025-09-18 15:43:48,170 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0903 | Val mean-roc_auc_score: 0.7167
33
+ 2025-09-18 15:44:01,993 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0794 | Val mean-roc_auc_score: 0.7278
34
+ 2025-09-18 15:44:15,340 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0845 | Val mean-roc_auc_score: 0.7280
35
+ 2025-09-18 15:44:26,000 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1104 | Val mean-roc_auc_score: 0.7258
36
+ 2025-09-18 15:44:38,634 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.7198
37
+ 2025-09-18 15:44:50,636 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.7170
38
+ 2025-09-18 15:45:04,068 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0707 | Val mean-roc_auc_score: 0.7272
39
+ 2025-09-18 15:45:14,667 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.7191
40
+ 2025-09-18 15:45:27,232 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0755 | Val mean-roc_auc_score: 0.7104
41
+ 2025-09-18 15:45:39,908 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0730 | Val mean-roc_auc_score: 0.7187
42
+ 2025-09-18 15:45:50,657 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0678 | Val mean-roc_auc_score: 0.7161
43
+ 2025-09-18 15:46:03,631 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0686 | Val mean-roc_auc_score: 0.7155
44
+ 2025-09-18 15:46:16,601 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0657 | Val mean-roc_auc_score: 0.7076
45
+ 2025-09-18 15:46:26,853 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0684 | Val mean-roc_auc_score: 0.7142
46
+ 2025-09-18 15:46:39,741 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0661 | Val mean-roc_auc_score: 0.7119
47
+ 2025-09-18 15:46:50,838 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0667 | Val mean-roc_auc_score: 0.7281
48
+ 2025-09-18 15:47:04,257 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0661 | Val mean-roc_auc_score: 0.7096
49
+ 2025-09-18 15:47:14,926 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0677 | Val mean-roc_auc_score: 0.7124
50
+ 2025-09-18 15:47:27,856 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0675 | Val mean-roc_auc_score: 0.7186
51
+ 2025-09-18 15:47:41,032 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0609 | Val mean-roc_auc_score: 0.7233
52
+ 2025-09-18 15:47:52,473 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0686 | Val mean-roc_auc_score: 0.7231
53
+ 2025-09-18 15:48:05,599 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0630 | Val mean-roc_auc_score: 0.7159
54
+ 2025-09-18 15:48:15,795 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0698 | Val mean-roc_auc_score: 0.7255
55
+ 2025-09-18 15:48:28,503 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0671 | Val mean-roc_auc_score: 0.7039
56
+ 2025-09-18 15:48:41,857 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0617 | Val mean-roc_auc_score: 0.7136
57
+ 2025-09-18 15:48:53,758 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0693 | Val mean-roc_auc_score: 0.7230
58
+ 2025-09-18 15:49:07,360 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7084
59
+ 2025-09-18 15:49:17,559 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0757 | Val mean-roc_auc_score: 0.7100
60
+ 2025-09-18 15:49:30,550 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0850 | Val mean-roc_auc_score: 0.7126
61
+ 2025-09-18 15:49:43,594 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0544 | Val mean-roc_auc_score: 0.7131
62
+ 2025-09-18 15:49:53,835 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0602 | Val mean-roc_auc_score: 0.7090
63
+ 2025-09-18 15:50:08,373 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0577 | Val mean-roc_auc_score: 0.7094
64
+ 2025-09-18 15:50:18,966 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0565 | Val mean-roc_auc_score: 0.7039
65
+ 2025-09-18 15:50:32,083 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7084
66
+ 2025-09-18 15:50:44,768 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.6998
67
+ 2025-09-18 15:50:55,249 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0572 | Val mean-roc_auc_score: 0.7101
68
+ 2025-09-18 15:51:09,954 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0538 | Val mean-roc_auc_score: 0.7178
69
+ 2025-09-18 15:51:20,635 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0531 | Val mean-roc_auc_score: 0.7041
70
+ 2025-09-18 15:51:33,906 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0588 | Val mean-roc_auc_score: 0.7156
71
+ 2025-09-18 15:51:44,629 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7149
72
+ 2025-09-18 15:51:57,412 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7185
73
+ 2025-09-18 15:52:11,846 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0556 | Val mean-roc_auc_score: 0.7083
74
+ 2025-09-18 15:52:22,330 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0550 | Val mean-roc_auc_score: 0.7103
75
+ 2025-09-18 15:52:35,679 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0554 | Val mean-roc_auc_score: 0.7089
76
+ 2025-09-18 15:52:46,621 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7121
77
+ 2025-09-18 15:52:59,839 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0514 | Val mean-roc_auc_score: 0.7050
78
+ 2025-09-18 15:53:14,873 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0559 | Val mean-roc_auc_score: 0.7080
79
+ 2025-09-18 15:53:25,301 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0589 | Val mean-roc_auc_score: 0.7092
80
+ 2025-09-18 15:53:38,934 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0540 | Val mean-roc_auc_score: 0.7095
81
+ 2025-09-18 15:53:49,786 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0488 | Val mean-roc_auc_score: 0.6998
82
+ 2025-09-18 15:54:02,947 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0542 | Val mean-roc_auc_score: 0.7139
83
+ 2025-09-18 15:54:15,191 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0456 | Val mean-roc_auc_score: 0.7032
84
+ 2025-09-18 15:54:27,600 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.6998
85
+ 2025-09-18 15:54:40,677 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0549 | Val mean-roc_auc_score: 0.7048
86
+ 2025-09-18 15:54:51,277 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0528 | Val mean-roc_auc_score: 0.7083
87
+ 2025-09-18 15:55:04,400 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0488 | Val mean-roc_auc_score: 0.7061
88
+ 2025-09-18 15:55:16,446 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0520 | Val mean-roc_auc_score: 0.7076
89
+ 2025-09-18 15:55:29,240 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0522 | Val mean-roc_auc_score: 0.7154
90
+ 2025-09-18 15:55:42,926 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0521 | Val mean-roc_auc_score: 0.7081
91
+ 2025-09-18 15:55:54,930 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0494 | Val mean-roc_auc_score: 0.7063
92
+ 2025-09-18 15:56:07,926 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0526 | Val mean-roc_auc_score: 0.7152
93
+ 2025-09-18 15:56:19,706 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0525 | Val mean-roc_auc_score: 0.7065
94
+ 2025-09-18 15:56:32,707 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0522 | Val mean-roc_auc_score: 0.7115
95
+ 2025-09-18 15:56:46,504 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0498 | Val mean-roc_auc_score: 0.7067
96
+ 2025-09-18 15:56:57,708 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0531 | Val mean-roc_auc_score: 0.6962
97
+ 2025-09-18 15:57:10,757 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.7084
98
+ 2025-09-18 15:57:22,533 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0532 | Val mean-roc_auc_score: 0.7048
99
+ 2025-09-18 15:57:35,697 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7078
100
+ 2025-09-18 15:57:46,698 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0533 | Val mean-roc_auc_score: 0.7111
101
+ 2025-09-18 15:57:59,595 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.7069
102
+ 2025-09-18 15:58:13,009 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0454 | Val mean-roc_auc_score: 0.7078
103
+ 2025-09-18 15:58:25,030 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0500 | Val mean-roc_auc_score: 0.7019
104
+ 2025-09-18 15:58:38,228 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.7026
105
+ 2025-09-18 15:58:49,138 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0521 | Val mean-roc_auc_score: 0.7018
106
+ 2025-09-18 15:59:02,454 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0527 | Val mean-roc_auc_score: 0.7018
107
+ 2025-09-18 15:59:15,421 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0503 | Val mean-roc_auc_score: 0.7095
108
+ 2025-09-18 15:59:27,386 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0492 | Val mean-roc_auc_score: 0.7060
109
+ 2025-09-18 15:59:40,794 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0481 | Val mean-roc_auc_score: 0.7045
110
+ 2025-09-18 15:59:51,458 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0464 | Val mean-roc_auc_score: 0.7005
111
+ 2025-09-18 16:00:03,978 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0494 | Val mean-roc_auc_score: 0.7018
112
+ 2025-09-18 16:00:04,873 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7462
113
+ 2025-09-18 16:00:05,223 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset tox21 at 2025-09-18_16-00-05
114
+ 2025-09-18 16:00:15,437 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1797 | Val mean-roc_auc_score: 0.7295
115
+ 2025-09-18 16:00:15,437 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
116
+ 2025-09-18 16:00:16,352 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7295
117
+ 2025-09-18 16:00:26,485 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1658 | Val mean-roc_auc_score: 0.7447
118
+ 2025-09-18 16:00:26,613 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
119
+ 2025-09-18 16:00:27,096 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7447
120
+ 2025-09-18 16:00:39,999 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1534 | Val mean-roc_auc_score: 0.7587
121
+ 2025-09-18 16:00:40,162 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
122
+ 2025-09-18 16:00:40,654 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7587
123
+ 2025-09-18 16:00:51,777 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1436 | Val mean-roc_auc_score: 0.7470
124
+ 2025-09-18 16:01:05,536 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1437 | Val mean-roc_auc_score: 0.7409
125
+ 2025-09-18 16:01:17,860 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1423 | Val mean-roc_auc_score: 0.7509
126
+ 2025-09-18 16:01:31,136 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1276 | Val mean-roc_auc_score: 0.7362
127
+ 2025-09-18 16:01:44,461 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1222 | Val mean-roc_auc_score: 0.7327
128
+ 2025-09-18 16:01:55,546 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1206 | Val mean-roc_auc_score: 0.7325
129
+ 2025-09-18 16:02:08,843 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.7255
130
+ 2025-09-18 16:02:21,101 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1144 | Val mean-roc_auc_score: 0.7328
131
+ 2025-09-18 16:02:35,209 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1004 | Val mean-roc_auc_score: 0.7281
132
+ 2025-09-18 16:02:46,361 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0983 | Val mean-roc_auc_score: 0.7103
133
+ 2025-09-18 16:02:59,489 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1009 | Val mean-roc_auc_score: 0.7253
134
+ 2025-09-18 16:03:12,389 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0930 | Val mean-roc_auc_score: 0.7186
135
+ 2025-09-18 16:03:23,523 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0920 | Val mean-roc_auc_score: 0.7057
136
+ 2025-09-18 16:03:37,232 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0840 | Val mean-roc_auc_score: 0.7167
137
+ 2025-09-18 16:03:47,925 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0926 | Val mean-roc_auc_score: 0.7072
138
+ 2025-09-18 16:04:00,714 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0905 | Val mean-roc_auc_score: 0.7035
139
+ 2025-09-18 16:04:13,658 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0867 | Val mean-roc_auc_score: 0.7055
140
+ 2025-09-18 16:04:30,174 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0698 | Val mean-roc_auc_score: 0.7089
141
+ 2025-09-18 16:04:37,422 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.7110
142
+ 2025-09-18 16:04:50,218 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0771 | Val mean-roc_auc_score: 0.7024
143
+ 2025-09-18 16:05:03,040 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0603 | Val mean-roc_auc_score: 0.7072
144
+ 2025-09-18 16:05:13,425 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.7090
145
+ 2025-09-18 16:05:26,876 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.7034
146
+ 2025-09-18 16:05:37,930 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0740 | Val mean-roc_auc_score: 0.7057
147
+ 2025-09-18 16:05:51,680 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0742 | Val mean-roc_auc_score: 0.7078
148
+ 2025-09-18 16:06:02,600 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.7075
149
+ 2025-09-18 16:06:16,496 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0699 | Val mean-roc_auc_score: 0.7085
150
+ 2025-09-18 16:06:30,132 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0691 | Val mean-roc_auc_score: 0.7076
151
+ 2025-09-18 16:06:40,531 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0673 | Val mean-roc_auc_score: 0.7097
152
+ 2025-09-18 16:06:53,346 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.7012
153
+ 2025-09-18 16:07:03,708 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0669 | Val mean-roc_auc_score: 0.7035
154
+ 2025-09-18 16:07:17,055 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0620 | Val mean-roc_auc_score: 0.6970
155
+ 2025-09-18 16:07:30,723 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0647 | Val mean-roc_auc_score: 0.7029
156
+ 2025-09-18 16:07:42,049 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.6998
157
+ 2025-09-18 16:07:54,942 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0667 | Val mean-roc_auc_score: 0.7106
158
+ 2025-09-18 16:08:05,195 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0700 | Val mean-roc_auc_score: 0.7038
159
+ 2025-09-18 16:08:18,512 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0605 | Val mean-roc_auc_score: 0.7058
160
+ 2025-09-18 16:08:32,737 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0647 | Val mean-roc_auc_score: 0.7048
161
+ 2025-09-18 16:08:44,020 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0640 | Val mean-roc_auc_score: 0.7017
162
+ 2025-09-18 16:08:57,343 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0594 | Val mean-roc_auc_score: 0.7106
163
+ 2025-09-18 16:09:07,883 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0661 | Val mean-roc_auc_score: 0.7027
164
+ 2025-09-18 16:09:21,199 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0727 | Val mean-roc_auc_score: 0.7040
165
+ 2025-09-18 16:09:32,756 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7069
166
+ 2025-09-18 16:09:46,655 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0615 | Val mean-roc_auc_score: 0.7076
167
+ 2025-09-18 16:09:59,235 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0649 | Val mean-roc_auc_score: 0.7057
168
+ 2025-09-18 16:10:10,028 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0796 | Val mean-roc_auc_score: 0.7051
169
+ 2025-09-18 16:10:23,643 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7006
170
+ 2025-09-18 16:10:34,138 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7076
171
+ 2025-09-18 16:10:48,869 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7087
172
+ 2025-09-18 16:11:02,044 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0565 | Val mean-roc_auc_score: 0.6996
173
+ 2025-09-18 16:11:13,319 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7150
174
+ 2025-09-18 16:11:26,311 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0555 | Val mean-roc_auc_score: 0.7049
175
+ 2025-09-18 16:11:36,411 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7051
176
+ 2025-09-18 16:11:50,418 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0551 | Val mean-roc_auc_score: 0.7047
177
+ 2025-09-18 16:12:03,109 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7055
178
+ 2025-09-18 16:12:14,399 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.6981
179
+ 2025-09-18 16:12:28,387 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.7110
180
+ 2025-09-18 16:12:39,712 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7152
181
+ 2025-09-18 16:12:54,740 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0520 | Val mean-roc_auc_score: 0.7117
182
+ 2025-09-18 16:13:06,201 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.7076
183
+ 2025-09-18 16:13:20,397 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0533 | Val mean-roc_auc_score: 0.7049
184
+ 2025-09-18 16:13:31,915 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0527 | Val mean-roc_auc_score: 0.7032
185
+ 2025-09-18 16:13:45,528 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0603 | Val mean-roc_auc_score: 0.7048
186
+ 2025-09-18 16:13:59,853 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0535 | Val mean-roc_auc_score: 0.6984
187
+ 2025-09-18 16:14:10,502 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0555 | Val mean-roc_auc_score: 0.7005
188
+ 2025-09-18 16:14:23,575 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7063
189
+ 2025-09-18 16:14:34,665 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0582 | Val mean-roc_auc_score: 0.6970
190
+ 2025-09-18 16:14:47,943 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0635 | Val mean-roc_auc_score: 0.7013
191
+ 2025-09-18 16:15:02,548 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0576 | Val mean-roc_auc_score: 0.7070
192
+ 2025-09-18 16:15:13,033 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0498 | Val mean-roc_auc_score: 0.7047
193
+ 2025-09-18 16:15:26,172 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0513 | Val mean-roc_auc_score: 0.7187
194
+ 2025-09-18 16:15:36,484 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0516 | Val mean-roc_auc_score: 0.7028
195
+ 2025-09-18 16:15:49,629 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0511 | Val mean-roc_auc_score: 0.7059
196
+ 2025-09-18 16:16:03,890 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0523 | Val mean-roc_auc_score: 0.7003
197
+ 2025-09-18 16:16:14,262 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0490 | Val mean-roc_auc_score: 0.7012
198
+ 2025-09-18 16:16:26,891 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0495 | Val mean-roc_auc_score: 0.7079
199
+ 2025-09-18 16:16:37,400 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0504 | Val mean-roc_auc_score: 0.6993
200
+ 2025-09-18 16:16:50,101 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0495 | Val mean-roc_auc_score: 0.7045
201
+ 2025-09-18 16:17:04,060 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0549 | Val mean-roc_auc_score: 0.7024
202
+ 2025-09-18 16:17:14,451 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0473 | Val mean-roc_auc_score: 0.7110
203
+ 2025-09-18 16:17:27,202 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0503 | Val mean-roc_auc_score: 0.6938
204
+ 2025-09-18 16:17:37,095 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0536 | Val mean-roc_auc_score: 0.6976
205
+ 2025-09-18 16:17:49,430 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0497 | Val mean-roc_auc_score: 0.7148
206
+ 2025-09-18 16:18:03,450 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0493 | Val mean-roc_auc_score: 0.7113
207
+ 2025-09-18 16:18:13,627 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0488 | Val mean-roc_auc_score: 0.7024
208
+ 2025-09-18 16:18:26,173 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0465 | Val mean-roc_auc_score: 0.6964
209
+ 2025-09-18 16:18:35,937 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.7050
210
+ 2025-09-18 16:18:48,498 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0512 | Val mean-roc_auc_score: 0.7112
211
+ 2025-09-18 16:19:02,181 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0474 | Val mean-roc_auc_score: 0.7055
212
+ 2025-09-18 16:19:12,194 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0485 | Val mean-roc_auc_score: 0.7055
213
+ 2025-09-18 16:19:24,754 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0511 | Val mean-roc_auc_score: 0.7001
214
+ 2025-09-18 16:19:34,738 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0455 | Val mean-roc_auc_score: 0.7071
215
+ 2025-09-18 16:19:47,243 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0481 | Val mean-roc_auc_score: 0.6946
216
+ 2025-09-18 16:20:01,342 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0495 | Val mean-roc_auc_score: 0.7088
217
+ 2025-09-18 16:20:11,359 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0483 | Val mean-roc_auc_score: 0.7067
218
+ 2025-09-18 16:20:23,828 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0640 | Val mean-roc_auc_score: 0.7132
219
+ 2025-09-18 16:20:33,669 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0491 | Val mean-roc_auc_score: 0.6984
220
+ 2025-09-18 16:20:34,512 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7359
221
+ 2025-09-18 16:20:34,867 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset tox21 at 2025-09-18_16-20-34
222
+ 2025-09-18 16:20:45,179 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1797 | Val mean-roc_auc_score: 0.7228
223
+ 2025-09-18 16:20:45,179 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
224
+ 2025-09-18 16:20:46,015 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7228
225
+ 2025-09-18 16:20:58,915 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1596 | Val mean-roc_auc_score: 0.7413
226
+ 2025-09-18 16:20:59,044 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
227
+ 2025-09-18 16:20:59,527 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7413
228
+ 2025-09-18 16:21:09,677 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1591 | Val mean-roc_auc_score: 0.7565
229
+ 2025-09-18 16:21:09,841 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
230
+ 2025-09-18 16:21:10,328 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7565
231
+ 2025-09-18 16:21:23,049 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1436 | Val mean-roc_auc_score: 0.7444
232
+ 2025-09-18 16:21:34,549 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1398 | Val mean-roc_auc_score: 0.7565
233
+ 2025-09-18 16:21:34,716 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 980
234
+ 2025-09-18 16:21:35,206 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7565
235
+ 2025-09-18 16:21:49,910 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1308 | Val mean-roc_auc_score: 0.7477
236
+ 2025-09-18 16:22:04,202 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1302 | Val mean-roc_auc_score: 0.7425
237
+ 2025-09-18 16:22:15,540 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1278 | Val mean-roc_auc_score: 0.7475
238
+ 2025-09-18 16:22:28,762 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1147 | Val mean-roc_auc_score: 0.7392
239
+ 2025-09-18 16:22:39,419 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1109 | Val mean-roc_auc_score: 0.7402
240
+ 2025-09-18 16:22:53,810 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1105 | Val mean-roc_auc_score: 0.7265
241
+ 2025-09-18 16:23:05,634 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1058 | Val mean-roc_auc_score: 0.7264
242
+ 2025-09-18 16:23:19,088 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0983 | Val mean-roc_auc_score: 0.7232
243
+ 2025-09-18 16:23:33,547 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0980 | Val mean-roc_auc_score: 0.7303
244
+ 2025-09-18 16:23:45,643 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0934 | Val mean-roc_auc_score: 0.7253
245
+ 2025-09-18 16:24:01,361 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0907 | Val mean-roc_auc_score: 0.7270
246
+ 2025-09-18 16:24:13,751 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0918 | Val mean-roc_auc_score: 0.7324
247
+ 2025-09-18 16:24:27,880 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0882 | Val mean-roc_auc_score: 0.7029
248
+ 2025-09-18 16:24:39,604 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0898 | Val mean-roc_auc_score: 0.7175
249
+ 2025-09-18 16:24:53,870 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0906 | Val mean-roc_auc_score: 0.7199
250
+ 2025-09-18 16:25:06,861 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0894 | Val mean-roc_auc_score: 0.7132
251
+ 2025-09-18 16:25:21,395 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0794 | Val mean-roc_auc_score: 0.7186
252
+ 2025-09-18 16:25:33,561 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0698 | Val mean-roc_auc_score: 0.7240
253
+ 2025-09-18 16:25:47,715 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1108 | Val mean-roc_auc_score: 0.7110
254
+ 2025-09-18 16:26:01,604 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.7142
255
+ 2025-09-18 16:26:13,826 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0736 | Val mean-roc_auc_score: 0.7203
256
+ 2025-09-18 16:26:28,444 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0693 | Val mean-roc_auc_score: 0.7085
257
+ 2025-09-18 16:26:39,575 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0735 | Val mean-roc_auc_score: 0.7140
258
+ 2025-09-18 16:26:52,715 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0740 | Val mean-roc_auc_score: 0.7207
259
+ 2025-09-18 16:27:03,937 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0715 | Val mean-roc_auc_score: 0.7202
260
+ 2025-09-18 16:27:18,605 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0707 | Val mean-roc_auc_score: 0.7178
261
+ 2025-09-18 16:27:32,411 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0734 | Val mean-roc_auc_score: 0.7089
262
+ 2025-09-18 16:27:43,384 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0694 | Val mean-roc_auc_score: 0.7118
263
+ 2025-09-18 16:27:57,114 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7117
264
+ 2025-09-18 16:28:08,637 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0688 | Val mean-roc_auc_score: 0.7169
265
+ 2025-09-18 16:28:23,685 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0656 | Val mean-roc_auc_score: 0.7141
266
+ 2025-09-18 16:28:35,540 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0667 | Val mean-roc_auc_score: 0.7166
267
+ 2025-09-18 16:28:49,505 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0687 | Val mean-roc_auc_score: 0.7119
268
+ 2025-09-18 16:29:02,870 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0682 | Val mean-roc_auc_score: 0.7140
269
+ 2025-09-18 16:29:13,581 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0637 | Val mean-roc_auc_score: 0.7074
270
+ 2025-09-18 16:29:28,112 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0751 | Val mean-roc_auc_score: 0.7178
271
+ 2025-09-18 16:29:39,407 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.6973
272
+ 2025-09-18 16:29:52,349 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0670 | Val mean-roc_auc_score: 0.7073
273
+ 2025-09-18 16:30:04,207 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7101
274
+ 2025-09-18 16:30:18,676 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0660 | Val mean-roc_auc_score: 0.7080
275
+ 2025-09-18 16:30:34,351 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0601 | Val mean-roc_auc_score: 0.7031
276
+ 2025-09-18 16:30:46,403 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0641 | Val mean-roc_auc_score: 0.7049
277
+ 2025-09-18 16:30:59,858 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0698 | Val mean-roc_auc_score: 0.7081
278
+ 2025-09-18 16:31:10,742 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0601 | Val mean-roc_auc_score: 0.7083
279
+ 2025-09-18 16:31:24,015 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7060
280
+ 2025-09-18 16:31:34,630 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7076
281
+ 2025-09-18 16:31:49,519 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0591 | Val mean-roc_auc_score: 0.7120
282
+ 2025-09-18 16:32:03,491 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0579 | Val mean-roc_auc_score: 0.6988
283
+ 2025-09-18 16:32:14,773 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0565 | Val mean-roc_auc_score: 0.7071
284
+ 2025-09-18 16:32:29,568 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.7087
285
+ 2025-09-18 16:32:41,431 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7064
286
+ 2025-09-18 16:32:57,124 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0551 | Val mean-roc_auc_score: 0.7044
287
+ 2025-09-18 16:33:08,657 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0540 | Val mean-roc_auc_score: 0.7127
288
+ 2025-09-18 16:33:22,484 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0569 | Val mean-roc_auc_score: 0.6980
289
+ 2025-09-18 16:33:33,672 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0568 | Val mean-roc_auc_score: 0.7035
290
+ 2025-09-18 16:33:47,582 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0605 | Val mean-roc_auc_score: 0.7058
291
+ 2025-09-18 16:34:02,361 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0577 | Val mean-roc_auc_score: 0.7037
292
+ 2025-09-18 16:34:13,004 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0602 | Val mean-roc_auc_score: 0.7055
293
+ 2025-09-18 16:34:26,695 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0568 | Val mean-roc_auc_score: 0.6993
294
+ 2025-09-18 16:34:36,958 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0570 | Val mean-roc_auc_score: 0.6971
295
+ 2025-09-18 16:34:50,809 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0540 | Val mean-roc_auc_score: 0.7004
296
+ 2025-09-18 16:35:05,739 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.6964
297
+ 2025-09-18 16:35:16,219 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0550 | Val mean-roc_auc_score: 0.7052
298
+ 2025-09-18 16:35:30,004 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.6998
299
+ 2025-09-18 16:35:41,320 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7001
300
+ 2025-09-18 16:35:55,132 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0520 | Val mean-roc_auc_score: 0.6988
301
+ 2025-09-18 16:36:07,811 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0524 | Val mean-roc_auc_score: 0.7102
302
+ 2025-09-18 16:36:21,716 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0635 | Val mean-roc_auc_score: 0.7015
303
+ 2025-09-18 16:36:35,404 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7038
304
+ 2025-09-18 16:36:46,268 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0506 | Val mean-roc_auc_score: 0.7015
305
+ 2025-09-18 16:36:59,770 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.6996
306
+ 2025-09-18 16:37:12,170 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0503 | Val mean-roc_auc_score: 0.7020
307
+ 2025-09-18 16:37:25,874 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0518 | Val mean-roc_auc_score: 0.7057
308
+ 2025-09-18 16:37:37,335 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0562 | Val mean-roc_auc_score: 0.7084
309
+ 2025-09-18 16:37:51,109 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0490 | Val mean-roc_auc_score: 0.7006
310
+ 2025-09-18 16:38:04,757 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0483 | Val mean-roc_auc_score: 0.7087
311
+ 2025-09-18 16:38:16,726 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0473 | Val mean-roc_auc_score: 0.7027
312
+ 2025-09-18 16:38:30,817 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0533 | Val mean-roc_auc_score: 0.7010
313
+ 2025-09-18 16:38:42,072 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0525 | Val mean-roc_auc_score: 0.7030
314
+ 2025-09-18 16:38:55,926 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0487 | Val mean-roc_auc_score: 0.7011
315
+ 2025-09-18 16:39:07,409 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0552 | Val mean-roc_auc_score: 0.7032
316
+ 2025-09-18 16:39:22,687 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0484 | Val mean-roc_auc_score: 0.6975
317
+ 2025-09-18 16:39:36,031 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0518 | Val mean-roc_auc_score: 0.7015
318
+ 2025-09-18 16:39:46,884 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0518 | Val mean-roc_auc_score: 0.7023
319
+ 2025-09-18 16:40:00,888 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.6996
320
+ 2025-09-18 16:40:12,205 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0519 | Val mean-roc_auc_score: 0.7108
321
+ 2025-09-18 16:40:28,118 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0452 | Val mean-roc_auc_score: 0.6952
322
+ 2025-09-18 16:40:40,284 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0494 | Val mean-roc_auc_score: 0.7045
323
+ 2025-09-18 16:40:54,794 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0531 | Val mean-roc_auc_score: 0.6968
324
+ 2025-09-18 16:41:06,694 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0504 | Val mean-roc_auc_score: 0.7086
325
+ 2025-09-18 16:41:20,880 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0461 | Val mean-roc_auc_score: 0.7000
326
+ 2025-09-18 16:41:36,221 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0540 | Val mean-roc_auc_score: 0.6983
327
+ 2025-09-18 16:41:47,869 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0486 | Val mean-roc_auc_score: 0.7001
328
+ 2025-09-18 16:42:01,996 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0596 | Val mean-roc_auc_score: 0.7018
329
+ 2025-09-18 16:42:13,792 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0484 | Val mean-roc_auc_score: 0.7076
330
+ 2025-09-18 16:42:14,723 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7380
331
+ 2025-09-18 16:42:15,142 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7400, Std Dev: 0.0044
logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_bace_regression_epochs100_batch_size32_20250918_151852.log ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:18:52,019 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Running benchmark for dataset: bace_regression
2
+ 2025-09-18 15:18:52,019 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - dataset: bace_regression, tasks: ['pIC50'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 15:18:52,025 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bace_regression at 2025-09-18_15-18-52
4
+ 2025-09-18 15:18:57,110 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.8553 | Val rms_score: 0.6951
5
+ 2025-09-18 15:18:57,110 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
6
+ 2025-09-18 15:18:57,903 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.6951
7
+ 2025-09-18 15:19:03,723 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4638 | Val rms_score: 0.7221
8
+ 2025-09-18 15:19:09,666 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3438 | Val rms_score: 0.7125
9
+ 2025-09-18 15:19:12,840 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2928 | Val rms_score: 0.7691
10
+ 2025-09-18 15:19:18,556 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2681 | Val rms_score: 0.6612
11
+ 2025-09-18 15:19:18,736 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 190
12
+ 2025-09-18 15:19:19,322 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.6612
13
+ 2025-09-18 15:19:25,225 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2266 | Val rms_score: 0.7377
14
+ 2025-09-18 15:19:31,243 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2188 | Val rms_score: 0.6986
15
+ 2025-09-18 15:19:37,080 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2002 | Val rms_score: 0.6346
16
+ 2025-09-18 15:19:37,242 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 304
17
+ 2025-09-18 15:19:37,716 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.6346
18
+ 2025-09-18 15:19:43,533 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1809 | Val rms_score: 0.7116
19
+ 2025-09-18 15:19:46,856 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1661 | Val rms_score: 0.6856
20
+ 2025-09-18 15:19:52,592 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1736 | Val rms_score: 0.6593
21
+ 2025-09-18 15:19:58,492 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1464 | Val rms_score: 0.7111
22
+ 2025-09-18 15:20:04,312 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1456 | Val rms_score: 0.7842
23
+ 2025-09-18 15:20:09,972 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1494 | Val rms_score: 0.6996
24
+ 2025-09-18 15:20:13,136 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1266 | Val rms_score: 0.7195
25
+ 2025-09-18 15:20:18,840 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1191 | Val rms_score: 0.7628
26
+ 2025-09-18 15:20:24,785 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1225 | Val rms_score: 0.7093
27
+ 2025-09-18 15:20:30,392 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1110 | Val rms_score: 0.7830
28
+ 2025-09-18 15:20:36,094 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1009 | Val rms_score: 0.7138
29
+ 2025-09-18 15:20:41,969 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1077 | Val rms_score: 0.7114
30
+ 2025-09-18 15:20:45,136 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0987 | Val rms_score: 0.6894
31
+ 2025-09-18 15:20:51,118 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0977 | Val rms_score: 0.7168
32
+ 2025-09-18 15:20:56,786 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0954 | Val rms_score: 0.8236
33
+ 2025-09-18 15:21:02,448 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0964 | Val rms_score: 0.8027
34
+ 2025-09-18 15:21:08,156 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0942 | Val rms_score: 0.7591
35
+ 2025-09-18 15:21:13,837 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0962 | Val rms_score: 0.7263
36
+ 2025-09-18 15:21:18,336 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0980 | Val rms_score: 0.7341
37
+ 2025-09-18 15:21:24,177 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0884 | Val rms_score: 0.7557
38
+ 2025-09-18 15:21:29,930 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1162 | Val rms_score: 0.7214
39
+ 2025-09-18 15:21:35,725 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0855 | Val rms_score: 0.7748
40
+ 2025-09-18 15:21:41,421 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0789 | Val rms_score: 0.7135
41
+ 2025-09-18 15:21:44,959 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0869 | Val rms_score: 0.6908
42
+ 2025-09-18 15:21:50,688 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0847 | Val rms_score: 0.7298
43
+ 2025-09-18 15:21:56,310 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0728 | Val rms_score: 0.7092
44
+ 2025-09-18 15:22:01,944 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0786 | Val rms_score: 0.7258
45
+ 2025-09-18 15:22:07,648 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0752 | Val rms_score: 0.7815
46
+ 2025-09-18 15:22:13,695 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0710 | Val rms_score: 0.7421
47
+ 2025-09-18 15:22:16,837 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0728 | Val rms_score: 0.6843
48
+ 2025-09-18 15:22:22,674 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0748 | Val rms_score: 0.7667
49
+ 2025-09-18 15:22:28,361 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0770 | Val rms_score: 0.7258
50
+ 2025-09-18 15:22:34,118 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0670 | Val rms_score: 0.7189
51
+ 2025-09-18 15:22:40,146 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0629 | Val rms_score: 0.7206
52
+ 2025-09-18 15:22:43,470 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0634 | Val rms_score: 0.7107
53
+ 2025-09-18 15:22:49,163 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0662 | Val rms_score: 0.6972
54
+ 2025-09-18 15:22:54,915 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0703 | Val rms_score: 0.7139
55
+ 2025-09-18 15:23:00,537 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0650 | Val rms_score: 0.7337
56
+ 2025-09-18 15:23:06,444 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0654 | Val rms_score: 0.7153
57
+ 2025-09-18 15:23:12,073 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0618 | Val rms_score: 0.7200
58
+ 2025-09-18 15:23:15,206 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0588 | Val rms_score: 0.7304
59
+ 2025-09-18 15:23:20,837 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0625 | Val rms_score: 0.7797
60
+ 2025-09-18 15:23:26,526 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0617 | Val rms_score: 0.7157
61
+ 2025-09-18 15:23:32,536 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0596 | Val rms_score: 0.7572
62
+ 2025-09-18 15:23:39,233 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0765 | Val rms_score: 0.8194
63
+ 2025-09-18 15:23:42,391 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0641 | Val rms_score: 0.7550
64
+ 2025-09-18 15:23:48,250 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0604 | Val rms_score: 0.7227
65
+ 2025-09-18 15:23:53,961 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0580 | Val rms_score: 0.7398
66
+ 2025-09-18 15:23:59,907 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0543 | Val rms_score: 0.7306
67
+ 2025-09-18 15:24:05,715 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0625 | Val rms_score: 0.7029
68
+ 2025-09-18 15:24:11,429 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0600 | Val rms_score: 0.7384
69
+ 2025-09-18 15:24:14,767 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0588 | Val rms_score: 0.7146
70
+ 2025-09-18 15:24:20,465 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0660 | Val rms_score: 0.7098
71
+ 2025-09-18 15:24:26,427 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0543 | Val rms_score: 0.7859
72
+ 2025-09-18 15:24:32,052 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0604 | Val rms_score: 0.7254
73
+ 2025-09-18 15:24:37,722 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0527 | Val rms_score: 0.7835
74
+ 2025-09-18 15:24:43,507 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0522 | Val rms_score: 0.7603
75
+ 2025-09-18 15:24:46,701 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0576 | Val rms_score: 0.7262
76
+ 2025-09-18 15:24:52,665 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0502 | Val rms_score: 0.7501
77
+ 2025-09-18 15:24:58,331 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0461 | Val rms_score: 0.7792
78
+ 2025-09-18 15:25:04,011 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0504 | Val rms_score: 0.7394
79
+ 2025-09-18 15:25:09,704 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0487 | Val rms_score: 0.7650
80
+ 2025-09-18 15:25:12,894 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0489 | Val rms_score: 0.7397
81
+ 2025-09-18 15:25:18,972 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0560 | Val rms_score: 0.7436
82
+ 2025-09-18 15:25:24,647 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0487 | Val rms_score: 0.7570
83
+ 2025-09-18 15:25:30,269 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0446 | Val rms_score: 0.7454
84
+ 2025-09-18 15:25:35,889 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0454 | Val rms_score: 0.7327
85
+ 2025-09-18 15:25:41,699 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0539 | Val rms_score: 0.7225
86
+ 2025-09-18 15:25:45,153 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0541 | Val rms_score: 0.7495
87
+ 2025-09-18 15:25:50,904 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0555 | Val rms_score: 0.7059
88
+ 2025-09-18 15:25:57,616 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0562 | Val rms_score: 0.7601
89
+ 2025-09-18 15:26:03,314 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0495 | Val rms_score: 0.7332
90
+ 2025-09-18 15:26:08,987 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0436 | Val rms_score: 0.7519
91
+ 2025-09-18 15:26:17,644 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0540 | Val rms_score: 0.7226
92
+ 2025-09-18 15:26:18,181 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0477 | Val rms_score: 0.7398
93
+ 2025-09-18 15:26:23,813 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0510 | Val rms_score: 0.7550
94
+ 2025-09-18 15:26:29,493 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0510 | Val rms_score: 0.7785
95
+ 2025-09-18 15:26:35,192 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0458 | Val rms_score: 0.7220
96
+ 2025-09-18 15:26:41,148 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0550 | Val rms_score: 0.7317
97
+ 2025-09-18 15:26:44,385 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0432 | Val rms_score: 0.7419
98
+ 2025-09-18 15:26:50,155 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0456 | Val rms_score: 0.7512
99
+ 2025-09-18 15:26:55,847 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0486 | Val rms_score: 0.7828
100
+ 2025-09-18 15:27:01,473 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0495 | Val rms_score: 0.7819
101
+ 2025-09-18 15:27:07,488 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0458 | Val rms_score: 0.7370
102
+ 2025-09-18 15:27:13,173 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0446 | Val rms_score: 0.7499
103
+ 2025-09-18 15:27:16,315 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0469 | Val rms_score: 0.7688
104
+ 2025-09-18 15:27:22,070 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0463 | Val rms_score: 0.7912
105
+ 2025-09-18 15:27:27,847 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0450 | Val rms_score: 0.7221
106
+ 2025-09-18 15:27:33,970 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0409 | Val rms_score: 0.7438
107
+ 2025-09-18 15:27:39,679 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0430 | Val rms_score: 0.7583
108
+ 2025-09-18 15:27:42,957 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0465 | Val rms_score: 0.8157
109
+ 2025-09-18 15:27:48,767 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0514 | Val rms_score: 0.7938
110
+ 2025-09-18 15:27:49,355 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 0.9044
111
+ 2025-09-18 15:27:49,653 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bace_regression at 2025-09-18_15-27-49
112
+ 2025-09-18 15:27:54,803 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.1184 | Val rms_score: 0.9056
113
+ 2025-09-18 15:27:54,803 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
114
+ 2025-09-18 15:27:55,408 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.9056
115
+ 2025-09-18 15:28:01,145 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5921 | Val rms_score: 0.6901
116
+ 2025-09-18 15:28:01,311 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 76
117
+ 2025-09-18 15:28:01,834 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.6901
118
+ 2025-09-18 15:28:07,942 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4040 | Val rms_score: 0.6543
119
+ 2025-09-18 15:28:08,117 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 114
120
+ 2025-09-18 15:28:08,621 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.6543
121
+ 2025-09-18 15:28:14,342 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.3766 | Val rms_score: 0.6808
122
+ 2025-09-18 15:28:17,589 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.3043 | Val rms_score: 0.7321
123
+ 2025-09-18 15:28:23,215 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2656 | Val rms_score: 0.7009
124
+ 2025-09-18 15:28:29,320 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2368 | Val rms_score: 0.6909
125
+ 2025-09-18 15:28:34,982 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1777 | Val rms_score: 0.8341
126
+ 2025-09-18 15:28:40,721 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1875 | Val rms_score: 0.6885
127
+ 2025-09-18 15:28:43,908 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1850 | Val rms_score: 0.7193
128
+ 2025-09-18 15:28:49,603 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1580 | Val rms_score: 0.7146
129
+ 2025-09-18 15:28:55,633 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1456 | Val rms_score: 0.7130
130
+ 2025-09-18 15:29:01,309 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1431 | Val rms_score: 0.8124
131
+ 2025-09-18 15:29:07,126 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1484 | Val rms_score: 0.7917
132
+ 2025-09-18 15:29:12,880 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1332 | Val rms_score: 0.7644
133
+ 2025-09-18 15:29:16,097 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1523 | Val rms_score: 0.7722
134
+ 2025-09-18 15:29:22,072 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1242 | Val rms_score: 0.7186
135
+ 2025-09-18 15:29:27,731 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1160 | Val rms_score: 0.7852
136
+ 2025-09-18 15:29:33,334 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1193 | Val rms_score: 0.7879
137
+ 2025-09-18 15:29:39,071 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1069 | Val rms_score: 0.7610
138
+ 2025-09-18 15:29:44,792 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0991 | Val rms_score: 0.7188
139
+ 2025-09-18 15:29:48,302 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1072 | Val rms_score: 0.7205
140
+ 2025-09-18 15:29:54,049 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0970 | Val rms_score: 0.7713
141
+ 2025-09-18 15:29:59,795 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0951 | Val rms_score: 0.7695
142
+ 2025-09-18 15:30:05,565 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0892 | Val rms_score: 0.7867
143
+ 2025-09-18 15:30:11,282 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0896 | Val rms_score: 0.7927
144
+ 2025-09-18 15:30:15,791 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0907 | Val rms_score: 0.7193
145
+ 2025-09-18 15:30:21,438 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0839 | Val rms_score: 0.7546
146
+ 2025-09-18 15:30:27,093 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0869 | Val rms_score: 0.7928
147
+ 2025-09-18 15:30:32,830 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0802 | Val rms_score: 0.7671
148
+ 2025-09-18 15:30:38,437 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0831 | Val rms_score: 0.8148
149
+ 2025-09-18 15:30:44,329 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0845 | Val rms_score: 0.7535
150
+ 2025-09-18 15:30:47,462 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0765 | Val rms_score: 0.7650
151
+ 2025-09-18 15:30:53,191 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0773 | Val rms_score: 0.7214
152
+ 2025-09-18 15:30:58,936 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0750 | Val rms_score: 0.7544
153
+ 2025-09-18 15:31:04,537 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0683 | Val rms_score: 0.7791
154
+ 2025-09-18 15:31:10,566 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0768 | Val rms_score: 0.7473
155
+ 2025-09-18 15:31:13,684 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0748 | Val rms_score: 0.7065
156
+ 2025-09-18 15:31:19,453 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0736 | Val rms_score: 0.7430
157
+ 2025-09-18 15:31:25,103 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0648 | Val rms_score: 0.7597
158
+ 2025-09-18 15:31:30,979 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0724 | Val rms_score: 0.7320
159
+ 2025-09-18 15:31:37,034 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0740 | Val rms_score: 0.7278
160
+ 2025-09-18 15:31:42,773 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0703 | Val rms_score: 0.7584
161
+ 2025-09-18 15:31:45,896 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0654 | Val rms_score: 0.7276
162
+ 2025-09-18 15:31:51,635 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0594 | Val rms_score: 0.7762
163
+ 2025-09-18 15:31:57,306 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0633 | Val rms_score: 0.7568
164
+ 2025-09-18 15:32:03,227 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0617 | Val rms_score: 0.7236
165
+ 2025-09-18 15:32:08,893 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0618 | Val rms_score: 0.7976
166
+ 2025-09-18 15:32:14,594 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0674 | Val rms_score: 0.7781
167
+ 2025-09-18 15:32:17,834 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0666 | Val rms_score: 0.7447
168
+ 2025-09-18 15:32:23,457 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0584 | Val rms_score: 0.7812
169
+ 2025-09-18 15:32:29,497 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0604 | Val rms_score: 0.7482
170
+ 2025-09-18 15:32:36,257 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0614 | Val rms_score: 0.7905
171
+ 2025-09-18 15:32:41,896 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0592 | Val rms_score: 0.7367
172
+ 2025-09-18 15:32:45,207 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0563 | Val rms_score: 0.7400
173
+ 2025-09-18 15:32:51,095 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0614 | Val rms_score: 0.7785
174
+ 2025-09-18 15:32:57,027 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0551 | Val rms_score: 0.7648
175
+ 2025-09-18 15:33:02,814 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0557 | Val rms_score: 0.7617
176
+ 2025-09-18 15:33:08,475 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0543 | Val rms_score: 0.7533
177
+ 2025-09-18 15:33:14,178 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0522 | Val rms_score: 0.7956
178
+ 2025-09-18 15:33:17,472 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0495 | Val rms_score: 0.7585
179
+ 2025-09-18 15:33:23,489 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0512 | Val rms_score: 0.8291
180
+ 2025-09-18 15:33:29,099 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0563 | Val rms_score: 0.7725
181
+ 2025-09-18 15:33:34,826 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0564 | Val rms_score: 0.7567
182
+ 2025-09-18 15:33:40,500 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0520 | Val rms_score: 0.7964
183
+ 2025-09-18 15:33:43,823 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0574 | Val rms_score: 0.7290
184
+ 2025-09-18 15:33:49,843 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0535 | Val rms_score: 0.7593
185
+ 2025-09-18 15:33:55,521 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0465 | Val rms_score: 0.7764
186
+ 2025-09-18 15:34:01,266 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0632 | Val rms_score: 0.7119
187
+ 2025-09-18 15:34:06,895 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0637 | Val rms_score: 0.7476
188
+ 2025-09-18 15:34:12,532 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0551 | Val rms_score: 0.7383
189
+ 2025-09-18 15:34:15,998 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0486 | Val rms_score: 0.7695
190
+ 2025-09-18 15:34:21,714 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0471 | Val rms_score: 0.7698
191
+ 2025-09-18 15:34:27,484 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0524 | Val rms_score: 0.7565
192
+ 2025-09-18 15:34:33,168 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0504 | Val rms_score: 0.8016
193
+ 2025-09-18 15:34:38,721 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0510 | Val rms_score: 0.7490
194
+ 2025-09-18 15:34:44,651 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0499 | Val rms_score: 0.8228
195
+ 2025-09-18 15:34:47,801 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0508 | Val rms_score: 0.7548
196
+ 2025-09-18 15:34:54,430 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0664 | Val rms_score: 0.7590
197
+ 2025-09-18 15:35:00,096 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0475 | Val rms_score: 0.7444
198
+ 2025-09-18 15:35:05,739 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0432 | Val rms_score: 0.7663
199
+ 2025-09-18 15:35:11,770 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0486 | Val rms_score: 0.8183
200
+ 2025-09-18 15:35:14,915 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0448 | Val rms_score: 0.7727
201
+ 2025-09-18 15:35:20,561 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0428 | Val rms_score: 0.7923
202
+ 2025-09-18 15:35:26,200 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0477 | Val rms_score: 0.7526
203
+ 2025-09-18 15:35:31,823 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0444 | Val rms_score: 0.7697
204
+ 2025-09-18 15:35:37,818 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0368 | Val rms_score: 0.7782
205
+ 2025-09-18 15:35:43,538 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0493 | Val rms_score: 0.7797
206
+ 2025-09-18 15:35:46,759 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0487 | Val rms_score: 0.7477
207
+ 2025-09-18 15:35:52,499 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0443 | Val rms_score: 0.7496
208
+ 2025-09-18 15:35:58,244 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0432 | Val rms_score: 0.7610
209
+ 2025-09-18 15:36:04,235 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0454 | Val rms_score: 0.7399
210
+ 2025-09-18 15:36:09,882 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0450 | Val rms_score: 0.7702
211
+ 2025-09-18 15:36:13,120 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0424 | Val rms_score: 0.7420
212
+ 2025-09-18 15:36:18,812 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0422 | Val rms_score: 0.7739
213
+ 2025-09-18 15:36:24,522 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0415 | Val rms_score: 0.7909
214
+ 2025-09-18 15:36:30,594 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0438 | Val rms_score: 0.7772
215
+ 2025-09-18 15:36:36,173 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0433 | Val rms_score: 0.7776
216
+ 2025-09-18 15:36:41,840 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0421 | Val rms_score: 0.7907
217
+ 2025-09-18 15:36:45,101 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0417 | Val rms_score: 0.7497
218
+ 2025-09-18 15:36:45,676 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 1.1602
219
+ 2025-09-18 15:36:45,960 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bace_regression at 2025-09-18_15-36-45
220
+ 2025-09-18 15:36:51,035 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.0395 | Val rms_score: 0.6793
221
+ 2025-09-18 15:36:51,035 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
222
+ 2025-09-18 15:36:51,745 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.6793
223
+ 2025-09-18 15:36:57,560 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5296 | Val rms_score: 0.6452
224
+ 2025-09-18 15:36:57,723 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 76
225
+ 2025-09-18 15:36:58,233 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.6452
226
+ 2025-09-18 15:37:04,008 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4219 | Val rms_score: 0.7340
227
+ 2025-09-18 15:37:10,604 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.3487 | Val rms_score: 0.6487
228
+ 2025-09-18 15:37:13,713 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2829 | Val rms_score: 0.7285
229
+ 2025-09-18 15:37:19,324 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2511 | Val rms_score: 0.7354
230
+ 2025-09-18 15:37:25,269 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2204 | Val rms_score: 0.7542
231
+ 2025-09-18 15:37:30,836 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2324 | Val rms_score: 0.7759
232
+ 2025-09-18 15:37:36,441 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1785 | Val rms_score: 0.7242
233
+ 2025-09-18 15:37:42,164 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1711 | Val rms_score: 0.8019
234
+ 2025-09-18 15:37:45,300 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1571 | Val rms_score: 0.7292
235
+ 2025-09-18 15:37:51,253 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1406 | Val rms_score: 0.7256
236
+ 2025-09-18 15:37:56,907 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1488 | Val rms_score: 0.7400
237
+ 2025-09-18 15:38:02,651 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1328 | Val rms_score: 0.9027
238
+ 2025-09-18 15:38:08,408 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1349 | Val rms_score: 0.7931
239
+ 2025-09-18 15:38:14,117 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1348 | Val rms_score: 0.7430
240
+ 2025-09-18 15:38:17,706 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1160 | Val rms_score: 0.7390
241
+ 2025-09-18 15:38:23,431 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1308 | Val rms_score: 0.7265
242
+ 2025-09-18 15:38:29,103 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1186 | Val rms_score: 0.7096
243
+ 2025-09-18 15:38:34,797 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1020 | Val rms_score: 0.7409
244
+ 2025-09-18 15:38:40,524 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1020 | Val rms_score: 0.7065
245
+ 2025-09-18 15:38:43,935 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0994 | Val rms_score: 0.7211
246
+ 2025-09-18 15:38:49,625 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1036 | Val rms_score: 0.7418
247
+ 2025-09-18 15:38:55,347 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1074 | Val rms_score: 0.7463
248
+ 2025-09-18 15:39:01,024 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0884 | Val rms_score: 0.7804
249
+ 2025-09-18 15:39:06,802 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0925 | Val rms_score: 0.7655
250
+ 2025-09-18 15:39:13,772 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0859 | Val rms_score: 0.7506
251
+ 2025-09-18 15:39:16,953 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0876 | Val rms_score: 0.7344
252
+ 2025-09-18 15:39:22,671 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1562 | Val rms_score: 0.8034
253
+ 2025-09-18 15:39:28,383 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0831 | Val rms_score: 0.7403
254
+ 2025-09-18 15:39:34,085 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0810 | Val rms_score: 0.7763
255
+ 2025-09-18 15:39:40,156 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0859 | Val rms_score: 0.7459
256
+ 2025-09-18 15:39:43,633 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0843 | Val rms_score: 0.7185
257
+ 2025-09-18 15:39:50,381 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0748 | Val rms_score: 0.7366
258
+ 2025-09-18 15:39:56,081 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0755 | Val rms_score: 0.7102
259
+ 2025-09-18 15:40:01,739 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0752 | Val rms_score: 0.7821
260
+ 2025-09-18 15:40:07,985 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0749 | Val rms_score: 0.7577
261
+ 2025-09-18 15:40:13,729 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0728 | Val rms_score: 0.7663
262
+ 2025-09-18 15:40:17,235 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0674 | Val rms_score: 0.7081
263
+ 2025-09-18 15:40:22,858 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0805 | Val rms_score: 0.7517
264
+ 2025-09-18 15:40:28,715 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0769 | Val rms_score: 0.7842
265
+ 2025-09-18 15:40:34,648 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0703 | Val rms_score: 0.6907
266
+ 2025-09-18 15:40:40,226 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0680 | Val rms_score: 0.7296
267
+ 2025-09-18 15:40:45,872 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0609 | Val rms_score: 0.7249
268
+ 2025-09-18 15:40:49,011 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0660 | Val rms_score: 0.7238
269
+ 2025-09-18 15:40:54,736 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0600 | Val rms_score: 0.7683
270
+ 2025-09-18 15:41:00,702 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0588 | Val rms_score: 0.7768
271
+ 2025-09-18 15:41:06,347 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0635 | Val rms_score: 0.7232
272
+ 2025-09-18 15:41:12,029 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0674 | Val rms_score: 0.7322
273
+ 2025-09-18 15:41:15,212 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0600 | Val rms_score: 0.7581
274
+ 2025-09-18 15:41:20,758 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0604 | Val rms_score: 0.7956
275
+ 2025-09-18 15:41:26,723 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0621 | Val rms_score: 0.7756
276
+ 2025-09-18 15:41:33,342 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0675 | Val rms_score: 0.7447
277
+ 2025-09-18 15:41:38,973 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0576 | Val rms_score: 0.7568
278
+ 2025-09-18 15:41:44,751 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0547 | Val rms_score: 0.7575
279
+ 2025-09-18 15:41:47,885 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0619 | Val rms_score: 0.7909
280
+ 2025-09-18 15:41:53,880 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0539 | Val rms_score: 0.7299
281
+ 2025-09-18 15:41:59,498 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0586 | Val rms_score: 0.7317
282
+ 2025-09-18 15:42:05,053 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0559 | Val rms_score: 0.7510
283
+ 2025-09-18 15:42:10,593 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0547 | Val rms_score: 0.7534
284
+ 2025-09-18 15:42:19,064 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0569 | Val rms_score: 0.7802
285
+ 2025-09-18 15:42:19,791 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0522 | Val rms_score: 0.7683
286
+ 2025-09-18 15:42:25,419 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0559 | Val rms_score: 0.7531
287
+ 2025-09-18 15:42:31,155 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0549 | Val rms_score: 0.7602
288
+ 2025-09-18 15:42:36,866 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0559 | Val rms_score: 0.7670
289
+ 2025-09-18 15:42:42,507 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0549 | Val rms_score: 0.7325
290
+ 2025-09-18 15:42:45,898 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0502 | Val rms_score: 0.7704
291
+ 2025-09-18 15:42:51,589 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0518 | Val rms_score: 0.7627
292
+ 2025-09-18 15:42:57,352 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0462 | Val rms_score: 0.7321
293
+ 2025-09-18 15:43:03,131 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0465 | Val rms_score: 0.7458
294
+ 2025-09-18 15:43:09,029 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0489 | Val rms_score: 0.7348
295
+ 2025-09-18 15:43:15,639 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0499 | Val rms_score: 0.7453
296
+ 2025-09-18 15:43:18,998 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0539 | Val rms_score: 0.7253
297
+ 2025-09-18 15:43:24,684 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0570 | Val rms_score: 0.7394
298
+ 2025-09-18 15:43:30,470 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0495 | Val rms_score: 0.7383
299
+ 2025-09-18 15:43:36,197 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0510 | Val rms_score: 0.6911
300
+ 2025-09-18 15:43:42,269 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0457 | Val rms_score: 0.7474
301
+ 2025-09-18 15:43:45,451 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0458 | Val rms_score: 0.7368
302
+ 2025-09-18 15:43:52,137 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0732 | Val rms_score: 0.7451
303
+ 2025-09-18 15:43:57,868 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0508 | Val rms_score: 0.6926
304
+ 2025-09-18 15:44:03,570 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0512 | Val rms_score: 0.7463
305
+ 2025-09-18 15:44:09,544 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0493 | Val rms_score: 0.7542
306
+ 2025-09-18 15:44:15,219 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0463 | Val rms_score: 0.7493
307
+ 2025-09-18 15:44:18,398 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0475 | Val rms_score: 0.7244
308
+ 2025-09-18 15:44:24,138 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0500 | Val rms_score: 0.7211
309
+ 2025-09-18 15:44:29,767 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0485 | Val rms_score: 0.7340
310
+ 2025-09-18 15:44:35,691 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0570 | Val rms_score: 0.7309
311
+ 2025-09-18 15:44:41,294 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0526 | Val rms_score: 0.7086
312
+ 2025-09-18 15:44:44,489 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0450 | Val rms_score: 0.7538
313
+ 2025-09-18 15:44:50,298 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0473 | Val rms_score: 0.7467
314
+ 2025-09-18 15:44:55,970 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0473 | Val rms_score: 0.7320
315
+ 2025-09-18 15:45:01,916 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0421 | Val rms_score: 0.7522
316
+ 2025-09-18 15:45:07,536 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0464 | Val rms_score: 0.7981
317
+ 2025-09-18 15:45:13,171 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0426 | Val rms_score: 0.7156
318
+ 2025-09-18 15:45:16,327 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0371 | Val rms_score: 0.7860
319
+ 2025-09-18 15:45:21,970 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0448 | Val rms_score: 0.7472
320
+ 2025-09-18 15:45:27,954 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0444 | Val rms_score: 0.7251
321
+ 2025-09-18 15:45:33,603 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0446 | Val rms_score: 0.7562
322
+ 2025-09-18 15:45:39,218 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0432 | Val rms_score: 0.7597
323
+ 2025-09-18 15:45:44,861 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0461 | Val rms_score: 0.7167
324
+ 2025-09-18 15:45:45,438 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 1.2032
325
+ 2025-09-18 15:45:45,727 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 1.0893, Std Dev: 0.1319
logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_clearance_epochs100_batch_size32_20250918_165507.log ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 16:55:07,846 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Running benchmark for dataset: clearance
2
+ 2025-09-18 16:55:07,847 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - dataset: clearance, tasks: ['target'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 16:55:07,856 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset clearance at 2025-09-18_16-55-07
4
+ 2025-09-18 16:55:13,483 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 2.2143 | Val rms_score: 61.1544
5
+ 2025-09-18 16:55:13,483 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
6
+ 2025-09-18 16:55:14,173 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 61.1544
7
+ 2025-09-18 16:55:19,827 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.3571 | Val rms_score: 58.1079
8
+ 2025-09-18 16:55:20,001 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
9
+ 2025-09-18 16:55:20,554 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 58.1079
10
+ 2025-09-18 16:55:25,373 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 1.2143 | Val rms_score: 56.8632
11
+ 2025-09-18 16:55:25,548 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 63
12
+ 2025-09-18 16:55:26,116 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 56.8632
13
+ 2025-09-18 16:55:30,816 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 1.0595 | Val rms_score: 59.0617
14
+ 2025-09-18 16:55:35,875 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.8125 | Val rms_score: 59.0144
15
+ 2025-09-18 16:55:38,465 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.6875 | Val rms_score: 57.9125
16
+ 2025-09-18 16:55:44,589 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.5298 | Val rms_score: 59.0528
17
+ 2025-09-18 16:55:50,297 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.4613 | Val rms_score: 56.2325
18
+ 2025-09-18 16:55:50,502 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 168
19
+ 2025-09-18 16:55:51,116 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 56.2325
20
+ 2025-09-18 16:55:57,443 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3676 | Val rms_score: 57.4713
21
+ 2025-09-18 16:56:03,476 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3000 | Val rms_score: 55.3562
22
+ 2025-09-18 16:56:03,664 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 210
23
+ 2025-09-18 16:56:04,221 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 55.3562
24
+ 2025-09-18 16:56:07,008 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2515 | Val rms_score: 55.1209
25
+ 2025-09-18 16:56:07,601 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 231
26
+ 2025-09-18 16:56:08,180 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 55.1209
27
+ 2025-09-18 16:56:13,280 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1979 | Val rms_score: 54.4859
28
+ 2025-09-18 16:56:13,458 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 252
29
+ 2025-09-18 16:56:14,001 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val rms_score: 54.4859
30
+ 2025-09-18 16:56:19,968 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1949 | Val rms_score: 54.3651
31
+ 2025-09-18 16:56:20,160 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 273
32
+ 2025-09-18 16:56:20,765 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val rms_score: 54.3651
33
+ 2025-09-18 16:56:26,723 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1882 | Val rms_score: 53.7390
34
+ 2025-09-18 16:56:26,895 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 294
35
+ 2025-09-18 16:56:27,445 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 53.7390
36
+ 2025-09-18 16:56:32,703 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1521 | Val rms_score: 54.7046
37
+ 2025-09-18 16:56:40,591 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1518 | Val rms_score: 53.7714
38
+ 2025-09-18 16:56:40,789 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1235 | Val rms_score: 52.9353
39
+ 2025-09-18 16:56:40,970 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 357
40
+ 2025-09-18 16:56:41,593 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val rms_score: 52.9353
41
+ 2025-09-18 16:56:46,638 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1324 | Val rms_score: 53.3860
42
+ 2025-09-18 16:56:51,683 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1190 | Val rms_score: 52.5321
43
+ 2025-09-18 16:56:51,841 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 399
44
+ 2025-09-18 16:56:52,431 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 19 with val rms_score: 52.5321
45
+ 2025-09-18 16:56:57,758 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1227 | Val rms_score: 52.1580
46
+ 2025-09-18 16:56:57,938 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 420
47
+ 2025-09-18 16:56:58,465 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val rms_score: 52.1580
48
+ 2025-09-18 16:57:04,152 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1094 | Val rms_score: 52.8174
49
+ 2025-09-18 16:57:07,409 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1079 | Val rms_score: 51.9360
50
+ 2025-09-18 16:57:07,584 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 462
51
+ 2025-09-18 16:57:08,112 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 22 with val rms_score: 51.9360
52
+ 2025-09-18 16:57:13,482 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1220 | Val rms_score: 53.4333
53
+ 2025-09-18 16:57:18,829 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1030 | Val rms_score: 52.6741
54
+ 2025-09-18 16:57:23,299 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0960 | Val rms_score: 53.3154
55
+ 2025-09-18 16:57:27,692 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0952 | Val rms_score: 52.7268
56
+ 2025-09-18 16:57:33,275 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0867 | Val rms_score: 52.1380
57
+ 2025-09-18 16:57:35,921 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0822 | Val rms_score: 50.5847
58
+ 2025-09-18 16:57:36,096 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 588
59
+ 2025-09-18 16:57:36,654 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val rms_score: 50.5847
60
+ 2025-09-18 16:57:41,507 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0929 | Val rms_score: 52.2534
61
+ 2025-09-18 16:57:46,448 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0997 | Val rms_score: 53.6963
62
+ 2025-09-18 16:57:51,000 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0833 | Val rms_score: 52.8989
63
+ 2025-09-18 16:57:56,555 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0789 | Val rms_score: 51.8916
64
+ 2025-09-18 16:58:01,679 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0722 | Val rms_score: 53.8006
65
+ 2025-09-18 16:58:06,692 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0776 | Val rms_score: 51.4820
66
+ 2025-09-18 16:58:09,399 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0796 | Val rms_score: 52.9924
67
+ 2025-09-18 16:58:14,168 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0789 | Val rms_score: 51.8944
68
+ 2025-09-18 16:58:19,842 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0725 | Val rms_score: 53.4781
69
+ 2025-09-18 16:58:25,096 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0677 | Val rms_score: 54.5994
70
+ 2025-09-18 16:58:29,549 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0625 | Val rms_score: 52.2768
71
+ 2025-09-18 16:58:34,584 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0699 | Val rms_score: 51.5763
72
+ 2025-09-18 16:58:36,534 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0733 | Val rms_score: 51.9349
73
+ 2025-09-18 16:58:41,782 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0610 | Val rms_score: 52.6803
74
+ 2025-09-18 16:58:46,423 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0488 | Val rms_score: 52.3851
75
+ 2025-09-18 16:58:51,247 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0632 | Val rms_score: 51.2110
76
+ 2025-09-18 16:58:56,341 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0670 | Val rms_score: 52.4853
77
+ 2025-09-18 16:59:00,912 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0666 | Val rms_score: 50.8082
78
+ 2025-09-18 16:59:06,077 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0595 | Val rms_score: 52.2017
79
+ 2025-09-18 16:59:10,202 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0464 | Val rms_score: 52.7613
80
+ 2025-09-18 16:59:15,450 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0625 | Val rms_score: 52.3874
81
+ 2025-09-18 16:59:20,701 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0621 | Val rms_score: 52.4707
82
+ 2025-09-18 16:59:25,773 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0625 | Val rms_score: 53.2360
83
+ 2025-09-18 16:59:31,655 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0603 | Val rms_score: 53.2071
84
+ 2025-09-18 16:59:37,290 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0607 | Val rms_score: 52.2401
85
+ 2025-09-18 16:59:40,097 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0513 | Val rms_score: 53.7127
86
+ 2025-09-18 16:59:44,980 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0610 | Val rms_score: 51.7639
87
+ 2025-09-18 16:59:49,385 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0569 | Val rms_score: 51.5034
88
+ 2025-09-18 16:59:54,024 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0636 | Val rms_score: 51.6929
89
+ 2025-09-18 16:59:59,111 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0564 | Val rms_score: 52.5876
90
+ 2025-09-18 17:00:04,073 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0499 | Val rms_score: 51.0426
91
+ 2025-09-18 17:00:06,493 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0580 | Val rms_score: 51.8545
92
+ 2025-09-18 17:00:11,200 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0554 | Val rms_score: 52.9813
93
+ 2025-09-18 17:00:16,739 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0649 | Val rms_score: 52.3612
94
+ 2025-09-18 17:00:21,706 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0547 | Val rms_score: 51.1630
95
+ 2025-09-18 17:00:26,842 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0521 | Val rms_score: 51.2441
96
+ 2025-09-18 17:00:31,119 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0476 | Val rms_score: 51.9374
97
+ 2025-09-18 17:00:36,296 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0474 | Val rms_score: 52.0766
98
+ 2025-09-18 17:00:39,382 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0502 | Val rms_score: 52.5399
99
+ 2025-09-18 17:00:44,675 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0614 | Val rms_score: 53.9461
100
+ 2025-09-18 17:00:49,864 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0595 | Val rms_score: 52.4088
101
+ 2025-09-18 17:00:55,049 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0673 | Val rms_score: 53.1054
102
+ 2025-09-18 17:01:00,307 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0517 | Val rms_score: 51.9827
103
+ 2025-09-18 17:01:05,925 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0521 | Val rms_score: 52.8894
104
+ 2025-09-18 17:01:08,576 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0474 | Val rms_score: 52.8651
105
+ 2025-09-18 17:01:14,229 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0499 | Val rms_score: 52.8321
106
+ 2025-09-18 17:01:20,426 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0428 | Val rms_score: 52.8546
107
+ 2025-09-18 17:01:25,789 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0428 | Val rms_score: 53.3140
108
+ 2025-09-18 17:01:31,970 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0400 | Val rms_score: 52.7799
109
+ 2025-09-18 17:01:37,717 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0476 | Val rms_score: 52.7160
110
+ 2025-09-18 17:01:41,179 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0506 | Val rms_score: 51.4765
111
+ 2025-09-18 17:01:47,452 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0463 | Val rms_score: 53.5260
112
+ 2025-09-18 17:01:53,753 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0342 | Val rms_score: 52.3786
113
+ 2025-09-18 17:02:00,098 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0432 | Val rms_score: 52.3862
114
+ 2025-09-18 17:02:05,784 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0422 | Val rms_score: 53.0819
115
+ 2025-09-18 17:02:09,075 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0469 | Val rms_score: 52.6880
116
+ 2025-09-18 17:02:14,534 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0472 | Val rms_score: 52.7313
117
+ 2025-09-18 17:02:19,923 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0339 | Val rms_score: 51.3399
118
+ 2025-09-18 17:02:25,563 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0398 | Val rms_score: 51.3088
119
+ 2025-09-18 17:02:30,749 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0491 | Val rms_score: 54.2879
120
+ 2025-09-18 17:02:35,649 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0510 | Val rms_score: 51.7264
121
+ 2025-09-18 17:02:39,256 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0487 | Val rms_score: 53.0400
122
+ 2025-09-18 17:02:45,155 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0417 | Val rms_score: 52.7292
123
+ 2025-09-18 17:02:51,421 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0420 | Val rms_score: 53.5426
124
+ 2025-09-18 17:02:57,191 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0432 | Val rms_score: 52.1907
125
+ 2025-09-18 17:03:02,910 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0426 | Val rms_score: 52.6583
126
+ 2025-09-18 17:03:06,170 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0409 | Val rms_score: 52.8121
127
+ 2025-09-18 17:03:12,193 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0437 | Val rms_score: 53.1250
128
+ 2025-09-18 17:03:17,909 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0413 | Val rms_score: 52.8915
129
+ 2025-09-18 17:03:22,955 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0432 | Val rms_score: 52.8309
130
+ 2025-09-18 17:03:28,088 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0346 | Val rms_score: 52.5785
131
+ 2025-09-18 17:03:33,046 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0433 | Val rms_score: 52.3957
132
+ 2025-09-18 17:03:33,613 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 50.7912
133
+ 2025-09-18 17:03:34,012 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset clearance at 2025-09-18_17-03-34
134
+ 2025-09-18 17:03:36,149 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 2.6429 | Val rms_score: 60.6980
135
+ 2025-09-18 17:03:36,149 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
136
+ 2025-09-18 17:03:37,178 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 60.6980
137
+ 2025-09-18 17:03:42,513 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.3036 | Val rms_score: 57.5811
138
+ 2025-09-18 17:03:42,678 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
139
+ 2025-09-18 17:03:43,204 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 57.5811
140
+ 2025-09-18 17:03:49,653 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 1.1250 | Val rms_score: 55.7058
141
+ 2025-09-18 17:03:49,823 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 63
142
+ 2025-09-18 17:03:50,373 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 55.7058
143
+ 2025-09-18 17:03:55,863 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 1.0060 | Val rms_score: 56.1520
144
+ 2025-09-18 17:04:01,396 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 1.0250 | Val rms_score: 55.0280
145
+ 2025-09-18 17:04:01,574 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 105
146
+ 2025-09-18 17:04:02,130 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 55.0280
147
+ 2025-09-18 17:04:07,346 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.7500 | Val rms_score: 57.3045
148
+ 2025-09-18 17:04:10,300 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.6369 | Val rms_score: 55.8529
149
+ 2025-09-18 17:04:15,131 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.5000 | Val rms_score: 54.2228
150
+ 2025-09-18 17:04:15,342 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 168
151
+ 2025-09-18 17:04:15,894 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 54.2228
152
+ 2025-09-18 17:04:21,602 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3661 | Val rms_score: 55.1627
153
+ 2025-09-18 17:04:26,896 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3438 | Val rms_score: 55.8465
154
+ 2025-09-18 17:04:32,340 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2738 | Val rms_score: 52.7986
155
+ 2025-09-18 17:04:32,898 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 231
156
+ 2025-09-18 17:04:33,460 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 52.7986
157
+ 2025-09-18 17:04:36,973 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2336 | Val rms_score: 53.9616
158
+ 2025-09-18 17:04:42,791 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2113 | Val rms_score: 51.3500
159
+ 2025-09-18 17:04:42,968 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 273
160
+ 2025-09-18 17:04:43,519 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val rms_score: 51.3500
161
+ 2025-09-18 17:04:48,544 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2128 | Val rms_score: 53.5134
162
+ 2025-09-18 17:04:53,621 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1542 | Val rms_score: 51.6614
163
+ 2025-09-18 17:04:58,963 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1555 | Val rms_score: 51.7230
164
+ 2025-09-18 17:05:04,119 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1592 | Val rms_score: 51.7269
165
+ 2025-09-18 17:05:06,593 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1429 | Val rms_score: 51.6969
166
+ 2025-09-18 17:05:12,879 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1369 | Val rms_score: 52.0321
167
+ 2025-09-18 17:05:18,054 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1234 | Val rms_score: 52.5601
168
+ 2025-09-18 17:05:23,395 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1257 | Val rms_score: 52.7619
169
+ 2025-09-18 17:05:29,459 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1019 | Val rms_score: 52.9677
170
+ 2025-09-18 17:05:35,749 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1124 | Val rms_score: 51.1271
171
+ 2025-09-18 17:05:35,894 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 483
172
+ 2025-09-18 17:05:36,443 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 23 with val rms_score: 51.1271
173
+ 2025-09-18 17:05:40,039 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1230 | Val rms_score: 52.1883
174
+ 2025-09-18 17:05:45,427 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1019 | Val rms_score: 52.7912
175
+ 2025-09-18 17:05:51,022 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0938 | Val rms_score: 53.7876
176
+ 2025-09-18 17:05:56,550 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1027 | Val rms_score: 52.5843
177
+ 2025-09-18 17:06:01,011 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0952 | Val rms_score: 51.8363
178
+ 2025-09-18 17:06:05,539 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0833 | Val rms_score: 52.4591
179
+ 2025-09-18 17:06:08,337 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0885 | Val rms_score: 52.3155
180
+ 2025-09-18 17:06:13,907 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0900 | Val rms_score: 51.7799
181
+ 2025-09-18 17:06:19,320 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0837 | Val rms_score: 52.8410
182
+ 2025-09-18 17:06:24,163 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0826 | Val rms_score: 52.4037
183
+ 2025-09-18 17:06:29,812 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0748 | Val rms_score: 52.3069
184
+ 2025-09-18 17:06:35,231 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0796 | Val rms_score: 51.2664
185
+ 2025-09-18 17:06:38,582 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0763 | Val rms_score: 52.5281
186
+ 2025-09-18 17:06:44,091 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0789 | Val rms_score: 53.4402
187
+ 2025-09-18 17:06:49,336 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0778 | Val rms_score: 52.5719
188
+ 2025-09-18 17:06:54,246 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0724 | Val rms_score: 52.6656
189
+ 2025-09-18 17:06:59,120 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0737 | Val rms_score: 53.4322
190
+ 2025-09-18 17:07:04,473 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0792 | Val rms_score: 52.9267
191
+ 2025-09-18 17:07:07,003 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0737 | Val rms_score: 53.1494
192
+ 2025-09-18 17:07:12,190 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0618 | Val rms_score: 52.5423
193
+ 2025-09-18 17:07:17,154 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0673 | Val rms_score: 53.6407
194
+ 2025-09-18 17:07:21,925 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0699 | Val rms_score: 52.5128
195
+ 2025-09-18 17:07:27,011 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0688 | Val rms_score: 54.1015
196
+ 2025-09-18 17:07:32,692 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0651 | Val rms_score: 52.9491
197
+ 2025-09-18 17:07:36,835 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0806 | Val rms_score: 52.3724
198
+ 2025-09-18 17:07:42,090 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0703 | Val rms_score: 52.1940
199
+ 2025-09-18 17:07:47,270 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0666 | Val rms_score: 52.5279
200
+ 2025-09-18 17:07:52,708 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0644 | Val rms_score: 53.2367
201
+ 2025-09-18 17:07:57,876 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0658 | Val rms_score: 52.7527
202
+ 2025-09-18 17:08:02,937 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0595 | Val rms_score: 52.0069
203
+ 2025-09-18 17:08:08,173 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0565 | Val rms_score: 52.0272
204
+ 2025-09-18 17:08:11,053 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0592 | Val rms_score: 51.9690
205
+ 2025-09-18 17:08:16,615 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0528 | Val rms_score: 52.2047
206
+ 2025-09-18 17:08:22,164 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0577 | Val rms_score: 52.2641
207
+ 2025-09-18 17:08:27,335 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0616 | Val rms_score: 51.9666
208
+ 2025-09-18 17:08:32,347 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0592 | Val rms_score: 51.3066
209
+ 2025-09-18 17:08:37,209 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0707 | Val rms_score: 52.6761
210
+ 2025-09-18 17:08:39,854 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0554 | Val rms_score: 52.5257
211
+ 2025-09-18 17:08:45,072 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0530 | Val rms_score: 52.7124
212
+ 2025-09-18 17:08:49,781 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0588 | Val rms_score: 52.6598
213
+ 2025-09-18 17:08:54,647 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0495 | Val rms_score: 52.1926
214
+ 2025-09-18 17:08:59,271 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0525 | Val rms_score: 52.8471
215
+ 2025-09-18 17:09:04,310 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0569 | Val rms_score: 53.4107
216
+ 2025-09-18 17:09:07,076 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0497 | Val rms_score: 51.8918
217
+ 2025-09-18 17:09:11,927 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0539 | Val rms_score: 51.6266
218
+ 2025-09-18 17:09:16,712 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0513 | Val rms_score: 52.1805
219
+ 2025-09-18 17:09:21,288 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0547 | Val rms_score: 52.3698
220
+ 2025-09-18 17:09:26,101 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0532 | Val rms_score: 52.4844
221
+ 2025-09-18 17:09:31,492 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0563 | Val rms_score: 52.2045
222
+ 2025-09-18 17:09:36,411 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0558 | Val rms_score: 51.1962
223
+ 2025-09-18 17:09:39,034 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0625 | Val rms_score: 53.1220
224
+ 2025-09-18 17:09:43,794 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0573 | Val rms_score: 51.3499
225
+ 2025-09-18 17:09:48,601 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0599 | Val rms_score: 52.9216
226
+ 2025-09-18 17:09:54,381 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0561 | Val rms_score: 52.1592
227
+ 2025-09-18 17:09:59,817 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0517 | Val rms_score: 52.0992
228
+ 2025-09-18 17:10:05,457 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0484 | Val rms_score: 52.2322
229
+ 2025-09-18 17:10:08,280 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0536 | Val rms_score: 52.1821
230
+ 2025-09-18 17:10:13,856 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0376 | Val rms_score: 52.2627
231
+ 2025-09-18 17:10:20,381 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0446 | Val rms_score: 51.7626
232
+ 2025-09-18 17:10:26,499 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0502 | Val rms_score: 52.1897
233
+ 2025-09-18 17:10:32,652 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0528 | Val rms_score: 52.2582
234
+ 2025-09-18 17:10:36,506 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0480 | Val rms_score: 52.8207
235
+ 2025-09-18 17:10:42,636 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0521 | Val rms_score: 53.0955
236
+ 2025-09-18 17:10:48,238 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0532 | Val rms_score: 53.2082
237
+ 2025-09-18 17:10:54,566 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0441 | Val rms_score: 53.0001
238
+ 2025-09-18 17:11:00,789 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0461 | Val rms_score: 52.8867
239
+ 2025-09-18 17:11:07,040 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0502 | Val rms_score: 52.7715
240
+ 2025-09-18 17:11:10,717 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0511 | Val rms_score: 52.3469
241
+ 2025-09-18 17:11:16,822 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0452 | Val rms_score: 53.0451
242
+ 2025-09-18 17:11:22,434 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0450 | Val rms_score: 53.1136
243
+ 2025-09-18 17:11:27,944 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0469 | Val rms_score: 52.6412
244
+ 2025-09-18 17:11:33,586 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0446 | Val rms_score: 52.6505
245
+ 2025-09-18 17:11:37,281 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0430 | Val rms_score: 52.5640
246
+ 2025-09-18 17:11:42,804 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0424 | Val rms_score: 52.3682
247
+ 2025-09-18 17:11:47,989 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0443 | Val rms_score: 53.0449
248
+ 2025-09-18 17:11:52,905 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0456 | Val rms_score: 53.2960
249
+ 2025-09-18 17:11:57,999 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0469 | Val rms_score: 52.7691
250
+ 2025-09-18 17:11:58,631 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 48.3235
251
+ 2025-09-18 17:11:59,016 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset clearance at 2025-09-18_17-11-59
252
+ 2025-09-18 17:12:04,156 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 2.2857 | Val rms_score: 57.8271
253
+ 2025-09-18 17:12:04,156 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
254
+ 2025-09-18 17:12:05,031 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 57.8271
255
+ 2025-09-18 17:12:08,630 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.2857 | Val rms_score: 57.3257
256
+ 2025-09-18 17:12:08,807 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
257
+ 2025-09-18 17:12:09,383 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 57.3257
258
+ 2025-09-18 17:12:15,263 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 1.1310 | Val rms_score: 58.5092
259
+ 2025-09-18 17:12:21,406 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.9881 | Val rms_score: 56.3295
260
+ 2025-09-18 17:12:21,578 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 84
261
+ 2025-09-18 17:12:22,165 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 56.3295
262
+ 2025-09-18 17:12:27,312 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.9125 | Val rms_score: 57.0601
263
+ 2025-09-18 17:12:32,675 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.7470 | Val rms_score: 54.9522
264
+ 2025-09-18 17:12:33,207 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 126
265
+ 2025-09-18 17:12:33,782 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 54.9522
266
+ 2025-09-18 17:12:38,874 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.6220 | Val rms_score: 55.6346
267
+ 2025-09-18 17:12:40,857 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.5327 | Val rms_score: 57.5417
268
+ 2025-09-18 17:12:45,710 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.4405 | Val rms_score: 57.1254
269
+ 2025-09-18 17:12:50,389 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3594 | Val rms_score: 57.7206
270
+ 2025-09-18 17:12:55,254 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2753 | Val rms_score: 58.1662
271
+ 2025-09-18 17:13:00,668 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2426 | Val rms_score: 57.2594
272
+ 2025-09-18 17:13:05,077 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2307 | Val rms_score: 57.4651
273
+ 2025-09-18 17:13:07,686 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2188 | Val rms_score: 57.0650
274
+ 2025-09-18 17:13:12,172 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1854 | Val rms_score: 56.0193
275
+ 2025-09-18 17:13:17,038 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1540 | Val rms_score: 56.8276
276
+ 2025-09-18 17:13:21,616 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1518 | Val rms_score: 57.1333
277
+ 2025-09-18 17:13:28,676 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1369 | Val rms_score: 55.2392
278
+ 2025-09-18 17:13:34,271 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1310 | Val rms_score: 57.2004
279
+ 2025-09-18 17:13:37,859 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1320 | Val rms_score: 56.8369
280
+ 2025-09-18 17:13:44,655 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1317 | Val rms_score: 56.7628
281
+ 2025-09-18 17:13:50,212 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1198 | Val rms_score: 55.3737
282
+ 2025-09-18 17:13:56,845 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1220 | Val rms_score: 56.1737
283
+ 2025-09-18 17:14:03,731 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1367 | Val rms_score: 53.9560
284
+ 2025-09-18 17:14:03,878 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 504
285
+ 2025-09-18 17:14:04,468 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 24 with val rms_score: 53.9560
286
+ 2025-09-18 17:14:08,708 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1131 | Val rms_score: 55.3113
287
+ 2025-09-18 17:14:12,919 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1101 | Val rms_score: 54.9554
288
+ 2025-09-18 17:14:19,123 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0952 | Val rms_score: 56.8570
289
+ 2025-09-18 17:14:25,046 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1034 | Val rms_score: 53.9103
290
+ 2025-09-18 17:14:25,190 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 588
291
+ 2025-09-18 17:14:25,740 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val rms_score: 53.9103
292
+ 2025-09-18 17:14:32,544 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1085 | Val rms_score: 53.6685
293
+ 2025-09-18 17:14:32,721 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 609
294
+ 2025-09-18 17:14:33,308 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 29 with val rms_score: 53.6685
295
+ 2025-09-18 17:14:37,579 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0923 | Val rms_score: 54.4434
296
+ 2025-09-18 17:14:42,685 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0960 | Val rms_score: 53.2019
297
+ 2025-09-18 17:14:43,190 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 651
298
+ 2025-09-18 17:14:43,761 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 31 with val rms_score: 53.2019
299
+ 2025-09-18 17:14:50,171 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0837 | Val rms_score: 54.0170
300
+ 2025-09-18 17:14:57,023 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0859 | Val rms_score: 52.7067
301
+ 2025-09-18 17:14:57,171 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 693
302
+ 2025-09-18 17:14:57,886 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 33 with val rms_score: 52.7067
303
+ 2025-09-18 17:15:03,529 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0982 | Val rms_score: 54.6728
304
+ 2025-09-18 17:15:07,228 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0904 | Val rms_score: 53.3788
305
+ 2025-09-18 17:15:14,053 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0781 | Val rms_score: 54.0965
306
+ 2025-09-18 17:15:19,781 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0826 | Val rms_score: 52.6046
307
+ 2025-09-18 17:15:19,956 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 777
308
+ 2025-09-18 17:15:20,568 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 37 with val rms_score: 52.6046
309
+ 2025-09-18 17:15:25,985 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0878 | Val rms_score: 54.7235
310
+ 2025-09-18 17:15:32,753 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0736 | Val rms_score: 52.6569
311
+ 2025-09-18 17:15:38,198 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0815 | Val rms_score: 53.1234
312
+ 2025-09-18 17:15:42,083 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0703 | Val rms_score: 53.1650
313
+ 2025-09-18 17:15:49,229 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0766 | Val rms_score: 52.7212
314
+ 2025-09-18 17:15:54,110 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0736 | Val rms_score: 51.1278
315
+ 2025-09-18 17:15:54,266 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 903
316
+ 2025-09-18 17:15:54,845 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 43 with val rms_score: 51.1278
317
+ 2025-09-18 17:16:01,290 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0737 | Val rms_score: 52.5277
318
+ 2025-09-18 17:16:08,254 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0722 | Val rms_score: 53.7523
319
+ 2025-09-18 17:16:10,733 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0640 | Val rms_score: 52.8571
320
+ 2025-09-18 17:16:17,992 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0658 | Val rms_score: 52.1033
321
+ 2025-09-18 17:16:25,730 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0654 | Val rms_score: 53.3582
322
+ 2025-09-18 17:16:30,773 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0655 | Val rms_score: 51.2717
323
+ 2025-09-18 17:16:37,508 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0718 | Val rms_score: 54.0762
324
+ 2025-09-18 17:16:41,907 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0606 | Val rms_score: 52.3755
325
+ 2025-09-18 17:16:47,454 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0565 | Val rms_score: 52.8565
326
+ 2025-09-18 17:16:54,185 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0712 | Val rms_score: 52.7593
327
+ 2025-09-18 17:17:00,711 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0640 | Val rms_score: 51.5372
328
+ 2025-09-18 17:17:05,949 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0610 | Val rms_score: 51.6843
329
+ 2025-09-18 17:17:10,195 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0606 | Val rms_score: 51.7729
330
+ 2025-09-18 17:17:16,305 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0573 | Val rms_score: 51.7793
331
+ 2025-09-18 17:17:23,014 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0582 | Val rms_score: 51.7561
332
+ 2025-09-18 17:17:29,165 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0599 | Val rms_score: 52.2954
333
+ 2025-09-18 17:17:34,892 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0562 | Val rms_score: 52.0169
334
+ 2025-09-18 17:17:39,096 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0610 | Val rms_score: 50.4081
335
+ 2025-09-18 17:17:39,575 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 1281
336
+ 2025-09-18 17:17:40,164 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 61 with val rms_score: 50.4081
337
+ 2025-09-18 17:17:46,912 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0796 | Val rms_score: 52.5575
338
+ 2025-09-18 17:17:52,207 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0703 | Val rms_score: 51.8479
339
+ 2025-09-18 17:17:59,094 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0539 | Val rms_score: 51.6489
340
+ 2025-09-18 17:18:04,554 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0554 | Val rms_score: 51.8947
341
+ 2025-09-18 17:18:08,372 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0536 | Val rms_score: 51.1235
342
+ 2025-09-18 17:18:15,514 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0555 | Val rms_score: 50.9129
343
+ 2025-09-18 17:18:20,532 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0506 | Val rms_score: 51.4548
344
+ 2025-09-18 17:18:27,268 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0539 | Val rms_score: 50.6456
345
+ 2025-09-18 17:18:33,822 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0517 | Val rms_score: 50.8350
346
+ 2025-09-18 17:18:39,131 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0487 | Val rms_score: 50.7064
347
+ 2025-09-18 17:18:43,602 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0492 | Val rms_score: 51.4145
348
+ 2025-09-18 17:18:49,342 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0513 | Val rms_score: 51.2319
349
+ 2025-09-18 17:18:55,571 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0521 | Val rms_score: 51.5065
350
+ 2025-09-18 17:19:02,231 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0558 | Val rms_score: 50.5890
351
+ 2025-09-18 17:19:07,222 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0580 | Val rms_score: 50.7649
352
+ 2025-09-18 17:19:11,856 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0547 | Val rms_score: 51.0917
353
+ 2025-09-18 17:19:18,450 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0528 | Val rms_score: 50.8573
354
+ 2025-09-18 17:19:23,616 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0491 | Val rms_score: 50.0384
355
+ 2025-09-18 17:19:23,767 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 1659
356
+ 2025-09-18 17:19:24,344 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 79 with val rms_score: 50.0384
357
+ 2025-09-18 17:19:31,067 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0426 | Val rms_score: 50.1317
358
+ 2025-09-18 17:19:37,205 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0371 | Val rms_score: 50.6637
359
+ 2025-09-18 17:19:40,407 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0536 | Val rms_score: 51.4800
360
+ 2025-09-18 17:19:47,018 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0491 | Val rms_score: 50.9289
361
+ 2025-09-18 17:19:52,739 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0467 | Val rms_score: 50.3168
362
+ 2025-09-18 17:19:58,833 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0454 | Val rms_score: 50.6393
363
+ 2025-09-18 17:20:05,719 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0524 | Val rms_score: 50.4954
364
+ 2025-09-18 17:20:08,849 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0450 | Val rms_score: 50.3917
365
+ 2025-09-18 17:20:15,294 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0487 | Val rms_score: 51.3629
366
+ 2025-09-18 17:20:21,904 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0476 | Val rms_score: 51.1074
367
+ 2025-09-18 17:20:26,969 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0459 | Val rms_score: 50.4134
368
+ 2025-09-18 17:20:33,663 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0522 | Val rms_score: 51.5045
369
+ 2025-09-18 17:20:37,795 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0517 | Val rms_score: 50.1380
370
+ 2025-09-18 17:20:43,457 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0491 | Val rms_score: 50.2774
371
+ 2025-09-18 17:20:50,259 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0536 | Val rms_score: 51.9770
372
+ 2025-09-18 17:20:55,492 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0539 | Val rms_score: 51.3803
373
+ 2025-09-18 17:21:02,019 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0447 | Val rms_score: 50.9750
374
+ 2025-09-18 17:21:09,203 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0461 | Val rms_score: 51.1540
375
+ 2025-09-18 17:21:12,785 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0484 | Val rms_score: 51.6215
376
+ 2025-09-18 17:21:18,368 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0458 | Val rms_score: 50.5329
377
+ 2025-09-18 17:21:24,723 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0484 | Val rms_score: 50.4939
378
+ 2025-09-18 17:21:25,435 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 47.8868
379
+ 2025-09-18 17:21:25,809 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 49.0005, Std Dev: 1.2787
logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_delaney_epochs100_batch_size32_20250918_150151.log ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:01:51,724 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Running benchmark for dataset: delaney
2
+ 2025-09-18 15:01:51,725 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - dataset: delaney, tasks: ['measured_log_solubility_in_mols_per_litre'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 15:01:51,730 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset delaney at 2025-09-18_15-01-51
4
+ 2025-09-18 15:01:57,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.8147 | Val rms_score: 1.1999
5
+ 2025-09-18 15:01:57,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
6
+ 2025-09-18 15:01:58,381 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.1999
7
+ 2025-09-18 15:02:01,898 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1692 | Val rms_score: 1.0994
8
+ 2025-09-18 15:02:02,059 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
9
+ 2025-09-18 15:02:02,542 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.0994
10
+ 2025-09-18 15:02:06,038 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1228 | Val rms_score: 1.0681
11
+ 2025-09-18 15:02:06,204 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
12
+ 2025-09-18 15:02:06,711 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.0681
13
+ 2025-09-18 15:02:10,626 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0947 | Val rms_score: 1.0611
14
+ 2025-09-18 15:02:10,790 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 116
15
+ 2025-09-18 15:02:11,284 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 1.0611
16
+ 2025-09-18 15:02:12,659 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0867 | Val rms_score: 1.0491
17
+ 2025-09-18 15:02:12,830 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 145
18
+ 2025-09-18 15:02:13,331 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 1.0491
19
+ 2025-09-18 15:02:17,165 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0862 | Val rms_score: 1.0776
20
+ 2025-09-18 15:02:21,205 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0677 | Val rms_score: 1.0572
21
+ 2025-09-18 15:02:24,924 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0657 | Val rms_score: 1.0797
22
+ 2025-09-18 15:02:28,538 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0606 | Val rms_score: 1.0231
23
+ 2025-09-18 15:02:28,671 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 261
24
+ 2025-09-18 15:02:29,244 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 1.0231
25
+ 2025-09-18 15:02:32,763 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0533 | Val rms_score: 1.0764
26
+ 2025-09-18 15:02:36,355 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0477 | Val rms_score: 1.0549
27
+ 2025-09-18 15:02:40,121 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0453 | Val rms_score: 1.0347
28
+ 2025-09-18 15:02:41,085 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0399 | Val rms_score: 1.0432
29
+ 2025-09-18 15:02:44,608 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0322 | Val rms_score: 1.0538
30
+ 2025-09-18 15:02:48,085 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0372 | Val rms_score: 1.0325
31
+ 2025-09-18 15:02:51,607 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0364 | Val rms_score: 1.0324
32
+ 2025-09-18 15:02:55,624 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0356 | Val rms_score: 1.0371
33
+ 2025-09-18 15:02:59,300 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0328 | Val rms_score: 1.0874
34
+ 2025-09-18 15:03:02,984 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0353 | Val rms_score: 1.1024
35
+ 2025-09-18 15:03:06,756 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0333 | Val rms_score: 1.0446
36
+ 2025-09-18 15:03:10,389 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0273 | Val rms_score: 1.0622
37
+ 2025-09-18 15:03:11,867 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0310 | Val rms_score: 1.0745
38
+ 2025-09-18 15:03:15,520 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0275 | Val rms_score: 1.0683
39
+ 2025-09-18 15:03:19,111 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0300 | Val rms_score: 1.0754
40
+ 2025-09-18 15:03:22,721 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0291 | Val rms_score: 1.1573
41
+ 2025-09-18 15:03:26,276 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0695 | Val rms_score: 0.9453
42
+ 2025-09-18 15:03:26,697 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 754
43
+ 2025-09-18 15:03:27,225 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 26 with val rms_score: 0.9453
44
+ 2025-09-18 15:03:30,897 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0314 | Val rms_score: 0.9412
45
+ 2025-09-18 15:03:31,068 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 783
46
+ 2025-09-18 15:03:31,592 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val rms_score: 0.9412
47
+ 2025-09-18 15:03:35,379 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0278 | Val rms_score: 0.9290
48
+ 2025-09-18 15:03:35,550 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 812
49
+ 2025-09-18 15:03:36,072 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val rms_score: 0.9290
50
+ 2025-09-18 15:03:39,723 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0252 | Val rms_score: 0.9357
51
+ 2025-09-18 15:03:40,814 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0242 | Val rms_score: 0.9438
52
+ 2025-09-18 15:03:44,430 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0255 | Val rms_score: 0.9521
53
+ 2025-09-18 15:03:48,448 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0259 | Val rms_score: 0.9478
54
+ 2025-09-18 15:03:52,127 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0230 | Val rms_score: 0.9409
55
+ 2025-09-18 15:03:55,723 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0228 | Val rms_score: 0.9390
56
+ 2025-09-18 15:04:00,382 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0237 | Val rms_score: 0.9550
57
+ 2025-09-18 15:04:04,003 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0214 | Val rms_score: 0.9653
58
+ 2025-09-18 15:04:08,008 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0220 | Val rms_score: 0.9504
59
+ 2025-09-18 15:04:11,619 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0237 | Val rms_score: 0.9539
60
+ 2025-09-18 15:04:12,734 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0224 | Val rms_score: 0.9524
61
+ 2025-09-18 15:04:16,259 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0217 | Val rms_score: 0.9652
62
+ 2025-09-18 15:04:19,804 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0199 | Val rms_score: 0.9487
63
+ 2025-09-18 15:04:23,700 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0180 | Val rms_score: 0.9420
64
+ 2025-09-18 15:04:27,198 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0202 | Val rms_score: 0.9566
65
+ 2025-09-18 15:04:30,666 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0207 | Val rms_score: 0.9616
66
+ 2025-09-18 15:04:34,259 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0270 | Val rms_score: 0.9494
67
+ 2025-09-18 15:04:37,807 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0199 | Val rms_score: 0.9515
68
+ 2025-09-18 15:04:41,704 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0217 | Val rms_score: 0.9486
69
+ 2025-09-18 15:04:42,709 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0221 | Val rms_score: 0.9624
70
+ 2025-09-18 15:04:46,348 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0235 | Val rms_score: 0.9572
71
+ 2025-09-18 15:04:49,825 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0211 | Val rms_score: 0.9710
72
+ 2025-09-18 15:04:53,388 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0195 | Val rms_score: 0.9748
73
+ 2025-09-18 15:04:57,318 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0201 | Val rms_score: 0.9416
74
+ 2025-09-18 15:05:01,042 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0176 | Val rms_score: 0.9515
75
+ 2025-09-18 15:05:04,596 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0175 | Val rms_score: 0.9529
76
+ 2025-09-18 15:05:08,223 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0189 | Val rms_score: 0.9424
77
+ 2025-09-18 15:05:11,615 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0218 | Val rms_score: 0.9445
78
+ 2025-09-18 15:05:12,956 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0207 | Val rms_score: 0.9623
79
+ 2025-09-18 15:05:16,490 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0226 | Val rms_score: 0.9919
80
+ 2025-09-18 15:05:19,940 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0238 | Val rms_score: 0.9464
81
+ 2025-09-18 15:05:23,592 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0190 | Val rms_score: 0.9700
82
+ 2025-09-18 15:05:27,065 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0179 | Val rms_score: 0.9461
83
+ 2025-09-18 15:05:30,909 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0190 | Val rms_score: 0.9458
84
+ 2025-09-18 15:05:34,470 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0163 | Val rms_score: 0.9483
85
+ 2025-09-18 15:05:38,002 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0172 | Val rms_score: 0.9591
86
+ 2025-09-18 15:05:41,558 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0187 | Val rms_score: 0.9695
87
+ 2025-09-18 15:05:42,545 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0215 | Val rms_score: 0.9472
88
+ 2025-09-18 15:05:46,385 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0206 | Val rms_score: 0.9415
89
+ 2025-09-18 15:05:49,954 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0191 | Val rms_score: 0.9432
90
+ 2025-09-18 15:05:54,407 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0160 | Val rms_score: 0.9399
91
+ 2025-09-18 15:05:57,849 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0162 | Val rms_score: 0.9435
92
+ 2025-09-18 15:06:01,300 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0160 | Val rms_score: 0.9537
93
+ 2025-09-18 15:06:05,228 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0168 | Val rms_score: 0.9627
94
+ 2025-09-18 15:06:08,702 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0163 | Val rms_score: 0.9559
95
+ 2025-09-18 15:06:12,174 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0175 | Val rms_score: 0.9641
96
+ 2025-09-18 15:06:13,166 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0186 | Val rms_score: 0.9612
97
+ 2025-09-18 15:06:16,679 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0171 | Val rms_score: 0.9504
98
+ 2025-09-18 15:06:20,587 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0178 | Val rms_score: 0.9674
99
+ 2025-09-18 15:06:24,090 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0167 | Val rms_score: 0.9497
100
+ 2025-09-18 15:06:27,518 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0165 | Val rms_score: 0.9738
101
+ 2025-09-18 15:06:31,130 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0167 | Val rms_score: 0.9553
102
+ 2025-09-18 15:06:34,603 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0178 | Val rms_score: 0.9765
103
+ 2025-09-18 15:06:38,488 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0175 | Val rms_score: 0.9746
104
+ 2025-09-18 15:06:42,096 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0148 | Val rms_score: 0.9570
105
+ 2025-09-18 15:06:43,240 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0147 | Val rms_score: 0.9535
106
+ 2025-09-18 15:06:47,090 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0159 | Val rms_score: 0.9691
107
+ 2025-09-18 15:06:50,973 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0150 | Val rms_score: 0.9759
108
+ 2025-09-18 15:06:54,923 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0155 | Val rms_score: 0.9765
109
+ 2025-09-18 15:06:58,456 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0149 | Val rms_score: 0.9648
110
+ 2025-09-18 15:07:01,948 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0154 | Val rms_score: 0.9598
111
+ 2025-09-18 15:07:05,416 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0148 | Val rms_score: 0.9701
112
+ 2025-09-18 15:07:08,997 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0143 | Val rms_score: 0.9662
113
+ 2025-09-18 15:07:12,927 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0146 | Val rms_score: 0.9744
114
+ 2025-09-18 15:07:13,957 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0160 | Val rms_score: 0.9649
115
+ 2025-09-18 15:07:17,503 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0155 | Val rms_score: 0.9562
116
+ 2025-09-18 15:07:21,097 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0162 | Val rms_score: 0.9658
117
+ 2025-09-18 15:07:24,712 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0157 | Val rms_score: 0.9769
118
+ 2025-09-18 15:07:28,731 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0149 | Val rms_score: 0.9628
119
+ 2025-09-18 15:07:32,263 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0153 | Val rms_score: 0.9647
120
+ 2025-09-18 15:07:35,790 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0145 | Val rms_score: 0.9591
121
+ 2025-09-18 15:07:39,290 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0198 | Val rms_score: 0.9672
122
+ 2025-09-18 15:07:39,765 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.8883
123
+ 2025-09-18 15:07:40,041 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset delaney at 2025-09-18_15-07-40
124
+ 2025-09-18 15:07:40,655 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5000 | Val rms_score: 1.0776
125
+ 2025-09-18 15:07:40,655 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
126
+ 2025-09-18 15:07:41,468 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.0776
127
+ 2025-09-18 15:07:45,254 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1412 | Val rms_score: 1.0946
128
+ 2025-09-18 15:07:48,612 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1137 | Val rms_score: 1.1153
129
+ 2025-09-18 15:07:52,225 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1040 | Val rms_score: 1.1029
130
+ 2025-09-18 15:07:55,894 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0824 | Val rms_score: 1.1130
131
+ 2025-09-18 15:07:59,533 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0717 | Val rms_score: 1.0557
132
+ 2025-09-18 15:07:59,931 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 174
133
+ 2025-09-18 15:08:00,533 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 1.0557
134
+ 2025-09-18 15:08:04,347 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0553 | Val rms_score: 1.0945
135
+ 2025-09-18 15:08:08,010 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0606 | Val rms_score: 1.0759
136
+ 2025-09-18 15:08:11,542 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0558 | Val rms_score: 1.1130
137
+ 2025-09-18 15:08:12,419 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0477 | Val rms_score: 1.0714
138
+ 2025-09-18 15:08:16,011 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0461 | Val rms_score: 1.0629
139
+ 2025-09-18 15:08:19,956 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0428 | Val rms_score: 1.0973
140
+ 2025-09-18 15:08:23,581 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0463 | Val rms_score: 1.0745
141
+ 2025-09-18 15:08:27,204 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0376 | Val rms_score: 1.0624
142
+ 2025-09-18 15:08:30,606 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0348 | Val rms_score: 1.0445
143
+ 2025-09-18 15:08:30,736 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 435
144
+ 2025-09-18 15:08:31,215 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val rms_score: 1.0445
145
+ 2025-09-18 15:08:34,701 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0298 | Val rms_score: 1.0065
146
+ 2025-09-18 15:08:35,132 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 464
147
+ 2025-09-18 15:08:35,624 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val rms_score: 1.0065
148
+ 2025-09-18 15:08:39,336 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0306 | Val rms_score: 1.0433
149
+ 2025-09-18 15:08:42,850 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0321 | Val rms_score: 1.0131
150
+ 2025-09-18 15:08:43,727 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0294 | Val rms_score: 1.0187
151
+ 2025-09-18 15:08:47,142 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0271 | Val rms_score: 1.0335
152
+ 2025-09-18 15:08:50,741 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0265 | Val rms_score: 1.0895
153
+ 2025-09-18 15:08:54,496 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0234 | Val rms_score: 1.0811
154
+ 2025-09-18 15:08:57,963 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0233 | Val rms_score: 1.0572
155
+ 2025-09-18 15:09:01,487 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0244 | Val rms_score: 1.0787
156
+ 2025-09-18 15:09:04,919 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0288 | Val rms_score: 1.0961
157
+ 2025-09-18 15:09:08,294 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0428 | Val rms_score: 0.9613
158
+ 2025-09-18 15:09:08,683 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 754
159
+ 2025-09-18 15:09:09,198 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 26 with val rms_score: 0.9613
160
+ 2025-09-18 15:09:12,868 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0308 | Val rms_score: 0.9742
161
+ 2025-09-18 15:09:13,780 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0275 | Val rms_score: 0.9396
162
+ 2025-09-18 15:09:13,958 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 812
163
+ 2025-09-18 15:09:14,449 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val rms_score: 0.9396
164
+ 2025-09-18 15:09:17,963 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0240 | Val rms_score: 0.9752
165
+ 2025-09-18 15:09:21,372 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0199 | Val rms_score: 0.9462
166
+ 2025-09-18 15:09:24,792 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0207 | Val rms_score: 0.9696
167
+ 2025-09-18 15:09:28,482 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0219 | Val rms_score: 0.9312
168
+ 2025-09-18 15:09:28,653 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 928
169
+ 2025-09-18 15:09:29,141 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 32 with val rms_score: 0.9312
170
+ 2025-09-18 15:09:32,699 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0242 | Val rms_score: 0.9843
171
+ 2025-09-18 15:09:36,062 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0205 | Val rms_score: 0.9596
172
+ 2025-09-18 15:09:40,441 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0219 | Val rms_score: 0.9846
173
+ 2025-09-18 15:09:41,397 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0220 | Val rms_score: 0.9664
174
+ 2025-09-18 15:09:45,280 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0199 | Val rms_score: 0.9454
175
+ 2025-09-18 15:09:48,724 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0215 | Val rms_score: 0.9429
176
+ 2025-09-18 15:09:52,113 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0183 | Val rms_score: 0.9630
177
+ 2025-09-18 15:09:55,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0191 | Val rms_score: 0.9631
178
+ 2025-09-18 15:09:59,096 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0214 | Val rms_score: 1.0045
179
+ 2025-09-18 15:10:02,842 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0209 | Val rms_score: 0.9674
180
+ 2025-09-18 15:10:06,473 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0187 | Val rms_score: 0.9513
181
+ 2025-09-18 15:10:09,929 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0237 | Val rms_score: 0.9692
182
+ 2025-09-18 15:10:10,840 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0167 | Val rms_score: 0.9607
183
+ 2025-09-18 15:10:14,207 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0174 | Val rms_score: 0.9895
184
+ 2025-09-18 15:10:17,927 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0171 | Val rms_score: 0.9644
185
+ 2025-09-18 15:10:21,384 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0163 | Val rms_score: 0.9672
186
+ 2025-09-18 15:10:24,882 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0186 | Val rms_score: 0.9698
187
+ 2025-09-18 15:10:28,466 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0166 | Val rms_score: 0.9772
188
+ 2025-09-18 15:10:32,053 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0148 | Val rms_score: 0.9663
189
+ 2025-09-18 15:10:36,049 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0172 | Val rms_score: 0.9922
190
+ 2025-09-18 15:10:39,527 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0180 | Val rms_score: 0.9561
191
+ 2025-09-18 15:10:42,946 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0168 | Val rms_score: 0.9721
192
+ 2025-09-18 15:10:43,973 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0157 | Val rms_score: 0.9583
193
+ 2025-09-18 15:10:47,571 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0177 | Val rms_score: 0.9580
194
+ 2025-09-18 15:10:51,538 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0190 | Val rms_score: 1.0229
195
+ 2025-09-18 15:10:55,071 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0205 | Val rms_score: 0.9513
196
+ 2025-09-18 15:10:58,636 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0211 | Val rms_score: 0.9619
197
+ 2025-09-18 15:11:02,134 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0169 | Val rms_score: 0.9514
198
+ 2025-09-18 15:11:05,648 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0166 | Val rms_score: 0.9854
199
+ 2025-09-18 15:11:09,337 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0164 | Val rms_score: 0.9693
200
+ 2025-09-18 15:11:12,856 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0177 | Val rms_score: 0.9805
201
+ 2025-09-18 15:11:13,843 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0155 | Val rms_score: 0.9773
202
+ 2025-09-18 15:11:17,341 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0165 | Val rms_score: 0.9792
203
+ 2025-09-18 15:11:20,689 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0168 | Val rms_score: 0.9776
204
+ 2025-09-18 15:11:24,547 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0158 | Val rms_score: 0.9725
205
+ 2025-09-18 15:11:28,117 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0166 | Val rms_score: 0.9810
206
+ 2025-09-18 15:11:32,468 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0170 | Val rms_score: 1.0063
207
+ 2025-09-18 15:11:35,778 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0160 | Val rms_score: 0.9772
208
+ 2025-09-18 15:11:39,114 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0171 | Val rms_score: 0.9962
209
+ 2025-09-18 15:11:42,853 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0158 | Val rms_score: 0.9817
210
+ 2025-09-18 15:11:43,950 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0167 | Val rms_score: 0.9759
211
+ 2025-09-18 15:11:47,418 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0164 | Val rms_score: 0.9612
212
+ 2025-09-18 15:11:50,900 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0154 | Val rms_score: 0.9686
213
+ 2025-09-18 15:11:54,405 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0151 | Val rms_score: 0.9723
214
+ 2025-09-18 15:11:58,312 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0143 | Val rms_score: 0.9609
215
+ 2025-09-18 15:12:01,785 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0150 | Val rms_score: 0.9712
216
+ 2025-09-18 15:12:05,280 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0153 | Val rms_score: 0.9704
217
+ 2025-09-18 15:12:08,814 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0162 | Val rms_score: 0.9965
218
+ 2025-09-18 15:12:12,318 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0152 | Val rms_score: 0.9538
219
+ 2025-09-18 15:12:13,796 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0164 | Val rms_score: 0.9972
220
+ 2025-09-18 15:12:17,238 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0174 | Val rms_score: 0.9998
221
+ 2025-09-18 15:12:20,590 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0158 | Val rms_score: 0.9526
222
+ 2025-09-18 15:12:24,080 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0158 | Val rms_score: 0.9570
223
+ 2025-09-18 15:12:27,529 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0174 | Val rms_score: 0.9705
224
+ 2025-09-18 15:12:31,233 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0158 | Val rms_score: 0.9753
225
+ 2025-09-18 15:12:34,577 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0166 | Val rms_score: 0.9753
226
+ 2025-09-18 15:12:37,915 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0149 | Val rms_score: 0.9732
227
+ 2025-09-18 15:12:41,345 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0140 | Val rms_score: 0.9860
228
+ 2025-09-18 15:12:42,244 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0133 | Val rms_score: 0.9708
229
+ 2025-09-18 15:12:45,941 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0143 | Val rms_score: 0.9675
230
+ 2025-09-18 15:12:49,283 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0158 | Val rms_score: 0.9850
231
+ 2025-09-18 15:12:52,963 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0184 | Val rms_score: 0.9775
232
+ 2025-09-18 15:12:56,322 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0152 | Val rms_score: 0.9729
233
+ 2025-09-18 15:12:59,695 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0154 | Val rms_score: 0.9767
234
+ 2025-09-18 15:13:03,454 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0146 | Val rms_score: 0.9933
235
+ 2025-09-18 15:13:06,908 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0127 | Val rms_score: 0.9786
236
+ 2025-09-18 15:13:10,275 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0126 | Val rms_score: 0.9723
237
+ 2025-09-18 15:13:11,229 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0149 | Val rms_score: 1.0012
238
+ 2025-09-18 15:13:11,669 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.7910
239
+ 2025-09-18 15:13:11,967 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset delaney at 2025-09-18_15-13-11
240
+ 2025-09-18 15:13:15,136 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.6724 | Val rms_score: 1.1004
241
+ 2025-09-18 15:13:15,136 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
242
+ 2025-09-18 15:13:15,905 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.1004
243
+ 2025-09-18 15:13:19,344 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1412 | Val rms_score: 1.0226
244
+ 2025-09-18 15:13:19,499 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
245
+ 2025-09-18 15:13:19,971 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.0226
246
+ 2025-09-18 15:13:23,497 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1099 | Val rms_score: 1.0098
247
+ 2025-09-18 15:13:23,658 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
248
+ 2025-09-18 15:13:24,142 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.0098
249
+ 2025-09-18 15:13:27,703 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1001 | Val rms_score: 1.0090
250
+ 2025-09-18 15:13:27,867 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 116
251
+ 2025-09-18 15:13:28,361 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 1.0090
252
+ 2025-09-18 15:13:31,911 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0824 | Val rms_score: 0.9968
253
+ 2025-09-18 15:13:32,077 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 145
254
+ 2025-09-18 15:13:32,571 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.9968
255
+ 2025-09-18 15:13:36,132 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0690 | Val rms_score: 0.9857
256
+ 2025-09-18 15:13:36,553 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 174
257
+ 2025-09-18 15:13:37,042 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.9857
258
+ 2025-09-18 15:13:40,486 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0521 | Val rms_score: 1.0062
259
+ 2025-09-18 15:13:41,506 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0563 | Val rms_score: 0.9838
260
+ 2025-09-18 15:13:41,683 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 232
261
+ 2025-09-18 15:13:42,216 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.9838
262
+ 2025-09-18 15:13:45,778 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0533 | Val rms_score: 0.9748
263
+ 2025-09-18 15:13:45,950 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 261
264
+ 2025-09-18 15:13:46,473 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.9748
265
+ 2025-09-18 15:13:50,126 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0471 | Val rms_score: 0.9733
266
+ 2025-09-18 15:13:50,307 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 290
267
+ 2025-09-18 15:13:50,821 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 0.9733
268
+ 2025-09-18 15:13:54,420 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0456 | Val rms_score: 0.9909
269
+ 2025-09-18 15:13:58,272 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0388 | Val rms_score: 1.0063
270
+ 2025-09-18 15:14:01,820 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0399 | Val rms_score: 0.9692
271
+ 2025-09-18 15:14:02,035 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 377
272
+ 2025-09-18 15:14:02,549 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val rms_score: 0.9692
273
+ 2025-09-18 15:14:06,107 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0352 | Val rms_score: 1.0179
274
+ 2025-09-18 15:14:09,700 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0348 | Val rms_score: 1.0007
275
+ 2025-09-18 15:14:13,381 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0329 | Val rms_score: 0.9566
276
+ 2025-09-18 15:14:11,359 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 464
277
+ 2025-09-18 15:14:11,849 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val rms_score: 0.9566
278
+ 2025-09-18 15:14:15,582 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0315 | Val rms_score: 0.9816
279
+ 2025-09-18 15:14:19,224 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0344 | Val rms_score: 1.0046
280
+ 2025-09-18 15:14:22,815 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0272 | Val rms_score: 0.9843
281
+ 2025-09-18 15:14:26,325 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0263 | Val rms_score: 0.9954
282
+ 2025-09-18 15:14:30,009 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0268 | Val rms_score: 1.0021
283
+ 2025-09-18 15:14:33,876 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0271 | Val rms_score: 0.9653
284
+ 2025-09-18 15:14:37,458 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0290 | Val rms_score: 0.9796
285
+ 2025-09-18 15:14:41,154 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0271 | Val rms_score: 1.0011
286
+ 2025-09-18 15:14:42,133 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0252 | Val rms_score: 0.9936
287
+ 2025-09-18 15:14:45,705 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0257 | Val rms_score: 0.9887
288
+ 2025-09-18 15:14:49,426 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0244 | Val rms_score: 0.9717
289
+ 2025-09-18 15:14:52,916 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0234 | Val rms_score: 0.9902
290
+ 2025-09-18 15:14:56,375 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0246 | Val rms_score: 0.9802
291
+ 2025-09-18 15:14:59,812 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0244 | Val rms_score: 1.0020
292
+ 2025-09-18 15:15:03,177 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0252 | Val rms_score: 0.9915
293
+ 2025-09-18 15:15:06,906 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0271 | Val rms_score: 0.9711
294
+ 2025-09-18 15:15:10,348 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0299 | Val rms_score: 0.9815
295
+ 2025-09-18 15:15:11,327 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0242 | Val rms_score: 1.0314
296
+ 2025-09-18 15:15:15,764 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0240 | Val rms_score: 1.0095
297
+ 2025-09-18 15:15:19,214 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0201 | Val rms_score: 1.0126
298
+ 2025-09-18 15:15:23,131 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0228 | Val rms_score: 0.9602
299
+ 2025-09-18 15:15:26,571 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0203 | Val rms_score: 0.9916
300
+ 2025-09-18 15:15:30,183 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0185 | Val rms_score: 1.0097
301
+ 2025-09-18 15:15:33,693 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0187 | Val rms_score: 0.9881
302
+ 2025-09-18 15:15:37,399 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0180 | Val rms_score: 0.9672
303
+ 2025-09-18 15:15:41,290 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0203 | Val rms_score: 0.9972
304
+ 2025-09-18 15:15:42,405 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0216 | Val rms_score: 1.0132
305
+ 2025-09-18 15:15:45,779 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0218 | Val rms_score: 1.0365
306
+ 2025-09-18 15:15:49,201 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0205 | Val rms_score: 0.9754
307
+ 2025-09-18 15:15:52,827 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0179 | Val rms_score: 1.0174
308
+ 2025-09-18 15:15:56,743 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0189 | Val rms_score: 1.0073
309
+ 2025-09-18 15:16:00,279 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0213 | Val rms_score: 1.0121
310
+ 2025-09-18 15:16:03,711 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0189 | Val rms_score: 1.0134
311
+ 2025-09-18 15:16:07,227 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0213 | Val rms_score: 0.9995
312
+ 2025-09-18 15:16:10,739 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0185 | Val rms_score: 1.0311
313
+ 2025-09-18 15:16:12,084 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0181 | Val rms_score: 0.9889
314
+ 2025-09-18 15:16:15,756 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0205 | Val rms_score: 0.9879
315
+ 2025-09-18 15:16:19,355 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0216 | Val rms_score: 0.9855
316
+ 2025-09-18 15:16:23,018 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0182 | Val rms_score: 1.0039
317
+ 2025-09-18 15:16:26,470 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0177 | Val rms_score: 0.9921
318
+ 2025-09-18 15:16:30,337 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0194 | Val rms_score: 0.9870
319
+ 2025-09-18 15:16:34,132 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0185 | Val rms_score: 1.0161
320
+ 2025-09-18 15:16:37,677 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0165 | Val rms_score: 0.9944
321
+ 2025-09-18 15:16:41,287 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0174 | Val rms_score: 1.0048
322
+ 2025-09-18 15:16:42,271 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0160 | Val rms_score: 1.0120
323
+ 2025-09-18 15:16:46,069 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0174 | Val rms_score: 1.0162
324
+ 2025-09-18 15:16:49,678 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0177 | Val rms_score: 1.0023
325
+ 2025-09-18 15:16:53,119 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0174 | Val rms_score: 1.0078
326
+ 2025-09-18 15:16:56,614 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0164 | Val rms_score: 1.0211
327
+ 2025-09-18 15:17:00,133 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0165 | Val rms_score: 0.9763
328
+ 2025-09-18 15:17:03,962 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0230 | Val rms_score: 1.0258
329
+ 2025-09-18 15:17:07,658 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0205 | Val rms_score: 1.0275
330
+ 2025-09-18 15:17:12,260 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0154 | Val rms_score: 1.0332
331
+ 2025-09-18 15:17:13,265 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0179 | Val rms_score: 1.0369
332
+ 2025-09-18 15:17:16,667 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0170 | Val rms_score: 1.0097
333
+ 2025-09-18 15:17:20,354 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0174 | Val rms_score: 1.0129
334
+ 2025-09-18 15:17:23,837 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0152 | Val rms_score: 1.0053
335
+ 2025-09-18 15:17:27,262 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0168 | Val rms_score: 1.0221
336
+ 2025-09-18 15:17:30,683 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0156 | Val rms_score: 1.0050
337
+ 2025-09-18 15:17:34,137 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0214 | Val rms_score: 1.0410
338
+ 2025-09-18 15:17:37,843 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0172 | Val rms_score: 1.0056
339
+ 2025-09-18 15:17:41,333 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0167 | Val rms_score: 1.0199
340
+ 2025-09-18 15:17:42,210 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0144 | Val rms_score: 1.0212
341
+ 2025-09-18 15:17:45,613 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0162 | Val rms_score: 1.0333
342
+ 2025-09-18 15:17:49,063 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0155 | Val rms_score: 1.0226
343
+ 2025-09-18 15:17:52,738 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0142 | Val rms_score: 1.0052
344
+ 2025-09-18 15:17:56,197 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0144 | Val rms_score: 1.0150
345
+ 2025-09-18 15:17:59,640 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0142 | Val rms_score: 1.0448
346
+ 2025-09-18 15:18:03,068 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0165 | Val rms_score: 1.0364
347
+ 2025-09-18 15:18:06,451 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0172 | Val rms_score: 1.0411
348
+ 2025-09-18 15:18:10,283 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0177 | Val rms_score: 1.0209
349
+ 2025-09-18 15:18:13,823 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0160 | Val rms_score: 1.0214
350
+ 2025-09-18 15:18:14,783 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0152 | Val rms_score: 1.0116
351
+ 2025-09-18 15:18:18,242 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0160 | Val rms_score: 1.0442
352
+ 2025-09-18 15:18:21,622 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0152 | Val rms_score: 1.0382
353
+ 2025-09-18 15:18:25,537 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0166 | Val rms_score: 1.0407
354
+ 2025-09-18 15:18:29,080 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0152 | Val rms_score: 1.0242
355
+ 2025-09-18 15:18:32,424 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0133 | Val rms_score: 1.0494
356
+ 2025-09-18 15:18:35,857 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0137 | Val rms_score: 1.0540
357
+ 2025-09-18 15:18:39,319 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0150 | Val rms_score: 1.0252
358
+ 2025-09-18 15:18:43,071 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0170 | Val rms_score: 1.0215
359
+ 2025-09-18 15:18:43,942 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0155 | Val rms_score: 1.0198
360
+ 2025-09-18 15:18:47,708 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0147 | Val rms_score: 1.0289
361
+ 2025-09-18 15:18:51,247 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0140 | Val rms_score: 1.0465
362
+ 2025-09-18 15:18:51,730 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.8575
363
+ 2025-09-18 15:18:52,018 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.8456, Std Dev: 0.0406
logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_freesolv_epochs100_batch_size32_20250918_154545.log ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:45:45,728 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Running benchmark for dataset: freesolv
2
+ 2025-09-18 15:45:45,729 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - dataset: freesolv, tasks: ['y'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 15:45:45,734 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset freesolv at 2025-09-18_15-45-45
4
+ 2025-09-18 15:45:45,342 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.1324 | Val rms_score: 1.3771
5
+ 2025-09-18 15:45:45,342 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
6
+ 2025-09-18 15:45:45,990 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.3771
7
+ 2025-09-18 15:45:48,815 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3713 | Val rms_score: 1.3036
8
+ 2025-09-18 15:45:48,979 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
9
+ 2025-09-18 15:45:49,457 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.3036
10
+ 2025-09-18 15:45:51,949 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2684 | Val rms_score: 1.2223
11
+ 2025-09-18 15:45:52,113 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
12
+ 2025-09-18 15:45:52,604 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.2223
13
+ 2025-09-18 15:45:55,507 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2610 | Val rms_score: 1.1506
14
+ 2025-09-18 15:45:55,691 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
15
+ 2025-09-18 15:45:56,185 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 1.1506
16
+ 2025-09-18 15:45:58,767 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2224 | Val rms_score: 1.1427
17
+ 2025-09-18 15:45:58,943 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
18
+ 2025-09-18 15:45:59,449 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 1.1427
19
+ 2025-09-18 15:46:02,242 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1406 | Val rms_score: 1.1245
20
+ 2025-09-18 15:46:02,707 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 102
21
+ 2025-09-18 15:46:03,186 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 1.1245
22
+ 2025-09-18 15:46:05,875 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1562 | Val rms_score: 0.9971
23
+ 2025-09-18 15:46:06,040 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 119
24
+ 2025-09-18 15:46:06,536 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.9971
25
+ 2025-09-18 15:46:09,380 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1250 | Val rms_score: 0.9986
26
+ 2025-09-18 15:46:11,942 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1098 | Val rms_score: 0.9484
27
+ 2025-09-18 15:46:12,109 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 153
28
+ 2025-09-18 15:46:12,589 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.9484
29
+ 2025-09-18 15:46:15,379 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0855 | Val rms_score: 0.9023
30
+ 2025-09-18 15:46:15,542 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 170
31
+ 2025-09-18 15:46:16,024 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 0.9023
32
+ 2025-09-18 15:46:16,145 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0942 | Val rms_score: 0.7964
33
+ 2025-09-18 15:46:16,609 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 187
34
+ 2025-09-18 15:46:17,111 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 0.7964
35
+ 2025-09-18 15:46:20,041 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.3594 | Val rms_score: 0.9019
36
+ 2025-09-18 15:46:22,570 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2261 | Val rms_score: 0.8880
37
+ 2025-09-18 15:46:25,321 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1071 | Val rms_score: 0.8934
38
+ 2025-09-18 15:46:27,807 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0643 | Val rms_score: 0.8894
39
+ 2025-09-18 15:46:30,469 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0630 | Val rms_score: 0.8650
40
+ 2025-09-18 15:46:33,335 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0611 | Val rms_score: 0.8951
41
+ 2025-09-18 15:46:35,976 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0446 | Val rms_score: 0.8464
42
+ 2025-09-18 15:46:38,506 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0508 | Val rms_score: 0.8894
43
+ 2025-09-18 15:46:41,007 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0437 | Val rms_score: 0.8766
44
+ 2025-09-18 15:46:43,540 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0469 | Val rms_score: 0.8772
45
+ 2025-09-18 15:46:46,383 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0540 | Val rms_score: 0.8637
46
+ 2025-09-18 15:46:46,357 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0489 | Val rms_score: 0.8880
47
+ 2025-09-18 15:46:48,829 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0537 | Val rms_score: 0.9015
48
+ 2025-09-18 15:46:51,277 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0466 | Val rms_score: 0.8879
49
+ 2025-09-18 15:46:53,759 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0496 | Val rms_score: 0.8415
50
+ 2025-09-18 15:46:56,759 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0545 | Val rms_score: 0.8717
51
+ 2025-09-18 15:46:59,239 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0416 | Val rms_score: 0.8928
52
+ 2025-09-18 15:47:01,764 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0352 | Val rms_score: 0.8934
53
+ 2025-09-18 15:47:04,279 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0301 | Val rms_score: 0.8585
54
+ 2025-09-18 15:47:06,832 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0528 | Val rms_score: 0.9679
55
+ 2025-09-18 15:47:09,796 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1029 | Val rms_score: 0.9103
56
+ 2025-09-18 15:47:12,239 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0568 | Val rms_score: 0.8938
57
+ 2025-09-18 15:47:14,735 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0379 | Val rms_score: 0.8635
58
+ 2025-09-18 15:47:14,790 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0354 | Val rms_score: 0.8601
59
+ 2025-09-18 15:47:17,295 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0330 | Val rms_score: 0.8698
60
+ 2025-09-18 15:47:20,206 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0299 | Val rms_score: 0.8712
61
+ 2025-09-18 15:47:22,635 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0279 | Val rms_score: 0.8626
62
+ 2025-09-18 15:47:25,168 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0294 | Val rms_score: 0.8694
63
+ 2025-09-18 15:47:27,684 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0299 | Val rms_score: 0.8572
64
+ 2025-09-18 15:47:30,249 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0268 | Val rms_score: 0.8527
65
+ 2025-09-18 15:47:33,085 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0254 | Val rms_score: 0.8467
66
+ 2025-09-18 15:47:35,589 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0278 | Val rms_score: 0.8965
67
+ 2025-09-18 15:47:38,097 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0288 | Val rms_score: 0.8707
68
+ 2025-09-18 15:47:40,620 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0265 | Val rms_score: 0.8534
69
+ 2025-09-18 15:47:43,083 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0215 | Val rms_score: 0.8554
70
+ 2025-09-18 15:47:45,947 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0250 | Val rms_score: 0.8431
71
+ 2025-09-18 15:47:45,986 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0256 | Val rms_score: 0.8576
72
+ 2025-09-18 15:47:48,523 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0294 | Val rms_score: 0.8634
73
+ 2025-09-18 15:47:50,981 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0247 | Val rms_score: 0.8405
74
+ 2025-09-18 15:47:53,440 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0250 | Val rms_score: 0.8537
75
+ 2025-09-18 15:47:56,240 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0255 | Val rms_score: 0.8639
76
+ 2025-09-18 15:47:58,818 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0085 | Val rms_score: 0.8519
77
+ 2025-09-18 15:48:01,295 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0237 | Val rms_score: 0.8391
78
+ 2025-09-18 15:48:03,899 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0216 | Val rms_score: 0.8441
79
+ 2025-09-18 15:48:06,355 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0255 | Val rms_score: 0.8617
80
+ 2025-09-18 15:48:09,231 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0214 | Val rms_score: 0.8419
81
+ 2025-09-18 15:48:11,740 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0232 | Val rms_score: 0.8541
82
+ 2025-09-18 15:48:15,201 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0226 | Val rms_score: 0.8366
83
+ 2025-09-18 15:48:15,274 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0214 | Val rms_score: 0.8772
84
+ 2025-09-18 15:48:17,696 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0246 | Val rms_score: 0.8556
85
+ 2025-09-18 15:48:20,471 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0177 | Val rms_score: 0.8487
86
+ 2025-09-18 15:48:22,965 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0185 | Val rms_score: 0.8593
87
+ 2025-09-18 15:48:25,505 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0240 | Val rms_score: 0.8711
88
+ 2025-09-18 15:48:28,047 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0174 | Val rms_score: 0.8568
89
+ 2025-09-18 15:48:30,677 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0176 | Val rms_score: 0.8440
90
+ 2025-09-18 15:48:33,582 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0186 | Val rms_score: 0.8505
91
+ 2025-09-18 15:48:36,075 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0179 | Val rms_score: 0.8500
92
+ 2025-09-18 15:48:38,565 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0193 | Val rms_score: 0.8591
93
+ 2025-09-18 15:48:41,107 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0205 | Val rms_score: 0.8609
94
+ 2025-09-18 15:48:43,700 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0285 | Val rms_score: 0.8326
95
+ 2025-09-18 15:48:46,499 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0273 | Val rms_score: 0.8502
96
+ 2025-09-18 15:48:46,444 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0218 | Val rms_score: 0.8497
97
+ 2025-09-18 15:48:49,067 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0171 | Val rms_score: 0.8354
98
+ 2025-09-18 15:48:51,649 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0299 | Val rms_score: 0.9197
99
+ 2025-09-18 15:48:54,155 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0662 | Val rms_score: 0.9171
100
+ 2025-09-18 15:48:57,133 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0352 | Val rms_score: 0.8824
101
+ 2025-09-18 15:48:59,680 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0335 | Val rms_score: 0.8933
102
+ 2025-09-18 15:49:02,215 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0253 | Val rms_score: 0.8820
103
+ 2025-09-18 15:49:04,832 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0226 | Val rms_score: 0.8670
104
+ 2025-09-18 15:49:07,404 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0240 | Val rms_score: 0.8845
105
+ 2025-09-18 15:49:10,210 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0221 | Val rms_score: 0.8713
106
+ 2025-09-18 15:49:12,631 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0153 | Val rms_score: 0.8463
107
+ 2025-09-18 15:49:15,074 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0172 | Val rms_score: 0.8538
108
+ 2025-09-18 15:49:15,034 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0191 | Val rms_score: 0.8130
109
+ 2025-09-18 15:49:17,556 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0338 | Val rms_score: 0.8831
110
+ 2025-09-18 15:49:20,491 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0208 | Val rms_score: 0.8637
111
+ 2025-09-18 15:49:23,154 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0209 | Val rms_score: 0.8601
112
+ 2025-09-18 15:49:25,753 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0175 | Val rms_score: 0.8600
113
+ 2025-09-18 15:49:28,245 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0177 | Val rms_score: 0.8742
114
+ 2025-09-18 15:49:30,733 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0177 | Val rms_score: 0.8609
115
+ 2025-09-18 15:49:33,807 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0155 | Val rms_score: 0.8486
116
+ 2025-09-18 15:49:36,345 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0185 | Val rms_score: 0.8879
117
+ 2025-09-18 15:49:38,816 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0237 | Val rms_score: 0.8419
118
+ 2025-09-18 15:49:41,293 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0204 | Val rms_score: 0.8626
119
+ 2025-09-18 15:49:43,757 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0198 | Val rms_score: 0.8384
120
+ 2025-09-18 15:49:46,571 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0240 | Val rms_score: 0.8449
121
+ 2025-09-18 15:49:46,557 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0207 | Val rms_score: 0.8573
122
+ 2025-09-18 15:49:49,069 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0136 | Val rms_score: 0.8609
123
+ 2025-09-18 15:49:51,623 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0175 | Val rms_score: 0.8739
124
+ 2025-09-18 15:49:52,072 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5642
125
+ 2025-09-18 15:49:52,400 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset freesolv at 2025-09-18_15-49-52
126
+ 2025-09-18 15:49:54,551 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.4118 | Val rms_score: 1.3332
127
+ 2025-09-18 15:49:54,552 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
128
+ 2025-09-18 15:49:55,324 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.3332
129
+ 2025-09-18 15:49:57,859 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4099 | Val rms_score: 1.2115
130
+ 2025-09-18 15:49:58,031 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
131
+ 2025-09-18 15:49:58,534 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.2115
132
+ 2025-09-18 15:50:01,079 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2739 | Val rms_score: 1.1235
133
+ 2025-09-18 15:50:01,248 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
134
+ 2025-09-18 15:50:01,746 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.1235
135
+ 2025-09-18 15:50:04,283 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2233 | Val rms_score: 1.0940
136
+ 2025-09-18 15:50:04,452 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
137
+ 2025-09-18 15:50:04,960 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 1.0940
138
+ 2025-09-18 15:50:07,540 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1765 | Val rms_score: 1.0345
139
+ 2025-09-18 15:50:07,711 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
140
+ 2025-09-18 15:50:08,262 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 1.0345
141
+ 2025-09-18 15:50:10,781 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0581 | Val rms_score: 0.9973
142
+ 2025-09-18 15:50:11,237 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 102
143
+ 2025-09-18 15:50:11,721 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.9973
144
+ 2025-09-18 15:50:14,373 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1369 | Val rms_score: 0.9518
145
+ 2025-09-18 15:50:14,541 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 119
146
+ 2025-09-18 15:50:15,048 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.9518
147
+ 2025-09-18 15:50:15,093 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1176 | Val rms_score: 0.8829
148
+ 2025-09-18 15:50:15,283 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 136
149
+ 2025-09-18 15:50:15,793 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.8829
150
+ 2025-09-18 15:50:18,416 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0924 | Val rms_score: 0.8500
151
+ 2025-09-18 15:50:18,584 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 153
152
+ 2025-09-18 15:50:19,090 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.8500
153
+ 2025-09-18 15:50:21,562 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0763 | Val rms_score: 0.8132
154
+ 2025-09-18 15:50:21,730 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 170
155
+ 2025-09-18 15:50:22,276 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 0.8132
156
+ 2025-09-18 15:50:24,928 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0643 | Val rms_score: 0.8183
157
+ 2025-09-18 15:50:27,829 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0615 | Val rms_score: 0.7937
158
+ 2025-09-18 15:50:28,000 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 204
159
+ 2025-09-18 15:50:28,492 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val rms_score: 0.7937
160
+ 2025-09-18 15:50:31,027 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0515 | Val rms_score: 0.8146
161
+ 2025-09-18 15:50:33,518 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0699 | Val rms_score: 0.7449
162
+ 2025-09-18 15:50:33,686 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 238
163
+ 2025-09-18 15:50:34,201 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.7449
164
+ 2025-09-18 15:50:36,766 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1296 | Val rms_score: 0.8282
165
+ 2025-09-18 15:50:39,332 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1163 | Val rms_score: 0.8055
166
+ 2025-09-18 15:50:42,245 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0510 | Val rms_score: 0.8212
167
+ 2025-09-18 15:50:44,757 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0501 | Val rms_score: 0.7888
168
+ 2025-09-18 15:50:49,875 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0430 | Val rms_score: 0.7809
169
+ 2025-09-18 15:50:47,147 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0356 | Val rms_score: 0.8030
170
+ 2025-09-18 15:50:49,684 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0317 | Val rms_score: 0.7877
171
+ 2025-09-18 15:50:52,526 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0354 | Val rms_score: 0.7892
172
+ 2025-09-18 15:50:55,041 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0372 | Val rms_score: 0.7653
173
+ 2025-09-18 15:50:57,602 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0283 | Val rms_score: 0.7703
174
+ 2025-09-18 15:51:00,159 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0340 | Val rms_score: 0.8007
175
+ 2025-09-18 15:51:02,706 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0515 | Val rms_score: 0.7341
176
+ 2025-09-18 15:51:03,159 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 442
177
+ 2025-09-18 15:51:03,669 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 26 with val rms_score: 0.7341
178
+ 2025-09-18 15:51:06,418 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0717 | Val rms_score: 0.7888
179
+ 2025-09-18 15:51:09,101 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0476 | Val rms_score: 0.7622
180
+ 2025-09-18 15:51:11,605 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0308 | Val rms_score: 0.7764
181
+ 2025-09-18 15:51:14,106 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0270 | Val rms_score: 0.7766
182
+ 2025-09-18 15:51:16,681 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0271 | Val rms_score: 0.7767
183
+ 2025-09-18 15:51:17,048 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0257 | Val rms_score: 0.7638
184
+ 2025-09-18 15:51:19,575 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0270 | Val rms_score: 0.7725
185
+ 2025-09-18 15:51:22,060 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0299 | Val rms_score: 0.7590
186
+ 2025-09-18 15:51:24,611 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0356 | Val rms_score: 0.8070
187
+ 2025-09-18 15:51:27,168 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0329 | Val rms_score: 0.7537
188
+ 2025-09-18 15:51:30,045 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0386 | Val rms_score: 0.7592
189
+ 2025-09-18 15:51:32,610 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0319 | Val rms_score: 0.8167
190
+ 2025-09-18 15:51:35,105 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0388 | Val rms_score: 0.8324
191
+ 2025-09-18 15:51:37,810 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0312 | Val rms_score: 0.8232
192
+ 2025-09-18 15:51:40,459 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0213 | Val rms_score: 0.8132
193
+ 2025-09-18 15:51:43,356 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0205 | Val rms_score: 0.8182
194
+ 2025-09-18 15:51:45,875 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0202 | Val rms_score: 0.8218
195
+ 2025-09-18 15:51:45,880 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0193 | Val rms_score: 0.8253
196
+ 2025-09-18 15:51:48,458 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0208 | Val rms_score: 0.8149
197
+ 2025-09-18 15:51:50,938 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0218 | Val rms_score: 0.8187
198
+ 2025-09-18 15:51:53,751 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0226 | Val rms_score: 0.8086
199
+ 2025-09-18 15:51:56,246 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0197 | Val rms_score: 0.8150
200
+ 2025-09-18 15:51:58,708 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0156 | Val rms_score: 0.8157
201
+ 2025-09-18 15:52:01,418 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0171 | Val rms_score: 0.8030
202
+ 2025-09-18 15:52:03,908 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0164 | Val rms_score: 0.8051
203
+ 2025-09-18 15:52:06,852 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0306 | Val rms_score: 0.7728
204
+ 2025-09-18 15:52:09,410 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0236 | Val rms_score: 0.8143
205
+ 2025-09-18 15:52:11,837 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0423 | Val rms_score: 0.8071
206
+ 2025-09-18 15:52:14,284 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0234 | Val rms_score: 0.8150
207
+ 2025-09-18 15:52:16,783 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0210 | Val rms_score: 0.8086
208
+ 2025-09-18 15:52:17,145 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0207 | Val rms_score: 0.8045
209
+ 2025-09-18 15:52:19,715 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0294 | Val rms_score: 0.8505
210
+ 2025-09-18 15:52:23,202 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0366 | Val rms_score: 0.8367
211
+ 2025-09-18 15:52:25,716 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0292 | Val rms_score: 0.8371
212
+ 2025-09-18 15:52:28,280 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0233 | Val rms_score: 0.8420
213
+ 2025-09-18 15:52:31,172 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0271 | Val rms_score: 0.8276
214
+ 2025-09-18 15:52:33,641 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0187 | Val rms_score: 0.8247
215
+ 2025-09-18 15:52:36,049 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0178 | Val rms_score: 0.8197
216
+ 2025-09-18 15:52:38,589 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0133 | Val rms_score: 0.8176
217
+ 2025-09-18 15:52:41,158 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0173 | Val rms_score: 0.8182
218
+ 2025-09-18 15:52:44,006 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0233 | Val rms_score: 0.8259
219
+ 2025-09-18 15:52:46,516 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0176 | Val rms_score: 0.8332
220
+ 2025-09-18 15:52:46,517 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0169 | Val rms_score: 0.8199
221
+ 2025-09-18 15:52:49,061 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0146 | Val rms_score: 0.8210
222
+ 2025-09-18 15:52:51,547 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0187 | Val rms_score: 0.8409
223
+ 2025-09-18 15:52:54,442 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0206 | Val rms_score: 0.8340
224
+ 2025-09-18 15:52:56,878 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0177 | Val rms_score: 0.8402
225
+ 2025-09-18 15:52:59,424 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0157 | Val rms_score: 0.8435
226
+ 2025-09-18 15:53:01,944 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0147 | Val rms_score: 0.8186
227
+ 2025-09-18 15:53:04,496 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0145 | Val rms_score: 0.8255
228
+ 2025-09-18 15:53:07,472 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0161 | Val rms_score: 0.8342
229
+ 2025-09-18 15:53:10,158 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0276 | Val rms_score: 0.8194
230
+ 2025-09-18 15:53:12,715 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0170 | Val rms_score: 0.8124
231
+ 2025-09-18 15:53:15,268 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0120 | Val rms_score: 0.8162
232
+ 2025-09-18 15:53:15,277 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0134 | Val rms_score: 0.8173
233
+ 2025-09-18 15:53:18,120 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0140 | Val rms_score: 0.8178
234
+ 2025-09-18 15:53:20,637 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0138 | Val rms_score: 0.8095
235
+ 2025-09-18 15:53:23,176 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0111 | Val rms_score: 0.8137
236
+ 2025-09-18 15:53:25,817 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0118 | Val rms_score: 0.8212
237
+ 2025-09-18 15:53:28,456 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0142 | Val rms_score: 0.8205
238
+ 2025-09-18 15:53:31,409 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0208 | Val rms_score: 0.8168
239
+ 2025-09-18 15:53:33,948 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0187 | Val rms_score: 0.8221
240
+ 2025-09-18 15:53:36,481 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0164 | Val rms_score: 0.8229
241
+ 2025-09-18 15:53:38,993 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0160 | Val rms_score: 0.8191
242
+ 2025-09-18 15:53:41,480 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0129 | Val rms_score: 0.8076
243
+ 2025-09-18 15:53:44,391 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0124 | Val rms_score: 0.8152
244
+ 2025-09-18 15:53:46,933 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0144 | Val rms_score: 0.8086
245
+ 2025-09-18 15:53:46,894 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0152 | Val rms_score: 0.7986
246
+ 2025-09-18 15:53:49,424 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0150 | Val rms_score: 0.8162
247
+ 2025-09-18 15:53:51,879 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0124 | Val rms_score: 0.7945
248
+ 2025-09-18 15:53:54,637 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0149 | Val rms_score: 0.8052
249
+ 2025-09-18 15:53:57,152 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0124 | Val rms_score: 0.8004
250
+ 2025-09-18 15:53:59,737 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0130 | Val rms_score: 0.8027
251
+ 2025-09-18 15:54:02,386 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0117 | Val rms_score: 0.8030
252
+ 2025-09-18 15:54:02,916 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5317
253
+ 2025-09-18 15:54:03,237 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset freesolv at 2025-09-18_15-54-03
254
+ 2025-09-18 15:54:05,452 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.6912 | Val rms_score: 1.2348
255
+ 2025-09-18 15:54:05,452 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
256
+ 2025-09-18 15:54:05,976 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.2348
257
+ 2025-09-18 15:54:08,779 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4890 | Val rms_score: 1.2777
258
+ 2025-09-18 15:54:11,391 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3125 | Val rms_score: 1.1093
259
+ 2025-09-18 15:54:11,554 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
260
+ 2025-09-18 15:54:12,065 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.1093
261
+ 2025-09-18 15:54:14,905 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2335 | Val rms_score: 1.1110
262
+ 2025-09-18 15:54:20,121 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2031 | Val rms_score: 1.0825
263
+ 2025-09-18 15:54:15,100 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
264
+ 2025-09-18 15:54:15,596 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 1.0825
265
+ 2025-09-18 15:54:18,200 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1079 | Val rms_score: 1.0076
266
+ 2025-09-18 15:54:18,648 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 102
267
+ 2025-09-18 15:54:19,127 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 1.0076
268
+ 2025-09-18 15:54:21,708 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1710 | Val rms_score: 0.9993
269
+ 2025-09-18 15:54:21,874 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 119
270
+ 2025-09-18 15:54:22,361 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.9993
271
+ 2025-09-18 15:54:24,930 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1471 | Val rms_score: 0.9456
272
+ 2025-09-18 15:54:25,097 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 136
273
+ 2025-09-18 15:54:25,577 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.9456
274
+ 2025-09-18 15:54:28,071 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1153 | Val rms_score: 0.9032
275
+ 2025-09-18 15:54:28,238 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 153
276
+ 2025-09-18 15:54:28,721 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.9032
277
+ 2025-09-18 15:54:31,333 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1025 | Val rms_score: 0.8683
278
+ 2025-09-18 15:54:31,503 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 170
279
+ 2025-09-18 15:54:32,004 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 0.8683
280
+ 2025-09-18 15:54:34,685 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0910 | Val rms_score: 0.8159
281
+ 2025-09-18 15:54:35,166 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 187
282
+ 2025-09-18 15:54:35,683 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 0.8159
283
+ 2025-09-18 15:54:38,456 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0557 | Val rms_score: 0.8202
284
+ 2025-09-18 15:54:40,874 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0813 | Val rms_score: 0.8145
285
+ 2025-09-18 15:54:41,041 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 221
286
+ 2025-09-18 15:54:41,531 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val rms_score: 0.8145
287
+ 2025-09-18 15:54:44,233 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0772 | Val rms_score: 0.7813
288
+ 2025-09-18 15:54:44,401 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 238
289
+ 2025-09-18 15:54:44,878 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.7813
290
+ 2025-09-18 15:54:44,926 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0744 | Val rms_score: 0.7815
291
+ 2025-09-18 15:54:47,435 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0639 | Val rms_score: 0.8183
292
+ 2025-09-18 15:54:50,450 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0487 | Val rms_score: 0.7908
293
+ 2025-09-18 15:54:52,985 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0566 | Val rms_score: 0.7508
294
+ 2025-09-18 15:54:53,152 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 306
295
+ 2025-09-18 15:54:53,639 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 18 with val rms_score: 0.7508
296
+ 2025-09-18 15:54:56,229 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0517 | Val rms_score: 0.8312
297
+ 2025-09-18 15:54:58,826 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0508 | Val rms_score: 0.8160
298
+ 2025-09-18 15:55:01,389 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0455 | Val rms_score: 0.7715
299
+ 2025-09-18 15:55:04,268 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0404 | Val rms_score: 0.8043
300
+ 2025-09-18 15:55:06,769 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0393 | Val rms_score: 0.8156
301
+ 2025-09-18 15:55:09,366 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0347 | Val rms_score: 0.7671
302
+ 2025-09-18 15:55:12,059 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0375 | Val rms_score: 0.8293
303
+ 2025-09-18 15:55:14,530 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0335 | Val rms_score: 0.7911
304
+ 2025-09-18 15:55:14,928 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0329 | Val rms_score: 0.7938
305
+ 2025-09-18 15:55:17,401 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0319 | Val rms_score: 0.8188
306
+ 2025-09-18 15:55:19,875 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0331 | Val rms_score: 0.7902
307
+ 2025-09-18 15:55:22,459 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0297 | Val rms_score: 0.8082
308
+ 2025-09-18 15:55:24,920 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0308 | Val rms_score: 0.8108
309
+ 2025-09-18 15:55:27,758 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0324 | Val rms_score: 0.8052
310
+ 2025-09-18 15:55:30,211 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0352 | Val rms_score: 0.7863
311
+ 2025-09-18 15:55:32,767 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0335 | Val rms_score: 0.8300
312
+ 2025-09-18 15:55:35,456 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0283 | Val rms_score: 0.8149
313
+ 2025-09-18 15:55:38,109 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0280 | Val rms_score: 0.8219
314
+ 2025-09-18 15:55:41,102 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0281 | Val rms_score: 0.8286
315
+ 2025-09-18 15:55:43,698 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0395 | Val rms_score: 0.7764
316
+ 2025-09-18 15:55:46,343 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0317 | Val rms_score: 0.7967
317
+ 2025-09-18 15:55:46,466 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0267 | Val rms_score: 0.8215
318
+ 2025-09-18 15:55:49,095 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0262 | Val rms_score: 0.7921
319
+ 2025-09-18 15:55:52,120 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0225 | Val rms_score: 0.8230
320
+ 2025-09-18 15:55:54,793 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0221 | Val rms_score: 0.7972
321
+ 2025-09-18 15:55:57,321 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0232 | Val rms_score: 0.8173
322
+ 2025-09-18 15:55:59,830 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0214 | Val rms_score: 0.8197
323
+ 2025-09-18 15:56:02,397 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0256 | Val rms_score: 0.8152
324
+ 2025-09-18 15:56:05,202 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0283 | Val rms_score: 0.8286
325
+ 2025-09-18 15:56:07,722 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0258 | Val rms_score: 0.8194
326
+ 2025-09-18 15:56:10,172 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0213 | Val rms_score: 0.8169
327
+ 2025-09-18 15:56:12,736 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0200 | Val rms_score: 0.8445
328
+ 2025-09-18 15:56:15,402 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0308 | Val rms_score: 0.7741
329
+ 2025-09-18 15:56:15,718 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0280 | Val rms_score: 0.8344
330
+ 2025-09-18 15:56:18,187 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0108 | Val rms_score: 0.8080
331
+ 2025-09-18 15:56:20,630 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0207 | Val rms_score: 0.8242
332
+ 2025-09-18 15:56:23,172 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0195 | Val rms_score: 0.8174
333
+ 2025-09-18 15:56:25,685 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0242 | Val rms_score: 0.8075
334
+ 2025-09-18 15:56:28,586 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0213 | Val rms_score: 0.8237
335
+ 2025-09-18 15:56:31,045 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0186 | Val rms_score: 0.7954
336
+ 2025-09-18 15:56:34,549 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0189 | Val rms_score: 0.8175
337
+ 2025-09-18 15:56:37,161 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0200 | Val rms_score: 0.8329
338
+ 2025-09-18 15:56:39,686 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0182 | Val rms_score: 0.8160
339
+ 2025-09-18 15:56:42,597 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0179 | Val rms_score: 0.7863
340
+ 2025-09-18 15:56:45,266 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0170 | Val rms_score: 0.8123
341
+ 2025-09-18 15:56:45,334 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0163 | Val rms_score: 0.8124
342
+ 2025-09-18 15:56:47,888 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0250 | Val rms_score: 0.7979
343
+ 2025-09-18 15:56:50,380 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0246 | Val rms_score: 0.8180
344
+ 2025-09-18 15:56:53,355 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0205 | Val rms_score: 0.7686
345
+ 2025-09-18 15:56:55,945 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0249 | Val rms_score: 0.9671
346
+ 2025-09-18 15:56:58,380 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0685 | Val rms_score: 0.9448
347
+ 2025-09-18 15:57:00,900 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0439 | Val rms_score: 0.7940
348
+ 2025-09-18 15:57:03,444 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0292 | Val rms_score: 0.8446
349
+ 2025-09-18 15:57:06,248 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0269 | Val rms_score: 0.8435
350
+ 2025-09-18 15:57:08,791 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0200 | Val rms_score: 0.8124
351
+ 2025-09-18 15:57:11,259 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0201 | Val rms_score: 0.8326
352
+ 2025-09-18 15:57:13,733 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0184 | Val rms_score: 0.8353
353
+ 2025-09-18 15:57:16,208 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0196 | Val rms_score: 0.8267
354
+ 2025-09-18 15:57:16,490 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0188 | Val rms_score: 0.8301
355
+ 2025-09-18 15:57:18,922 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0179 | Val rms_score: 0.8369
356
+ 2025-09-18 15:57:21,484 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0142 | Val rms_score: 0.8211
357
+ 2025-09-18 15:57:24,047 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0161 | Val rms_score: 0.8070
358
+ 2025-09-18 15:57:26,486 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0152 | Val rms_score: 0.8027
359
+ 2025-09-18 15:57:29,268 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0160 | Val rms_score: 0.8462
360
+ 2025-09-18 15:57:31,811 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0172 | Val rms_score: 0.8100
361
+ 2025-09-18 15:57:34,377 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0155 | Val rms_score: 0.8032
362
+ 2025-09-18 15:57:36,966 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0199 | Val rms_score: 0.7908
363
+ 2025-09-18 15:57:39,540 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0249 | Val rms_score: 0.8429
364
+ 2025-09-18 15:57:42,428 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0171 | Val rms_score: 0.8271
365
+ 2025-09-18 15:57:45,014 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0138 | Val rms_score: 0.8184
366
+ 2025-09-18 15:57:47,501 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0141 | Val rms_score: 0.8211
367
+ 2025-09-18 15:57:47,507 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0140 | Val rms_score: 0.8266
368
+ 2025-09-18 15:57:50,185 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0171 | Val rms_score: 0.8270
369
+ 2025-09-18 15:57:53,030 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0186 | Val rms_score: 0.7982
370
+ 2025-09-18 15:57:55,489 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0167 | Val rms_score: 0.8228
371
+ 2025-09-18 15:57:57,977 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0185 | Val rms_score: 0.8457
372
+ 2025-09-18 15:58:00,448 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0174 | Val rms_score: 0.8174
373
+ 2025-09-18 15:58:02,897 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0198 | Val rms_score: 0.8116
374
+ 2025-09-18 15:58:05,964 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0182 | Val rms_score: 0.7967
375
+ 2025-09-18 15:58:08,553 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0188 | Val rms_score: 0.8103
376
+ 2025-09-18 15:58:11,006 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0184 | Val rms_score: 0.8120
377
+ 2025-09-18 15:58:13,420 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0143 | Val rms_score: 0.8073
378
+ 2025-09-18 15:58:13,888 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5516
379
+ 2025-09-18 15:58:14,210 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.5491, Std Dev: 0.0134
logs_modchembert_regression_ModChemBERT-MLM/modchembert_deepchem_splits_run_lipo_epochs100_batch_size32_20250918_155814.log ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 15:58:14,211 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Running benchmark for dataset: lipo
2
+ 2025-09-18 15:58:14,211 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - dataset: lipo, tasks: ['exp'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 15:58:14,218 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset lipo at 2025-09-18_15-58-14
4
+ 2025-09-18 15:58:21,944 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5406 | Val rms_score: 1.0003
5
+ 2025-09-18 15:58:21,944 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
6
+ 2025-09-18 15:58:22,561 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.0003
7
+ 2025-09-18 15:58:33,971 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3438 | Val rms_score: 0.7823
8
+ 2025-09-18 15:58:34,103 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
9
+ 2025-09-18 15:58:34,592 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7823
10
+ 2025-09-18 15:58:46,081 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2750 | Val rms_score: 0.7549
11
+ 2025-09-18 15:58:46,246 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 315
12
+ 2025-09-18 15:58:46,739 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.7549
13
+ 2025-09-18 15:58:55,743 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2109 | Val rms_score: 0.7516
14
+ 2025-09-18 15:58:55,910 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
15
+ 2025-09-18 15:58:56,395 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.7516
16
+ 2025-09-18 15:59:07,801 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1875 | Val rms_score: 0.7729
17
+ 2025-09-18 15:59:16,802 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1531 | Val rms_score: 0.7570
18
+ 2025-09-18 15:59:28,304 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1286 | Val rms_score: 0.7473
19
+ 2025-09-18 15:59:28,439 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 735
20
+ 2025-09-18 15:59:28,926 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.7473
21
+ 2025-09-18 15:59:40,450 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1172 | Val rms_score: 0.7539
22
+ 2025-09-18 15:59:49,258 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1076 | Val rms_score: 0.7608
23
+ 2025-09-18 16:00:01,550 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0938 | Val rms_score: 0.7473
24
+ 2025-09-18 16:00:12,870 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0875 | Val rms_score: 0.7763
25
+ 2025-09-18 16:00:22,143 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0823 | Val rms_score: 0.7885
26
+ 2025-09-18 16:00:33,542 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0822 | Val rms_score: 0.7878
27
+ 2025-09-18 16:00:44,816 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0777 | Val rms_score: 0.7656
28
+ 2025-09-18 16:00:53,696 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0729 | Val rms_score: 0.7644
29
+ 2025-09-18 16:01:05,216 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0797 | Val rms_score: 0.7693
30
+ 2025-09-18 16:01:17,087 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0695 | Val rms_score: 0.7681
31
+ 2025-09-18 16:01:26,185 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0681 | Val rms_score: 0.7724
32
+ 2025-09-18 16:01:37,548 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0648 | Val rms_score: 0.7783
33
+ 2025-09-18 16:01:47,651 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0647 | Val rms_score: 0.7653
34
+ 2025-09-18 16:01:59,138 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0602 | Val rms_score: 0.7460
35
+ 2025-09-18 16:01:59,566 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2205
36
+ 2025-09-18 16:02:00,069 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val rms_score: 0.7460
37
+ 2025-09-18 16:02:11,788 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0691 | Val rms_score: 0.7666
38
+ 2025-09-18 16:02:20,951 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0630 | Val rms_score: 0.7589
39
+ 2025-09-18 16:02:32,517 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0664 | Val rms_score: 0.7585
40
+ 2025-09-18 16:02:43,882 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0584 | Val rms_score: 0.7777
41
+ 2025-09-18 16:02:52,838 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0589 | Val rms_score: 0.7619
42
+ 2025-09-18 16:03:04,444 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0567 | Val rms_score: 0.7700
43
+ 2025-09-18 16:03:15,949 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0563 | Val rms_score: 0.7765
44
+ 2025-09-18 16:03:25,690 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0563 | Val rms_score: 0.7746
45
+ 2025-09-18 16:03:37,112 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0525 | Val rms_score: 0.7735
46
+ 2025-09-18 16:03:46,035 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0574 | Val rms_score: 0.7604
47
+ 2025-09-18 16:03:57,794 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0563 | Val rms_score: 0.7635
48
+ 2025-09-18 16:04:09,161 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0531 | Val rms_score: 0.7819
49
+ 2025-09-18 16:04:23,193 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0518 | Val rms_score: 0.7684
50
+ 2025-09-18 16:04:34,440 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0533 | Val rms_score: 0.7565
51
+ 2025-09-18 16:04:39,333 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0547 | Val rms_score: 0.7551
52
+ 2025-09-18 16:04:50,965 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0507 | Val rms_score: 0.7730
53
+ 2025-09-18 16:05:02,354 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0524 | Val rms_score: 0.7911
54
+ 2025-09-18 16:05:12,182 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0523 | Val rms_score: 0.7783
55
+ 2025-09-18 16:05:23,514 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0547 | Val rms_score: 0.7466
56
+ 2025-09-18 16:05:32,381 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0422 | Val rms_score: 0.7572
57
+ 2025-09-18 16:05:44,239 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0582 | Val rms_score: 0.7592
58
+ 2025-09-18 16:05:55,646 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0544 | Val rms_score: 0.7687
59
+ 2025-09-18 16:06:04,465 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0477 | Val rms_score: 0.7679
60
+ 2025-09-18 16:06:16,146 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0444 | Val rms_score: 0.7561
61
+ 2025-09-18 16:06:27,340 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0479 | Val rms_score: 0.7690
62
+ 2025-09-18 16:06:36,284 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0484 | Val rms_score: 0.7684
63
+ 2025-09-18 16:06:48,553 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0486 | Val rms_score: 0.7484
64
+ 2025-09-18 16:06:59,921 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0490 | Val rms_score: 0.7612
65
+ 2025-09-18 16:07:08,605 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0481 | Val rms_score: 0.7665
66
+ 2025-09-18 16:07:19,827 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0457 | Val rms_score: 0.7665
67
+ 2025-09-18 16:07:31,347 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0521 | Val rms_score: 0.7532
68
+ 2025-09-18 16:07:40,443 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0502 | Val rms_score: 0.7614
69
+ 2025-09-18 16:07:51,827 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0489 | Val rms_score: 0.7738
70
+ 2025-09-18 16:08:03,207 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0492 | Val rms_score: 0.7573
71
+ 2025-09-18 16:08:11,970 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0480 | Val rms_score: 0.7715
72
+ 2025-09-18 16:08:23,656 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0447 | Val rms_score: 0.7745
73
+ 2025-09-18 16:08:33,612 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0465 | Val rms_score: 0.7465
74
+ 2025-09-18 16:08:44,867 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0484 | Val rms_score: 0.7788
75
+ 2025-09-18 16:08:56,387 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0478 | Val rms_score: 0.7530
76
+ 2025-09-18 16:09:05,290 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0355 | Val rms_score: 0.7640
77
+ 2025-09-18 16:09:17,008 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0594 | Val rms_score: 0.7554
78
+ 2025-09-18 16:09:28,445 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0453 | Val rms_score: 0.7502
79
+ 2025-09-18 16:09:37,280 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0467 | Val rms_score: 0.7497
80
+ 2025-09-18 16:09:48,616 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0450 | Val rms_score: 0.7632
81
+ 2025-09-18 16:09:59,766 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0466 | Val rms_score: 0.7575
82
+ 2025-09-18 16:10:10,011 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0487 | Val rms_score: 0.7605
83
+ 2025-09-18 16:10:21,432 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0453 | Val rms_score: 0.7669
84
+ 2025-09-18 16:10:32,874 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0524 | Val rms_score: 0.7443
85
+ 2025-09-18 16:10:33,012 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 7245
86
+ 2025-09-18 16:10:31,040 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 69 with val rms_score: 0.7443
87
+ 2025-09-18 16:10:42,338 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0478 | Val rms_score: 0.7508
88
+ 2025-09-18 16:10:53,660 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0469 | Val rms_score: 0.7541
89
+ 2025-09-18 16:11:02,892 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0448 | Val rms_score: 0.7507
90
+ 2025-09-18 16:11:14,327 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0450 | Val rms_score: 0.7553
91
+ 2025-09-18 16:11:25,718 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0464 | Val rms_score: 0.7731
92
+ 2025-09-18 16:11:34,499 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0469 | Val rms_score: 0.7431
93
+ 2025-09-18 16:11:34,641 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 7875
94
+ 2025-09-18 16:11:35,139 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 75 with val rms_score: 0.7431
95
+ 2025-09-18 16:11:46,318 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0455 | Val rms_score: 0.7528
96
+ 2025-09-18 16:11:59,035 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0449 | Val rms_score: 0.7457
97
+ 2025-09-18 16:12:07,927 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0444 | Val rms_score: 0.7487
98
+ 2025-09-18 16:12:19,391 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0461 | Val rms_score: 0.7861
99
+ 2025-09-18 16:12:30,904 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0456 | Val rms_score: 0.7696
100
+ 2025-09-18 16:12:39,977 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0482 | Val rms_score: 0.7611
101
+ 2025-09-18 16:12:51,775 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0504 | Val rms_score: 0.7518
102
+ 2025-09-18 16:13:03,265 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0529 | Val rms_score: 0.7599
103
+ 2025-09-18 16:13:12,272 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0527 | Val rms_score: 0.7530
104
+ 2025-09-18 16:13:23,882 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0500 | Val rms_score: 0.7583
105
+ 2025-09-18 16:13:33,890 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0435 | Val rms_score: 0.7519
106
+ 2025-09-18 16:13:45,730 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0413 | Val rms_score: 0.7603
107
+ 2025-09-18 16:13:57,067 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0488 | Val rms_score: 0.7590
108
+ 2025-09-18 16:14:05,869 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0465 | Val rms_score: 0.7603
109
+ 2025-09-18 16:14:17,148 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0462 | Val rms_score: 0.7443
110
+ 2025-09-18 16:14:28,553 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0440 | Val rms_score: 0.7600
111
+ 2025-09-18 16:14:37,722 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0471 | Val rms_score: 0.7557
112
+ 2025-09-18 16:14:49,300 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0466 | Val rms_score: 0.7576
113
+ 2025-09-18 16:15:00,808 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0487 | Val rms_score: 0.7593
114
+ 2025-09-18 16:15:09,712 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0429 | Val rms_score: 0.7602
115
+ 2025-09-18 16:15:22,047 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0465 | Val rms_score: 0.7529
116
+ 2025-09-18 16:15:33,545 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0441 | Val rms_score: 0.7671
117
+ 2025-09-18 16:15:42,203 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0436 | Val rms_score: 0.7541
118
+ 2025-09-18 16:15:53,475 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0444 | Val rms_score: 0.7585
119
+ 2025-09-18 16:16:02,273 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0437 | Val rms_score: 0.7551
120
+ 2025-09-18 16:16:03,108 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.7084
121
+ 2025-09-18 16:16:03,430 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset lipo at 2025-09-18_16-16-03
122
+ 2025-09-18 16:16:13,666 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5344 | Val rms_score: 0.9043
123
+ 2025-09-18 16:16:13,666 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
124
+ 2025-09-18 16:16:14,158 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.9043
125
+ 2025-09-18 16:16:25,854 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4313 | Val rms_score: 0.8291
126
+ 2025-09-18 16:16:26,017 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
127
+ 2025-09-18 16:16:26,501 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.8291
128
+ 2025-09-18 16:16:35,378 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3125 | Val rms_score: 0.8474
129
+ 2025-09-18 16:16:46,477 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2234 | Val rms_score: 0.7585
130
+ 2025-09-18 16:16:46,611 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
131
+ 2025-09-18 16:16:47,097 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.7585
132
+ 2025-09-18 16:16:58,587 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1812 | Val rms_score: 0.7778
133
+ 2025-09-18 16:17:07,329 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1750 | Val rms_score: 0.7320
134
+ 2025-09-18 16:17:07,780 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 630
135
+ 2025-09-18 16:17:08,271 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.7320
136
+ 2025-09-18 16:17:19,523 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1616 | Val rms_score: 0.7438
137
+ 2025-09-18 16:17:30,839 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1266 | Val rms_score: 0.7362
138
+ 2025-09-18 16:17:39,440 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1153 | Val rms_score: 0.7181
139
+ 2025-09-18 16:17:39,574 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 945
140
+ 2025-09-18 16:17:40,057 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.7181
141
+ 2025-09-18 16:17:52,508 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1125 | Val rms_score: 0.7334
142
+ 2025-09-18 16:18:03,716 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0966 | Val rms_score: 0.7197
143
+ 2025-09-18 16:18:12,796 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0932 | Val rms_score: 0.7172
144
+ 2025-09-18 16:18:12,929 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1260
145
+ 2025-09-18 16:18:13,416 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val rms_score: 0.7172
146
+ 2025-09-18 16:18:24,784 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0957 | Val rms_score: 0.7199
147
+ 2025-09-18 16:18:33,591 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0888 | Val rms_score: 0.7113
148
+ 2025-09-18 16:18:33,767 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1470
149
+ 2025-09-18 16:18:34,282 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.7113
150
+ 2025-09-18 16:18:45,476 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0833 | Val rms_score: 0.7128
151
+ 2025-09-18 16:18:56,663 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0859 | Val rms_score: 0.7228
152
+ 2025-09-18 16:19:05,673 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0772 | Val rms_score: 0.7196
153
+ 2025-09-18 16:19:16,894 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0729 | Val rms_score: 0.7308
154
+ 2025-09-18 16:19:28,003 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0734 | Val rms_score: 0.7463
155
+ 2025-09-18 16:19:37,622 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0731 | Val rms_score: 0.7357
156
+ 2025-09-18 16:19:48,763 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0609 | Val rms_score: 0.7052
157
+ 2025-09-18 16:19:49,202 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2205
158
+ 2025-09-18 16:19:49,700 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val rms_score: 0.7052
159
+ 2025-09-18 16:20:01,051 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0598 | Val rms_score: 0.7119
160
+ 2025-09-18 16:20:09,742 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0693 | Val rms_score: 0.7326
161
+ 2025-09-18 16:20:20,926 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0633 | Val rms_score: 0.7249
162
+ 2025-09-18 16:20:31,985 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0634 | Val rms_score: 0.7152
163
+ 2025-09-18 16:20:40,668 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0693 | Val rms_score: 0.7075
164
+ 2025-09-18 16:20:52,314 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0647 | Val rms_score: 0.7047
165
+ 2025-09-18 16:20:52,452 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2835
166
+ 2025-09-18 16:20:52,970 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val rms_score: 0.7047
167
+ 2025-09-18 16:21:04,202 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0664 | Val rms_score: 0.7065
168
+ 2025-09-18 16:21:14,103 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0649 | Val rms_score: 0.7164
169
+ 2025-09-18 16:21:25,278 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0616 | Val rms_score: 0.7245
170
+ 2025-09-18 16:21:34,440 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0574 | Val rms_score: 0.7156
171
+ 2025-09-18 16:21:46,366 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0576 | Val rms_score: 0.7150
172
+ 2025-09-18 16:21:57,722 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0563 | Val rms_score: 0.7183
173
+ 2025-09-18 16:22:06,805 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0558 | Val rms_score: 0.7065
174
+ 2025-09-18 16:22:18,219 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0554 | Val rms_score: 0.7117
175
+ 2025-09-18 16:22:29,479 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0543 | Val rms_score: 0.7126
176
+ 2025-09-18 16:22:38,633 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0559 | Val rms_score: 0.7255
177
+ 2025-09-18 16:22:50,091 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0556 | Val rms_score: 0.7145
178
+ 2025-09-18 16:23:02,725 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0536 | Val rms_score: 0.7129
179
+ 2025-09-18 16:23:11,518 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0528 | Val rms_score: 0.7099
180
+ 2025-09-18 16:23:22,963 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0543 | Val rms_score: 0.7064
181
+ 2025-09-18 16:23:37,789 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0531 | Val rms_score: 0.7192
182
+ 2025-09-18 16:23:44,279 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0565 | Val rms_score: 0.7229
183
+ 2025-09-18 16:23:55,997 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0578 | Val rms_score: 0.7094
184
+ 2025-09-18 16:24:05,065 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0494 | Val rms_score: 0.7078
185
+ 2025-09-18 16:24:16,617 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0518 | Val rms_score: 0.7127
186
+ 2025-09-18 16:24:28,576 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0571 | Val rms_score: 0.7088
187
+ 2025-09-18 16:24:38,620 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0559 | Val rms_score: 0.7028
188
+ 2025-09-18 16:24:38,774 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 5040
189
+ 2025-09-18 16:24:39,358 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 48 with val rms_score: 0.7028
190
+ 2025-09-18 16:24:51,207 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0503 | Val rms_score: 0.7113
191
+ 2025-09-18 16:25:02,820 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0553 | Val rms_score: 0.7168
192
+ 2025-09-18 16:25:11,910 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0534 | Val rms_score: 0.7097
193
+ 2025-09-18 16:25:23,693 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0547 | Val rms_score: 0.7106
194
+ 2025-09-18 16:25:32,859 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0512 | Val rms_score: 0.7189
195
+ 2025-09-18 16:25:44,432 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0538 | Val rms_score: 0.7167
196
+ 2025-09-18 16:25:55,923 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0537 | Val rms_score: 0.7128
197
+ 2025-09-18 16:26:04,903 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0494 | Val rms_score: 0.7164
198
+ 2025-09-18 16:26:16,737 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0504 | Val rms_score: 0.7142
199
+ 2025-09-18 16:26:29,224 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0503 | Val rms_score: 0.7102
200
+ 2025-09-18 16:26:38,131 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0467 | Val rms_score: 0.7130
201
+ 2025-09-18 16:26:49,543 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0488 | Val rms_score: 0.7054
202
+ 2025-09-18 16:27:00,949 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0455 | Val rms_score: 0.7108
203
+ 2025-09-18 16:27:10,308 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0465 | Val rms_score: 0.7103
204
+ 2025-09-18 16:27:21,548 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0542 | Val rms_score: 0.7106
205
+ 2025-09-18 16:27:33,131 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0482 | Val rms_score: 0.7147
206
+ 2025-09-18 16:27:42,036 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0547 | Val rms_score: 0.7020
207
+ 2025-09-18 16:27:42,176 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 6825
208
+ 2025-09-18 16:27:42,718 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 65 with val rms_score: 0.7020
209
+ 2025-09-18 16:27:54,418 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0534 | Val rms_score: 0.7261
210
+ 2025-09-18 16:28:04,901 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0509 | Val rms_score: 0.7046
211
+ 2025-09-18 16:28:16,534 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0459 | Val rms_score: 0.7047
212
+ 2025-09-18 16:28:28,080 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0483 | Val rms_score: 0.7217
213
+ 2025-09-18 16:28:36,978 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0500 | Val rms_score: 0.7074
214
+ 2025-09-18 16:28:48,516 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0483 | Val rms_score: 0.7045
215
+ 2025-09-18 16:29:00,344 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0492 | Val rms_score: 0.7062
216
+ 2025-09-18 16:29:09,414 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0488 | Val rms_score: 0.7057
217
+ 2025-09-18 16:29:20,932 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0487 | Val rms_score: 0.7087
218
+ 2025-09-18 16:29:32,185 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0498 | Val rms_score: 0.7073
219
+ 2025-09-18 16:29:40,957 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0539 | Val rms_score: 0.7265
220
+ 2025-09-18 16:29:53,739 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0465 | Val rms_score: 0.6987
221
+ 2025-09-18 16:29:53,883 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 8085
222
+ 2025-09-18 16:29:54,404 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 77 with val rms_score: 0.6987
223
+ 2025-09-18 16:30:03,727 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0462 | Val rms_score: 0.7038
224
+ 2025-09-18 16:30:15,265 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0454 | Val rms_score: 0.7009
225
+ 2025-09-18 16:30:27,136 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0478 | Val rms_score: 0.7145
226
+ 2025-09-18 16:30:36,194 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0414 | Val rms_score: 0.7048
227
+ 2025-09-18 16:30:48,084 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0463 | Val rms_score: 0.7033
228
+ 2025-09-18 16:30:59,492 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0469 | Val rms_score: 0.6996
229
+ 2025-09-18 16:31:08,338 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0473 | Val rms_score: 0.7154
230
+ 2025-09-18 16:31:19,825 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0472 | Val rms_score: 0.7068
231
+ 2025-09-18 16:31:32,135 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0497 | Val rms_score: 0.7071
232
+ 2025-09-18 16:31:41,364 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0520 | Val rms_score: 0.6971
233
+ 2025-09-18 16:31:41,547 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 9135
234
+ 2025-09-18 16:31:42,202 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 87 with val rms_score: 0.6971
235
+ 2025-09-18 16:31:53,603 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0459 | Val rms_score: 0.7184
236
+ 2025-09-18 16:32:05,122 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0455 | Val rms_score: 0.7154
237
+ 2025-09-18 16:32:14,137 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0447 | Val rms_score: 0.7092
238
+ 2025-09-18 16:32:25,756 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0455 | Val rms_score: 0.7153
239
+ 2025-09-18 16:32:35,398 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0484 | Val rms_score: 0.7020
240
+ 2025-09-18 16:32:46,956 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0462 | Val rms_score: 0.6944
241
+ 2025-09-18 16:32:47,111 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 9765
242
+ 2025-09-18 16:32:47,669 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 93 with val rms_score: 0.6944
243
+ 2025-09-18 16:32:59,304 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0482 | Val rms_score: 0.7007
244
+ 2025-09-18 16:33:08,348 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0490 | Val rms_score: 0.7123
245
+ 2025-09-18 16:33:20,897 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0463 | Val rms_score: 0.7062
246
+ 2025-09-18 16:33:32,812 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0474 | Val rms_score: 0.7260
247
+ 2025-09-18 16:33:41,965 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0486 | Val rms_score: 0.7023
248
+ 2025-09-18 16:33:53,400 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0454 | Val rms_score: 0.7067
249
+ 2025-09-18 16:34:04,711 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0469 | Val rms_score: 0.7011
250
+ 2025-09-18 16:34:05,534 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.7231
251
+ 2025-09-18 16:34:03,424 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset lipo at 2025-09-18_16-34-03
252
+ 2025-09-18 16:34:13,716 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.7469 | Val rms_score: 0.9654
253
+ 2025-09-18 16:34:13,716 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
254
+ 2025-09-18 16:34:14,578 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.9654
255
+ 2025-09-18 16:34:26,308 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4875 | Val rms_score: 0.8465
256
+ 2025-09-18 16:34:26,475 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
257
+ 2025-09-18 16:34:26,954 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.8465
258
+ 2025-09-18 16:34:35,818 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3187 | Val rms_score: 0.7931
259
+ 2025-09-18 16:34:35,986 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 315
260
+ 2025-09-18 16:34:36,489 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.7931
261
+ 2025-09-18 16:34:48,373 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2531 | Val rms_score: 0.7660
262
+ 2025-09-18 16:34:48,550 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
263
+ 2025-09-18 16:34:49,034 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.7660
264
+ 2025-09-18 16:35:00,268 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2250 | Val rms_score: 0.7581
265
+ 2025-09-18 16:35:00,439 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 525
266
+ 2025-09-18 16:35:00,949 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.7581
267
+ 2025-09-18 16:35:09,788 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1875 | Val rms_score: 0.7741
268
+ 2025-09-18 16:35:21,739 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1554 | Val rms_score: 0.7629
269
+ 2025-09-18 16:35:33,105 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1266 | Val rms_score: 0.7765
270
+ 2025-09-18 16:35:42,103 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1111 | Val rms_score: 0.7594
271
+ 2025-09-18 16:35:54,572 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1062 | Val rms_score: 0.7544
272
+ 2025-09-18 16:35:54,710 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1050
273
+ 2025-09-18 16:35:55,224 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 0.7544
274
+ 2025-09-18 16:36:04,330 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0983 | Val rms_score: 0.7511
275
+ 2025-09-18 16:36:04,786 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1155
276
+ 2025-09-18 16:36:05,313 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 0.7511
277
+ 2025-09-18 16:36:16,835 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0964 | Val rms_score: 0.7555
278
+ 2025-09-18 16:36:28,221 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0909 | Val rms_score: 0.7694
279
+ 2025-09-18 16:36:37,084 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0813 | Val rms_score: 0.7615
280
+ 2025-09-18 16:36:48,448 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0758 | Val rms_score: 0.7551
281
+ 2025-09-18 16:36:59,951 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0770 | Val rms_score: 0.7876
282
+ 2025-09-18 16:37:09,247 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0728 | Val rms_score: 0.7699
283
+ 2025-09-18 16:37:20,758 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0771 | Val rms_score: 0.7610
284
+ 2025-09-18 16:37:32,309 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0645 | Val rms_score: 0.7598
285
+ 2025-09-18 16:37:42,351 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0681 | Val rms_score: 0.7487
286
+ 2025-09-18 16:37:42,496 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2100
287
+ 2025-09-18 16:37:43,002 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val rms_score: 0.7487
288
+ 2025-09-18 16:37:54,685 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0684 | Val rms_score: 0.7643
289
+ 2025-09-18 16:38:04,065 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0648 | Val rms_score: 0.7623
290
+ 2025-09-18 16:38:15,507 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0672 | Val rms_score: 0.7711
291
+ 2025-09-18 16:38:27,122 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0598 | Val rms_score: 0.7505
292
+ 2025-09-18 16:38:36,154 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0609 | Val rms_score: 0.7495
293
+ 2025-09-18 16:38:47,623 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0643 | Val rms_score: 0.7731
294
+ 2025-09-18 16:38:59,467 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0621 | Val rms_score: 0.7634
295
+ 2025-09-18 16:39:08,447 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0625 | Val rms_score: 0.7660
296
+ 2025-09-18 16:39:21,015 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0590 | Val rms_score: 0.7653
297
+ 2025-09-18 16:39:32,449 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0591 | Val rms_score: 0.7582
298
+ 2025-09-18 16:39:41,351 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0608 | Val rms_score: 0.7691
299
+ 2025-09-18 16:39:53,077 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0531 | Val rms_score: 0.7756
300
+ 2025-09-18 16:40:04,552 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0567 | Val rms_score: 0.7591
301
+ 2025-09-18 16:40:13,805 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0563 | Val rms_score: 0.7525
302
+ 2025-09-18 16:40:25,499 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0521 | Val rms_score: 0.7627
303
+ 2025-09-18 16:40:34,685 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0523 | Val rms_score: 0.7622
304
+ 2025-09-18 16:40:46,679 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0507 | Val rms_score: 0.7603
305
+ 2025-09-18 16:40:58,439 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0542 | Val rms_score: 0.7543
306
+ 2025-09-18 16:41:08,576 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0507 | Val rms_score: 0.7563
307
+ 2025-09-18 16:41:20,136 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0506 | Val rms_score: 0.7518
308
+ 2025-09-18 16:41:31,633 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0455 | Val rms_score: 0.7543
309
+ 2025-09-18 16:41:40,960 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0551 | Val rms_score: 0.7540
310
+ 2025-09-18 16:41:52,503 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0516 | Val rms_score: 0.7605
311
+ 2025-09-18 16:42:03,978 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0482 | Val rms_score: 0.7592
312
+ 2025-09-18 16:42:13,190 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0537 | Val rms_score: 0.7712
313
+ 2025-09-18 16:42:24,587 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0484 | Val rms_score: 0.7537
314
+ 2025-09-18 16:42:36,348 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0536 | Val rms_score: 0.7506
315
+ 2025-09-18 16:42:46,849 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0551 | Val rms_score: 0.7583
316
+ 2025-09-18 16:42:59,854 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0521 | Val rms_score: 0.7433
317
+ 2025-09-18 16:43:00,004 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 5145
318
+ 2025-09-18 16:43:00,595 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 49 with val rms_score: 0.7433
319
+ 2025-09-18 16:43:10,417 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0497 | Val rms_score: 0.7542
320
+ 2025-09-18 16:43:22,301 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0520 | Val rms_score: 0.7702
321
+ 2025-09-18 16:43:34,462 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0503 | Val rms_score: 0.7583
322
+ 2025-09-18 16:43:43,527 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0493 | Val rms_score: 0.7538
323
+ 2025-09-18 16:43:55,292 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0489 | Val rms_score: 0.7569
324
+ 2025-09-18 16:44:04,447 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0471 | Val rms_score: 0.7445
325
+ 2025-09-18 16:44:15,944 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0543 | Val rms_score: 0.7462
326
+ 2025-09-18 16:44:28,053 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0496 | Val rms_score: 0.7553
327
+ 2025-09-18 16:44:38,251 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0531 | Val rms_score: 0.7745
328
+ 2025-09-18 16:44:49,788 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0490 | Val rms_score: 0.7532
329
+ 2025-09-18 16:45:01,622 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0466 | Val rms_score: 0.7583
330
+ 2025-09-18 16:45:11,192 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0523 | Val rms_score: 0.7604
331
+ 2025-09-18 16:45:23,288 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0482 | Val rms_score: 0.7502
332
+ 2025-09-18 16:45:34,797 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0557 | Val rms_score: 0.7564
333
+ 2025-09-18 16:45:44,183 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0467 | Val rms_score: 0.7549
334
+ 2025-09-18 16:45:55,721 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0488 | Val rms_score: 0.7632
335
+ 2025-09-18 16:46:04,760 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0474 | Val rms_score: 0.7569
336
+ 2025-09-18 16:46:17,540 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0493 | Val rms_score: 0.7630
337
+ 2025-09-18 16:46:29,065 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0484 | Val rms_score: 0.7505
338
+ 2025-09-18 16:46:37,942 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0483 | Val rms_score: 0.7569
339
+ 2025-09-18 16:46:49,537 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0497 | Val rms_score: 0.7642
340
+ 2025-09-18 16:47:00,938 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0474 | Val rms_score: 0.7610
341
+ 2025-09-18 16:47:10,361 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0490 | Val rms_score: 0.7508
342
+ 2025-09-18 16:47:21,834 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0490 | Val rms_score: 0.7499
343
+ 2025-09-18 16:47:33,241 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0473 | Val rms_score: 0.7688
344
+ 2025-09-18 16:47:42,338 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0469 | Val rms_score: 0.7531
345
+ 2025-09-18 16:47:54,263 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0475 | Val rms_score: 0.7496
346
+ 2025-09-18 16:48:11,114 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0496 | Val rms_score: 0.7498
347
+ 2025-09-18 16:48:30,114 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0448 | Val rms_score: 0.7516
348
+ 2025-09-18 16:48:47,345 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0464 | Val rms_score: 0.7549
349
+ 2025-09-18 16:49:06,557 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0494 | Val rms_score: 0.7481
350
+ 2025-09-18 16:49:23,432 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0408 | Val rms_score: 0.7578
351
+ 2025-09-18 16:49:40,007 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0500 | Val rms_score: 0.7590
352
+ 2025-09-18 16:49:59,800 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0445 | Val rms_score: 0.7615
353
+ 2025-09-18 16:50:16,871 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0512 | Val rms_score: 0.7619
354
+ 2025-09-18 16:50:36,372 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0478 | Val rms_score: 0.7639
355
+ 2025-09-18 16:50:54,770 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0466 | Val rms_score: 0.7618
356
+ 2025-09-18 16:51:12,101 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0475 | Val rms_score: 0.7543
357
+ 2025-09-18 16:51:31,186 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0486 | Val rms_score: 0.7553
358
+ 2025-09-18 16:51:48,079 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0432 | Val rms_score: 0.7554
359
+ 2025-09-18 16:52:05,371 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0447 | Val rms_score: 0.7616
360
+ 2025-09-18 16:52:24,596 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0469 | Val rms_score: 0.7555
361
+ 2025-09-18 16:52:41,789 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0474 | Val rms_score: 0.7496
362
+ 2025-09-18 16:53:01,736 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0437 | Val rms_score: 0.7582
363
+ 2025-09-18 16:53:18,350 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0460 | Val rms_score: 0.7555
364
+ 2025-09-18 16:53:37,340 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0469 | Val rms_score: 0.7500
365
+ 2025-09-18 16:53:55,882 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0445 | Val rms_score: 0.7483
366
+ 2025-09-18 16:54:12,742 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0469 | Val rms_score: 0.7579
367
+ 2025-09-18 16:54:32,011 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0444 | Val rms_score: 0.7550
368
+ 2025-09-18 16:54:49,447 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0437 | Val rms_score: 0.7453
369
+ 2025-09-18 16:55:05,595 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0447 | Val rms_score: 0.7391
370
+ 2025-09-18 16:55:05,865 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 10500
371
+ 2025-09-18 16:55:06,473 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 100 with val rms_score: 0.7391
372
+ 2025-09-18 16:55:07,426 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.7126
373
+ 2025-09-18 16:55:07,844 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.7147, Std Dev: 0.0062
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4639c891069a720d370c6be880d7dbc6db2a27d394601501fe2e8d0e933c54e3
3
+ size 450968512
modeling_modchembert.py ADDED
@@ -0,0 +1,554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 Emmanuel Cortes, All Rights Reserved.
2
+ #
3
+ # Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ # This file is adapted from the transformers library.
19
+ # Modifications include:
20
+ # - Additional classifier_pooling options for ModChemBertForSequenceClassification
21
+ # - sum_mean, sum_sum, mean_sum, mean_mean: from ChemLM (utilizes all hidden states)
22
+ # - max_cls, cls_mha, max_seq_mha: from MaxPoolBERT (utilizes last k hidden states)
23
+ # - max_seq_mean: a merge between sum_mean and max_cls (utilizes last k hidden states)
24
+ # - Addition of ModChemBertPoolingAttention for cls_mha and max_seq_mha pooling options
25
+
26
+ import copy
27
+ import math
28
+ import typing
29
+ from contextlib import nullcontext
30
+
31
+ import torch
32
+ import torch.nn as nn
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
35
+ from transformers.modeling_outputs import MaskedLMOutput, SequenceClassifierOutput
36
+ from transformers.models.modernbert.modeling_modernbert import (
37
+ MODERNBERT_ATTENTION_FUNCTION,
38
+ ModernBertModel,
39
+ ModernBertPredictionHead,
40
+ ModernBertPreTrainedModel,
41
+ ModernBertRotaryEmbedding,
42
+ _pad_modernbert_output,
43
+ _unpad_modernbert_input,
44
+ )
45
+ from transformers.utils import logging
46
+
47
+ from .configuration_modchembert import ModChemBertConfig
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ class InitWeightsMixin:
53
+ def _init_weights(self, module: nn.Module):
54
+ super()._init_weights(module) # type: ignore
55
+
56
+ cutoff_factor = self.config.initializer_cutoff_factor # type: ignore
57
+ if cutoff_factor is None:
58
+ cutoff_factor = 3
59
+
60
+ def init_weight(module: nn.Module, std: float):
61
+ if isinstance(module, nn.Linear):
62
+ nn.init.trunc_normal_(
63
+ module.weight,
64
+ mean=0.0,
65
+ std=std,
66
+ a=-cutoff_factor * std,
67
+ b=cutoff_factor * std,
68
+ )
69
+ if module.bias is not None:
70
+ nn.init.zeros_(module.bias)
71
+
72
+ stds = {
73
+ "in": self.config.initializer_range, # type: ignore
74
+ "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers), # type: ignore
75
+ "final_out": self.config.hidden_size**-0.5, # type: ignore
76
+ }
77
+
78
+ if isinstance(module, ModChemBertForMaskedLM):
79
+ init_weight(module.decoder, stds["out"])
80
+ elif isinstance(module, ModChemBertForSequenceClassification):
81
+ init_weight(module.classifier, stds["final_out"])
82
+ elif isinstance(module, ModChemBertPoolingAttention):
83
+ init_weight(module.Wq, stds["in"])
84
+ init_weight(module.Wk, stds["in"])
85
+ init_weight(module.Wv, stds["in"])
86
+ init_weight(module.Wo, stds["out"])
87
+
88
+
89
+ class ModChemBertPoolingAttention(nn.Module):
90
+ """Performs multi-headed self attention on a batch of sequences."""
91
+
92
+ def __init__(self, config: ModChemBertConfig):
93
+ super().__init__()
94
+ self.config = copy.deepcopy(config)
95
+ # Override num_attention_heads to use classifier_pooling_num_attention_heads
96
+ self.config.num_attention_heads = config.classifier_pooling_num_attention_heads
97
+ # Override attention_dropout to use classifier_pooling_attention_dropout
98
+ self.config.attention_dropout = config.classifier_pooling_attention_dropout
99
+
100
+ if config.hidden_size % config.num_attention_heads != 0:
101
+ raise ValueError(
102
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads "
103
+ f"({config.num_attention_heads})"
104
+ )
105
+
106
+ self.attention_dropout = config.attention_dropout
107
+ self.num_heads = config.num_attention_heads
108
+ self.head_dim = config.hidden_size // config.num_attention_heads
109
+ self.all_head_size = self.head_dim * self.num_heads
110
+ self.Wq = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
111
+ self.Wk = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
112
+ self.Wv = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
113
+
114
+ # Use global attention
115
+ self.local_attention = (-1, -1)
116
+ rope_theta = config.global_rope_theta
117
+ # sdpa path from original ModernBert implementation
118
+ config_copy = copy.deepcopy(config)
119
+ config_copy.rope_theta = rope_theta
120
+ self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy)
121
+
122
+ self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
123
+ self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity()
124
+ self.pruned_heads = set()
125
+
126
+ def forward(
127
+ self,
128
+ q: torch.Tensor,
129
+ kv: torch.Tensor,
130
+ attention_mask: torch.Tensor | None = None,
131
+ **kwargs,
132
+ ) -> torch.Tensor:
133
+ bs, seq_len = kv.shape[:2]
134
+ q_proj: torch.Tensor = self.Wq(q)
135
+ k_proj: torch.Tensor = self.Wk(kv)
136
+ v_proj: torch.Tensor = self.Wv(kv)
137
+ qkv = torch.stack(
138
+ (
139
+ q_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
140
+ k_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
141
+ v_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
142
+ ),
143
+ dim=2,
144
+ ) # (bs, seq_len, 3, num_heads, head_dim)
145
+
146
+ device = kv.device
147
+ if attention_mask is None:
148
+ attention_mask = torch.ones((bs, seq_len), device=device, dtype=torch.bool)
149
+ position_ids = torch.arange(seq_len, device=device).unsqueeze(0).long()
150
+
151
+ attn_outputs = MODERNBERT_ATTENTION_FUNCTION["sdpa"](
152
+ self,
153
+ qkv=qkv,
154
+ attention_mask=_prepare_4d_attention_mask(attention_mask, kv.dtype),
155
+ sliding_window_mask=None, # not needed when using global attention
156
+ position_ids=position_ids,
157
+ local_attention=self.local_attention,
158
+ bs=bs,
159
+ dim=self.all_head_size,
160
+ **kwargs,
161
+ )
162
+ hidden_states = attn_outputs[0]
163
+ hidden_states = self.out_drop(self.Wo(hidden_states))
164
+
165
+ return hidden_states
166
+
167
+
168
+ class ModChemBertForMaskedLM(InitWeightsMixin, ModernBertPreTrainedModel):
169
+ config_class = ModChemBertConfig
170
+ _tied_weights_keys = ["decoder.weight"]
171
+
172
+ def __init__(self, config: ModChemBertConfig):
173
+ super().__init__(config)
174
+ self.config = config
175
+ self.model = ModernBertModel(config)
176
+ self.head = ModernBertPredictionHead(config)
177
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)
178
+
179
+ self.sparse_prediction = self.config.sparse_prediction
180
+ self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index
181
+
182
+ # Initialize weights and apply final processing
183
+ self.post_init()
184
+
185
+ def get_output_embeddings(self):
186
+ return self.decoder
187
+
188
+ def set_output_embeddings(self, new_embeddings: nn.Linear):
189
+ self.decoder = new_embeddings
190
+
191
+ @torch.compile(dynamic=True)
192
+ def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
193
+ return self.decoder(self.head(output))
194
+
195
+ def forward(
196
+ self,
197
+ input_ids: torch.LongTensor | None = None,
198
+ attention_mask: torch.Tensor | None = None,
199
+ sliding_window_mask: torch.Tensor | None = None,
200
+ position_ids: torch.Tensor | None = None,
201
+ inputs_embeds: torch.Tensor | None = None,
202
+ labels: torch.Tensor | None = None,
203
+ indices: torch.Tensor | None = None,
204
+ cu_seqlens: torch.Tensor | None = None,
205
+ max_seqlen: int | None = None,
206
+ batch_size: int | None = None,
207
+ seq_len: int | None = None,
208
+ output_attentions: bool | None = None,
209
+ output_hidden_states: bool | None = None,
210
+ return_dict: bool | None = None,
211
+ **kwargs,
212
+ ) -> tuple[torch.Tensor] | tuple[torch.Tensor, typing.Any] | MaskedLMOutput:
213
+ r"""
214
+ sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
215
+ Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
216
+ perform global attention, while the rest perform local attention. This mask is used to avoid attending to
217
+ far-away tokens in the local attention layers when not using Flash Attention.
218
+ indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
219
+ Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
220
+ cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
221
+ Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
222
+ max_seqlen (`int`, *optional*):
223
+ Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids & pad output tensors.
224
+ batch_size (`int`, *optional*):
225
+ Batch size of the input sequences. Used to pad the output tensors.
226
+ seq_len (`int`, *optional*):
227
+ Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
228
+ """
229
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
230
+ self._maybe_set_compile()
231
+
232
+ if self.config._attn_implementation == "flash_attention_2": # noqa: SIM102
233
+ if indices is None and cu_seqlens is None and max_seqlen is None:
234
+ if batch_size is None and seq_len is None:
235
+ if inputs_embeds is not None:
236
+ batch_size, seq_len = inputs_embeds.shape[:2]
237
+ else:
238
+ batch_size, seq_len = input_ids.shape[:2] # type: ignore
239
+ device = input_ids.device if input_ids is not None else inputs_embeds.device # type: ignore
240
+
241
+ if attention_mask is None:
242
+ attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) # type: ignore
243
+
244
+ if inputs_embeds is None:
245
+ with torch.no_grad():
246
+ input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(
247
+ inputs=input_ids, # type: ignore
248
+ attention_mask=attention_mask, # type: ignore
249
+ position_ids=position_ids,
250
+ labels=labels,
251
+ )
252
+ else:
253
+ inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(
254
+ inputs=inputs_embeds,
255
+ attention_mask=attention_mask, # type: ignore
256
+ position_ids=position_ids,
257
+ labels=labels,
258
+ )
259
+
260
+ outputs = self.model(
261
+ input_ids=input_ids,
262
+ attention_mask=attention_mask,
263
+ sliding_window_mask=sliding_window_mask,
264
+ position_ids=position_ids,
265
+ inputs_embeds=inputs_embeds,
266
+ indices=indices,
267
+ cu_seqlens=cu_seqlens,
268
+ max_seqlen=max_seqlen,
269
+ batch_size=batch_size,
270
+ seq_len=seq_len,
271
+ output_attentions=output_attentions,
272
+ output_hidden_states=output_hidden_states,
273
+ return_dict=return_dict,
274
+ )
275
+ last_hidden_state = outputs[0]
276
+
277
+ if self.sparse_prediction and labels is not None:
278
+ # flatten labels and output first
279
+ labels = labels.view(-1)
280
+ last_hidden_state = last_hidden_state.view(labels.shape[0], -1)
281
+
282
+ # then filter out the non-masked tokens
283
+ mask_tokens = labels != self.sparse_pred_ignore_index
284
+ last_hidden_state = last_hidden_state[mask_tokens]
285
+ labels = labels[mask_tokens]
286
+
287
+ logits = (
288
+ self.compiled_head(last_hidden_state)
289
+ if self.config.reference_compile
290
+ else self.decoder(self.head(last_hidden_state))
291
+ )
292
+
293
+ loss = None
294
+ if labels is not None:
295
+ loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
296
+
297
+ if self.config._attn_implementation == "flash_attention_2":
298
+ with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad():
299
+ logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len) # type: ignore
300
+
301
+ if not return_dict:
302
+ output = (logits,)
303
+ return ((loss,) + output) if loss is not None else output
304
+
305
+ return MaskedLMOutput(
306
+ loss=loss,
307
+ logits=typing.cast(torch.FloatTensor, logits),
308
+ hidden_states=outputs.hidden_states,
309
+ attentions=outputs.attentions,
310
+ )
311
+
312
+
313
+ class ModChemBertForSequenceClassification(InitWeightsMixin, ModernBertPreTrainedModel):
314
+ config_class = ModChemBertConfig
315
+
316
+ def __init__(self, config: ModChemBertConfig):
317
+ super().__init__(config)
318
+ self.num_labels = config.num_labels
319
+ self.config = config
320
+
321
+ self.model = ModernBertModel(config)
322
+ if self.config.classifier_pooling in {"cls_mha", "max_seq_mha"}:
323
+ self.pooling_attn = ModChemBertPoolingAttention(config=self.config)
324
+ else:
325
+ self.pooling_attn = None
326
+ self.head = ModernBertPredictionHead(config)
327
+ self.drop = torch.nn.Dropout(config.classifier_dropout)
328
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
329
+
330
+ # Initialize weights and apply final processing
331
+ self.post_init()
332
+
333
+ def forward(
334
+ self,
335
+ input_ids: torch.LongTensor | None = None,
336
+ attention_mask: torch.Tensor | None = None,
337
+ sliding_window_mask: torch.Tensor | None = None,
338
+ position_ids: torch.Tensor | None = None,
339
+ inputs_embeds: torch.Tensor | None = None,
340
+ labels: torch.Tensor | None = None,
341
+ indices: torch.Tensor | None = None,
342
+ cu_seqlens: torch.Tensor | None = None,
343
+ max_seqlen: int | None = None,
344
+ batch_size: int | None = None,
345
+ seq_len: int | None = None,
346
+ output_attentions: bool | None = None,
347
+ output_hidden_states: bool | None = None,
348
+ return_dict: bool | None = None,
349
+ **kwargs,
350
+ ) -> tuple[torch.Tensor] | tuple[torch.Tensor, typing.Any] | SequenceClassifierOutput:
351
+ r"""
352
+ sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
353
+ Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
354
+ perform global attention, while the rest perform local attention. This mask is used to avoid attending to
355
+ far-away tokens in the local attention layers when not using Flash Attention.
356
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
357
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
358
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
359
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
360
+ indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
361
+ Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
362
+ cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
363
+ Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
364
+ max_seqlen (`int`, *optional*):
365
+ Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids & pad output tensors.
366
+ batch_size (`int`, *optional*):
367
+ Batch size of the input sequences. Used to pad the output tensors.
368
+ seq_len (`int`, *optional*):
369
+ Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
370
+ """
371
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
372
+ self._maybe_set_compile()
373
+
374
+ if input_ids is not None:
375
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
376
+
377
+ if batch_size is None and seq_len is None:
378
+ if inputs_embeds is not None:
379
+ batch_size, seq_len = inputs_embeds.shape[:2]
380
+ else:
381
+ batch_size, seq_len = input_ids.shape[:2] # type: ignore
382
+ device = input_ids.device if input_ids is not None else inputs_embeds.device # type: ignore
383
+
384
+ if attention_mask is None:
385
+ attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) # type: ignore
386
+
387
+ # Ensure output_hidden_states is True in case pooling mode requires all hidden states
388
+ output_hidden_states = True
389
+
390
+ outputs = self.model(
391
+ input_ids=input_ids,
392
+ attention_mask=attention_mask,
393
+ sliding_window_mask=sliding_window_mask,
394
+ position_ids=position_ids,
395
+ inputs_embeds=inputs_embeds,
396
+ indices=indices,
397
+ cu_seqlens=cu_seqlens,
398
+ max_seqlen=max_seqlen,
399
+ batch_size=batch_size,
400
+ seq_len=seq_len,
401
+ output_attentions=output_attentions,
402
+ output_hidden_states=output_hidden_states,
403
+ return_dict=return_dict,
404
+ )
405
+ last_hidden_state = outputs[0]
406
+ hidden_states = outputs[1]
407
+
408
+ last_hidden_state = _pool_modchembert_output(
409
+ self,
410
+ last_hidden_state,
411
+ hidden_states,
412
+ typing.cast(torch.Tensor, attention_mask),
413
+ )
414
+ pooled_output = self.head(last_hidden_state)
415
+ pooled_output = self.drop(pooled_output)
416
+ logits = self.classifier(pooled_output)
417
+
418
+ loss = None
419
+ if labels is not None:
420
+ if self.config.problem_type is None:
421
+ if self.num_labels == 1:
422
+ self.config.problem_type = "regression"
423
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
424
+ self.config.problem_type = "single_label_classification"
425
+ else:
426
+ self.config.problem_type = "multi_label_classification"
427
+
428
+ if self.config.problem_type == "regression":
429
+ loss_fct = MSELoss()
430
+ if self.num_labels == 1:
431
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
432
+ else:
433
+ loss = loss_fct(logits, labels)
434
+ elif self.config.problem_type == "single_label_classification":
435
+ loss_fct = CrossEntropyLoss()
436
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
437
+ elif self.config.problem_type == "multi_label_classification":
438
+ loss_fct = BCEWithLogitsLoss()
439
+ loss = loss_fct(logits, labels)
440
+
441
+ if not return_dict:
442
+ output = (logits,)
443
+ return ((loss,) + output) if loss is not None else output
444
+
445
+ return SequenceClassifierOutput(
446
+ loss=loss,
447
+ logits=logits,
448
+ hidden_states=outputs.hidden_states,
449
+ attentions=outputs.attentions,
450
+ )
451
+
452
+
453
+ def _pool_modchembert_output(
454
+ module: ModChemBertForSequenceClassification,
455
+ last_hidden_state: torch.Tensor,
456
+ hidden_states: list[torch.Tensor],
457
+ attention_mask: torch.Tensor,
458
+ ):
459
+ """
460
+ Apply pooling strategy to hidden states for sequence-level classification/regression tasks.
461
+
462
+ This function implements various pooling strategies to aggregate sequence representations
463
+ into a single vector for downstream classification or regression tasks. The pooling method
464
+ is determined by the `classifier_pooling` configuration parameter.
465
+
466
+ Available pooling strategies:
467
+ - cls: Use the CLS token ([CLS]) representation from the last hidden state
468
+ - mean: Average pooling over all tokens in the sequence (attention-weighted)
469
+ - max_cls: Element-wise max pooling over the last k hidden states, then take CLS token
470
+ - cls_mha: Multi-head attention with CLS token as query and full sequence as keys/values
471
+ - max_seq_mha: Max pooling over last k states + multi-head attention with CLS as query
472
+ - max_seq_mean: Max pooling over last k hidden states, then mean pooling over sequence
473
+ - sum_mean: Sum all hidden states across layers, then mean pool over sequence
474
+ - sum_sum: Sum all hidden states across layers, then sum pool over sequence
475
+ - mean_sum: Mean all hidden states across layers, then sum pool over sequence
476
+ - mean_mean: Mean all hidden states across layers, then mean pool over sequence
477
+
478
+ Args:
479
+ module: The model instance containing configuration and pooling attention if needed
480
+ last_hidden_state: Final layer hidden states of shape (batch_size, seq_len, hidden_size)
481
+ hidden_states: List of hidden states from all layers, each of shape (batch_size, seq_len, hidden_size)
482
+ attention_mask: Attention mask of shape (batch_size, seq_len) indicating valid tokens
483
+
484
+ Returns:
485
+ torch.Tensor: Pooled representation of shape (batch_size, hidden_size)
486
+
487
+ Note:
488
+ Some pooling strategies (cls_mha, max_seq_mha) require the module to have a pooling_attn
489
+ attribute containing a ModChemBertPoolingAttention instance.
490
+ """
491
+ config = typing.cast(ModChemBertConfig, module.config)
492
+ if config.classifier_pooling == "cls":
493
+ last_hidden_state = last_hidden_state[:, 0]
494
+ elif config.classifier_pooling == "mean":
495
+ last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(
496
+ dim=1, keepdim=True
497
+ )
498
+ elif config.classifier_pooling == "max_cls":
499
+ k_hidden_states = hidden_states[-config.classifier_pooling_last_k :]
500
+ theta = torch.stack(k_hidden_states, dim=1) # (batch, k, seq_len, hidden)
501
+ pooled_seq = torch.max(theta, dim=1).values # Element-wise max over k -> (batch, seq_len, hidden)
502
+ last_hidden_state = pooled_seq[:, 0, :] # (batch, hidden)
503
+ elif config.classifier_pooling == "cls_mha":
504
+ # Similar to max_seq_mha but without the max pooling step
505
+ # Query is CLS token (position 0); Keys/Values are full sequence
506
+ q = last_hidden_state[:, 0, :].unsqueeze(1) # (batch, 1, hidden)
507
+ q = q.expand(-1, last_hidden_state.shape[1], -1) # (batch, seq_len, hidden)
508
+ attn_out: torch.Tensor = module.pooling_attn( # type: ignore
509
+ q=q, kv=last_hidden_state, attention_mask=attention_mask
510
+ ) # (batch, seq_len, hidden)
511
+ last_hidden_state = torch.mean(attn_out, dim=1)
512
+ elif config.classifier_pooling == "max_seq_mha":
513
+ k_hidden_states = hidden_states[-config.classifier_pooling_last_k :]
514
+ theta = torch.stack(k_hidden_states, dim=1) # (batch, k, seq_len, hidden)
515
+ pooled_seq = torch.max(theta, dim=1).values # Element-wise max over k -> (batch, seq_len, hidden)
516
+ # Query is pooled CLS token (position 0); Keys/Values are pooled sequence
517
+ q = pooled_seq[:, 0, :].unsqueeze(1) # (batch, 1, hidden)
518
+ q = q.expand(-1, pooled_seq.shape[1], -1) # (batch, seq_len, hidden)
519
+ attn_out: torch.Tensor = module.pooling_attn( # type: ignore
520
+ q=q, kv=pooled_seq, attention_mask=attention_mask
521
+ ) # (batch, seq_len, hidden)
522
+ last_hidden_state = torch.mean(attn_out, dim=1)
523
+ elif config.classifier_pooling == "max_seq_mean":
524
+ k_hidden_states = hidden_states[-config.classifier_pooling_last_k :]
525
+ theta = torch.stack(k_hidden_states, dim=1) # (batch, k, seq_len, hidden)
526
+ pooled_seq = torch.max(theta, dim=1).values # Element-wise max over k -> (batch, seq_len, hidden)
527
+ last_hidden_state = torch.mean(pooled_seq, dim=1) # Mean over sequence length
528
+ elif config.classifier_pooling == "sum_mean":
529
+ # ChemLM uses the mean of all hidden states
530
+ # which outperforms using just the last layer mean or the cls embedding
531
+ # https://doi.org/10.1038/s42004-025-01484-4
532
+ # https://static-content.springer.com/esm/art%3A10.1038%2Fs42004-025-01484-4/MediaObjects/42004_2025_1484_MOESM2_ESM.pdf
533
+ all_hidden_states = torch.stack(hidden_states)
534
+ w = torch.sum(all_hidden_states, dim=0)
535
+ last_hidden_state = torch.mean(w, dim=1)
536
+ elif config.classifier_pooling == "sum_sum":
537
+ all_hidden_states = torch.stack(hidden_states)
538
+ w = torch.sum(all_hidden_states, dim=0)
539
+ last_hidden_state = torch.sum(w, dim=1)
540
+ elif config.classifier_pooling == "mean_sum":
541
+ all_hidden_states = torch.stack(hidden_states)
542
+ w = torch.mean(all_hidden_states, dim=0)
543
+ last_hidden_state = torch.sum(w, dim=1)
544
+ elif config.classifier_pooling == "mean_mean":
545
+ all_hidden_states = torch.stack(hidden_states)
546
+ w = torch.mean(all_hidden_states, dim=0)
547
+ last_hidden_state = torch.mean(w, dim=1)
548
+ return last_hidden_state
549
+
550
+
551
+ __all__ = [
552
+ "ModChemBertForMaskedLM",
553
+ "ModChemBertForSequenceClassification",
554
+ ]
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
tokenizer.json ADDED
@@ -0,0 +1,2554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 256,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
+ "padding": {
10
+ "strategy": "BatchLongest",
11
+ "direction": "Right",
12
+ "pad_to_multiple_of": 8,
13
+ "pad_id": 2,
14
+ "pad_type_id": 0,
15
+ "pad_token": "[PAD]"
16
+ },
17
+ "added_tokens": [
18
+ {
19
+ "id": 0,
20
+ "content": "[CLS]",
21
+ "single_word": false,
22
+ "lstrip": false,
23
+ "rstrip": false,
24
+ "normalized": false,
25
+ "special": true
26
+ },
27
+ {
28
+ "id": 1,
29
+ "content": "[SEP]",
30
+ "single_word": false,
31
+ "lstrip": false,
32
+ "rstrip": false,
33
+ "normalized": false,
34
+ "special": true
35
+ },
36
+ {
37
+ "id": 2,
38
+ "content": "[PAD]",
39
+ "single_word": false,
40
+ "lstrip": false,
41
+ "rstrip": false,
42
+ "normalized": false,
43
+ "special": true
44
+ },
45
+ {
46
+ "id": 3,
47
+ "content": "[MASK]",
48
+ "single_word": false,
49
+ "lstrip": false,
50
+ "rstrip": false,
51
+ "normalized": false,
52
+ "special": true
53
+ },
54
+ {
55
+ "id": 2361,
56
+ "content": "[UNK]",
57
+ "single_word": false,
58
+ "lstrip": false,
59
+ "rstrip": false,
60
+ "normalized": false,
61
+ "special": true
62
+ }
63
+ ],
64
+ "normalizer": null,
65
+ "pre_tokenizer": {
66
+ "type": "ByteLevel",
67
+ "add_prefix_space": false,
68
+ "trim_offsets": true,
69
+ "use_regex": true
70
+ },
71
+ "post_processor": {
72
+ "type": "TemplateProcessing",
73
+ "single": [
74
+ {
75
+ "SpecialToken": {
76
+ "id": "[CLS]",
77
+ "type_id": 0
78
+ }
79
+ },
80
+ {
81
+ "Sequence": {
82
+ "id": "A",
83
+ "type_id": 0
84
+ }
85
+ },
86
+ {
87
+ "SpecialToken": {
88
+ "id": "[SEP]",
89
+ "type_id": 0
90
+ }
91
+ }
92
+ ],
93
+ "pair": [
94
+ {
95
+ "SpecialToken": {
96
+ "id": "[CLS]",
97
+ "type_id": 0
98
+ }
99
+ },
100
+ {
101
+ "Sequence": {
102
+ "id": "A",
103
+ "type_id": 0
104
+ }
105
+ },
106
+ {
107
+ "SpecialToken": {
108
+ "id": "[SEP]",
109
+ "type_id": 0
110
+ }
111
+ },
112
+ {
113
+ "Sequence": {
114
+ "id": "B",
115
+ "type_id": 0
116
+ }
117
+ },
118
+ {
119
+ "SpecialToken": {
120
+ "id": "[SEP]",
121
+ "type_id": 0
122
+ }
123
+ }
124
+ ],
125
+ "special_tokens": {
126
+ "[CLS]": {
127
+ "id": "[CLS]",
128
+ "ids": [
129
+ 0
130
+ ],
131
+ "tokens": [
132
+ "[CLS]"
133
+ ]
134
+ },
135
+ "[MASK]": {
136
+ "id": "[MASK]",
137
+ "ids": [
138
+ 3
139
+ ],
140
+ "tokens": [
141
+ "[MASK]"
142
+ ]
143
+ },
144
+ "[PAD]": {
145
+ "id": "[PAD]",
146
+ "ids": [
147
+ 2
148
+ ],
149
+ "tokens": [
150
+ "[PAD]"
151
+ ]
152
+ },
153
+ "[SEP]": {
154
+ "id": "[SEP]",
155
+ "ids": [
156
+ 1
157
+ ],
158
+ "tokens": [
159
+ "[SEP]"
160
+ ]
161
+ },
162
+ "[UNK]": {
163
+ "id": "[UNK]",
164
+ "ids": [
165
+ 2361
166
+ ],
167
+ "tokens": [
168
+ "[UNK]"
169
+ ]
170
+ }
171
+ }
172
+ },
173
+ "decoder": {
174
+ "type": "ByteLevel",
175
+ "add_prefix_space": false,
176
+ "trim_offsets": true,
177
+ "use_regex": true
178
+ },
179
+ "model": {
180
+ "type": "BPE",
181
+ "dropout": null,
182
+ "unk_token": "[UNK]",
183
+ "continuing_subword_prefix": null,
184
+ "end_of_word_suffix": null,
185
+ "fuse_unk": false,
186
+ "byte_fallback": false,
187
+ "ignore_merges": false,
188
+ "vocab": {
189
+ "[CLS]": 0,
190
+ "[SEP]": 1,
191
+ "[PAD]": 2,
192
+ "[MASK]": 3,
193
+ "C": 4,
194
+ "c": 5,
195
+ "(": 6,
196
+ ")": 7,
197
+ "1": 8,
198
+ "O": 9,
199
+ "N": 10,
200
+ "2": 11,
201
+ "=": 12,
202
+ "n": 13,
203
+ "3": 14,
204
+ "[C@H]": 15,
205
+ "[C@@H]": 16,
206
+ "F": 17,
207
+ "S": 18,
208
+ "4": 19,
209
+ "Cl": 20,
210
+ "-": 21,
211
+ "o": 22,
212
+ "s": 23,
213
+ "[nH]": 24,
214
+ "#": 25,
215
+ "/": 26,
216
+ "Br": 27,
217
+ "[C@]": 28,
218
+ "[C@@]": 29,
219
+ "[N+]": 30,
220
+ "[O-]": 31,
221
+ "5": 32,
222
+ "\\": 33,
223
+ ".": 34,
224
+ "I": 35,
225
+ "6": 36,
226
+ "[S@]": 37,
227
+ "[S@@]": 38,
228
+ "P": 39,
229
+ "[N-]": 40,
230
+ "[Si]": 41,
231
+ "7": 42,
232
+ "[n+]": 43,
233
+ "[2H]": 44,
234
+ "8": 45,
235
+ "[NH+]": 46,
236
+ "B": 47,
237
+ "9": 48,
238
+ "[C-]": 49,
239
+ "[Na+]": 50,
240
+ "[Cl-]": 51,
241
+ "[c-]": 52,
242
+ "[CH]": 53,
243
+ "%10": 54,
244
+ "[NH2+]": 55,
245
+ "[P+]": 56,
246
+ "[B]": 57,
247
+ "[I-]": 58,
248
+ "%11": 59,
249
+ "[CH2-]": 60,
250
+ "[O+]": 61,
251
+ "[NH3+]": 62,
252
+ "[C]": 63,
253
+ "[Br-]": 64,
254
+ "[IH2]": 65,
255
+ "[S-]": 66,
256
+ "[cH-]": 67,
257
+ "%12": 68,
258
+ "[nH+]": 69,
259
+ "[B-]": 70,
260
+ "[K+]": 71,
261
+ "[Sn]": 72,
262
+ "[Se]": 73,
263
+ "[CH-]": 74,
264
+ "[HH]": 75,
265
+ "[Y]": 76,
266
+ "[n-]": 77,
267
+ "[CH3-]": 78,
268
+ "[SiH]": 79,
269
+ "[S+]": 80,
270
+ "%13": 81,
271
+ "[SiH2]": 82,
272
+ "[Li+]": 83,
273
+ "[NH-]": 84,
274
+ "%14": 85,
275
+ "[Na]": 86,
276
+ "[CH2]": 87,
277
+ "[O-2]": 88,
278
+ "[U+2]": 89,
279
+ "[W]": 90,
280
+ "[Al]": 91,
281
+ "[P@]": 92,
282
+ "[Fe+2]": 93,
283
+ "[PH+]": 94,
284
+ "%15": 95,
285
+ "[Cl+3]": 96,
286
+ "[Zn+2]": 97,
287
+ "[Ir]": 98,
288
+ "[Mg+2]": 99,
289
+ "[Pt+2]": 100,
290
+ "[OH2+]": 101,
291
+ "[As]": 102,
292
+ "[Fe]": 103,
293
+ "[OH+]": 104,
294
+ "[Zr+2]": 105,
295
+ "[3H]": 106,
296
+ "[Ge]": 107,
297
+ "[SiH3]": 108,
298
+ "[OH-]": 109,
299
+ "[NH4+]": 110,
300
+ "[Cu+2]": 111,
301
+ "[P@@]": 112,
302
+ "p": 113,
303
+ "[Pt]": 114,
304
+ "%16": 115,
305
+ "[Ca+2]": 116,
306
+ "[Zr]": 117,
307
+ "[F-]": 118,
308
+ "[C+]": 119,
309
+ "[Ti]": 120,
310
+ "[P-]": 121,
311
+ "[V]": 122,
312
+ "[se]": 123,
313
+ "[U]": 124,
314
+ "[O]": 125,
315
+ "[Ni+2]": 126,
316
+ "[Zn]": 127,
317
+ "[Co]": 128,
318
+ "[Ni]": 129,
319
+ "[Pd+2]": 130,
320
+ "[Cu]": 131,
321
+ "%17": 132,
322
+ "[Cu+]": 133,
323
+ "[Te]": 134,
324
+ "[H+]": 135,
325
+ "[CH+]": 136,
326
+ "[Li]": 137,
327
+ "[Pd]": 138,
328
+ "[Mo]": 139,
329
+ "[Ru+2]": 140,
330
+ "[o+]": 141,
331
+ "[Re]": 142,
332
+ "[SH+]": 143,
333
+ "%18": 144,
334
+ "[Ac]": 145,
335
+ "[Cr]": 146,
336
+ "[NH2-]": 147,
337
+ "[K]": 148,
338
+ "[13CH2]": 149,
339
+ "[c]": 150,
340
+ "[Zr+4]": 151,
341
+ "[Tl]": 152,
342
+ "[13C]": 153,
343
+ "[Mn]": 154,
344
+ "[N@+]": 155,
345
+ "[Hg]": 156,
346
+ "[Rh]": 157,
347
+ "[Ti+4]": 158,
348
+ "[Sb]": 159,
349
+ "[Co+2]": 160,
350
+ "[Ag+]": 161,
351
+ "[Ru]": 162,
352
+ "%19": 163,
353
+ "[N@@+]": 164,
354
+ "[Ti+2]": 165,
355
+ "[Al+3]": 166,
356
+ "[Pb]": 167,
357
+ "[I+]": 168,
358
+ "[18F]": 169,
359
+ "[s+]": 170,
360
+ "[Rb+]": 171,
361
+ "[Ba+2]": 172,
362
+ "[H-]": 173,
363
+ "[Fe+3]": 174,
364
+ "[Ir+3]": 175,
365
+ "[13cH]": 176,
366
+ "%20": 177,
367
+ "[AlH2]": 178,
368
+ "[Au+]": 179,
369
+ "[13c]": 180,
370
+ "[SH2+]": 181,
371
+ "[Sn+2]": 182,
372
+ "[Mn+2]": 183,
373
+ "[Si-]": 184,
374
+ "[Ag]": 185,
375
+ "[N]": 186,
376
+ "[Bi]": 187,
377
+ "%21": 188,
378
+ "[In]": 189,
379
+ "[CH2+]": 190,
380
+ "[Y+3]": 191,
381
+ "[Ga]": 192,
382
+ "%22": 193,
383
+ "[Co+3]": 194,
384
+ "[Au]": 195,
385
+ "[13CH3]": 196,
386
+ "[Mg]": 197,
387
+ "[Cs+]": 198,
388
+ "[W+2]": 199,
389
+ "[Hf]": 200,
390
+ "[Zn+]": 201,
391
+ "[Se-]": 202,
392
+ "[S-2]": 203,
393
+ "[Ca]": 204,
394
+ "[pH]": 205,
395
+ "[ClH+]": 206,
396
+ "[Ti+3]": 207,
397
+ "%23": 208,
398
+ "[Ru+]": 209,
399
+ "[SH-]": 210,
400
+ "[13CH]": 211,
401
+ "[IH+]": 212,
402
+ "[Hf+4]": 213,
403
+ "[Rf]": 214,
404
+ "[OH3+]": 215,
405
+ "%24": 216,
406
+ "[Pt+4]": 217,
407
+ "[Zr+3]": 218,
408
+ "[PH3+]": 219,
409
+ "[Sr+2]": 220,
410
+ "[Cd+2]": 221,
411
+ "[Cd]": 222,
412
+ "%25": 223,
413
+ "[Os]": 224,
414
+ "[BH-]": 225,
415
+ "[Sn+4]": 226,
416
+ "[Cr+3]": 227,
417
+ "[Ru+3]": 228,
418
+ "[PH2+]": 229,
419
+ "[Rh+2]": 230,
420
+ "[V+2]": 231,
421
+ "%26": 232,
422
+ "[Gd+3]": 233,
423
+ "[Pb+2]": 234,
424
+ "[PH]": 235,
425
+ "[Hg+]": 236,
426
+ "[Mo+2]": 237,
427
+ "[AlH]": 238,
428
+ "[Sn+]": 239,
429
+ "%27": 240,
430
+ "[Pd+]": 241,
431
+ "b": 242,
432
+ "[Rh+3]": 243,
433
+ "[Hg+2]": 244,
434
+ "[15NH]": 245,
435
+ "[14C]": 246,
436
+ "%28": 247,
437
+ "[Mn+3]": 248,
438
+ "[Si+]": 249,
439
+ "[SeH]": 250,
440
+ "[13C@H]": 251,
441
+ "[NH]": 252,
442
+ "[Ga+3]": 253,
443
+ "[SiH-]": 254,
444
+ "[13C@@H]": 255,
445
+ "[Ce]": 256,
446
+ "[Au+3]": 257,
447
+ "[Bi+3]": 258,
448
+ "[15N]": 259,
449
+ "%29": 260,
450
+ "[BH3-]": 261,
451
+ "[14cH]": 262,
452
+ "[Ti+]": 263,
453
+ "[Gd]": 264,
454
+ "[cH+]": 265,
455
+ "[Cr+2]": 266,
456
+ "[Sb-]": 267,
457
+ "%30": 268,
458
+ "[Be+2]": 269,
459
+ "[Al+]": 270,
460
+ "[te]": 271,
461
+ "[11CH3]": 272,
462
+ "[Sm]": 273,
463
+ "[Pr]": 274,
464
+ "[La]": 275,
465
+ "%31": 276,
466
+ "[Al-]": 277,
467
+ "[Ta]": 278,
468
+ "[125I]": 279,
469
+ "[BH2-]": 280,
470
+ "[Nb]": 281,
471
+ "[Si@]": 282,
472
+ "%32": 283,
473
+ "[14c]": 284,
474
+ "[Sb+3]": 285,
475
+ "[Ba]": 286,
476
+ "%33": 287,
477
+ "[Os+2]": 288,
478
+ "[Si@@]": 289,
479
+ "[La+3]": 290,
480
+ "[15n]": 291,
481
+ "[15NH2]": 292,
482
+ "[Nd+3]": 293,
483
+ "%34": 294,
484
+ "[14CH2]": 295,
485
+ "[18O]": 296,
486
+ "[Nd]": 297,
487
+ "[GeH]": 298,
488
+ "[Ni+3]": 299,
489
+ "[Eu]": 300,
490
+ "[Dy+3]": 301,
491
+ "[Sc]": 302,
492
+ "%36": 303,
493
+ "[Se-2]": 304,
494
+ "[As+]": 305,
495
+ "%35": 306,
496
+ "[AsH]": 307,
497
+ "[Tb]": 308,
498
+ "[Sb+5]": 309,
499
+ "[Se+]": 310,
500
+ "[Ce+3]": 311,
501
+ "[c+]": 312,
502
+ "[In+3]": 313,
503
+ "[SnH]": 314,
504
+ "[Mo+4]": 315,
505
+ "%37": 316,
506
+ "[V+4]": 317,
507
+ "[Eu+3]": 318,
508
+ "[Hf+2]": 319,
509
+ "%38": 320,
510
+ "[Pt+]": 321,
511
+ "[p+]": 322,
512
+ "[123I]": 323,
513
+ "[Tl+]": 324,
514
+ "[Sm+3]": 325,
515
+ "%39": 326,
516
+ "[Yb+3]": 327,
517
+ "%40": 328,
518
+ "[Yb]": 329,
519
+ "[Os+]": 330,
520
+ "%41": 331,
521
+ "[10B]": 332,
522
+ "[Sc+3]": 333,
523
+ "[Al+2]": 334,
524
+ "%42": 335,
525
+ "[Sr]": 336,
526
+ "[Tb+3]": 337,
527
+ "[Po]": 338,
528
+ "[Tc]": 339,
529
+ "[PH-]": 340,
530
+ "[AlH3]": 341,
531
+ "[Ar]": 342,
532
+ "[U+4]": 343,
533
+ "[SnH2]": 344,
534
+ "[Cl+2]": 345,
535
+ "[si]": 346,
536
+ "[Fe+]": 347,
537
+ "[14CH3]": 348,
538
+ "[U+3]": 349,
539
+ "[Cl+]": 350,
540
+ "%43": 351,
541
+ "[GeH2]": 352,
542
+ "%44": 353,
543
+ "[Er+3]": 354,
544
+ "[Mo+3]": 355,
545
+ "[I+2]": 356,
546
+ "[Fe+4]": 357,
547
+ "[99Tc]": 358,
548
+ "%45": 359,
549
+ "[11C]": 360,
550
+ "%46": 361,
551
+ "[SnH3]": 362,
552
+ "[S]": 363,
553
+ "[Te+]": 364,
554
+ "[Er]": 365,
555
+ "[Lu+3]": 366,
556
+ "[11B]": 367,
557
+ "%47": 368,
558
+ "%48": 369,
559
+ "[P]": 370,
560
+ "[Tm]": 371,
561
+ "[Th]": 372,
562
+ "[Dy]": 373,
563
+ "[Pr+3]": 374,
564
+ "[Ta+5]": 375,
565
+ "[Nb+5]": 376,
566
+ "[Rb]": 377,
567
+ "[GeH3]": 378,
568
+ "[Br+2]": 379,
569
+ "%49": 380,
570
+ "[131I]": 381,
571
+ "[Fm]": 382,
572
+ "[Cs]": 383,
573
+ "[BH4-]": 384,
574
+ "[Lu]": 385,
575
+ "[15nH]": 386,
576
+ "%50": 387,
577
+ "[Ru+6]": 388,
578
+ "[b-]": 389,
579
+ "[Ho]": 390,
580
+ "[Th+4]": 391,
581
+ "[Ru+4]": 392,
582
+ "%52": 393,
583
+ "[14CH]": 394,
584
+ "%51": 395,
585
+ "[Cr+6]": 396,
586
+ "[18OH]": 397,
587
+ "[Ho+3]": 398,
588
+ "[Ce+4]": 399,
589
+ "[Bi+2]": 400,
590
+ "[Co+]": 401,
591
+ "%53": 402,
592
+ "[Yb+2]": 403,
593
+ "[Fe+6]": 404,
594
+ "[Be]": 405,
595
+ "%54": 406,
596
+ "[SH3+]": 407,
597
+ "[Np]": 408,
598
+ "[As-]": 409,
599
+ "%55": 410,
600
+ "[14C@@H]": 411,
601
+ "[Ir+2]": 412,
602
+ "[GaH3]": 413,
603
+ "[p-]": 414,
604
+ "[GeH4]": 415,
605
+ "[Sn+3]": 416,
606
+ "[Os+4]": 417,
607
+ "%56": 418,
608
+ "[14C@H]": 419,
609
+ "[sH+]": 420,
610
+ "[19F]": 421,
611
+ "[Eu+2]": 422,
612
+ "[TlH]": 423,
613
+ "%57": 424,
614
+ "[Cr+4]": 425,
615
+ "%58": 426,
616
+ "[B@@-]": 427,
617
+ "[SiH+]": 428,
618
+ "[At]": 429,
619
+ "[Am]": 430,
620
+ "[Fe+5]": 431,
621
+ "[AsH2]": 432,
622
+ "[Si+4]": 433,
623
+ "[B@-]": 434,
624
+ "[Pu]": 435,
625
+ "[SbH]": 436,
626
+ "[P-2]": 437,
627
+ "[Tm+3]": 438,
628
+ "*": 439,
629
+ "%59": 440,
630
+ "[se+]": 441,
631
+ "%60": 442,
632
+ "[oH+]": 443,
633
+ "[1H]": 444,
634
+ "[15N+]": 445,
635
+ "[124I]": 446,
636
+ "[S@@+]": 447,
637
+ "[P-3]": 448,
638
+ "[H]": 449,
639
+ "[IH2+]": 450,
640
+ "[TeH]": 451,
641
+ "[Xe]": 452,
642
+ "[PH4+]": 453,
643
+ "[Cr+]": 454,
644
+ "[Cm]": 455,
645
+ "[I+3]": 456,
646
+ "%61": 457,
647
+ "[Nb+2]": 458,
648
+ "[Ru+5]": 459,
649
+ "%62": 460,
650
+ "[Ta+2]": 461,
651
+ "[Tc+4]": 462,
652
+ "[CH3+]": 463,
653
+ "[Pm]": 464,
654
+ "[Si@H]": 465,
655
+ "[No]": 466,
656
+ "%63": 467,
657
+ "[Cr+5]": 468,
658
+ "[Th+2]": 469,
659
+ "[Zn-2]": 470,
660
+ "[13C@]": 471,
661
+ "[Lr]": 472,
662
+ "%64": 473,
663
+ "[99Tc+3]": 474,
664
+ "%65": 475,
665
+ "[13C@@]": 476,
666
+ "%66": 477,
667
+ "[Fe-]": 478,
668
+ "[17O]": 479,
669
+ "[siH]": 480,
670
+ "[Sb+]": 481,
671
+ "[OH]": 482,
672
+ "[IH]": 483,
673
+ "[11CH2]": 484,
674
+ "[Cf]": 485,
675
+ "[SiH2+]": 486,
676
+ "[Gd+2]": 487,
677
+ "[In+]": 488,
678
+ "[Si@@H]": 489,
679
+ "[Mn+]": 490,
680
+ "[99Tc+4]": 491,
681
+ "[Ga-]": 492,
682
+ "%67": 493,
683
+ "[S@+]": 494,
684
+ "[Ge+4]": 495,
685
+ "[Tl+3]": 496,
686
+ "[16OH]": 497,
687
+ "%68": 498,
688
+ "[2H-]": 499,
689
+ "[Ra]": 500,
690
+ "[si-]": 501,
691
+ "[NiH2]": 502,
692
+ "[P@@H]": 503,
693
+ "[Rh+]": 504,
694
+ "[12C]": 505,
695
+ "[35S]": 506,
696
+ "[32P]": 507,
697
+ "[SiH2-]": 508,
698
+ "[AlH2+]": 509,
699
+ "[16O]": 510,
700
+ "%69": 511,
701
+ "[BiH]": 512,
702
+ "[BiH2]": 513,
703
+ "[Zn-]": 514,
704
+ "[BH]": 515,
705
+ "[Tc+3]": 516,
706
+ "[Ir+]": 517,
707
+ "[Ni+]": 518,
708
+ "%70": 519,
709
+ "[InH2]": 520,
710
+ "[InH]": 521,
711
+ "[Nb+3]": 522,
712
+ "[PbH]": 523,
713
+ "[Bi+]": 524,
714
+ "%71": 525,
715
+ "[As+3]": 526,
716
+ "%72": 527,
717
+ "[18O-]": 528,
718
+ "[68Ga+3]": 529,
719
+ "%73": 530,
720
+ "[Pa]": 531,
721
+ "[76Br]": 532,
722
+ "[Tc+5]": 533,
723
+ "[pH+]": 534,
724
+ "[64Cu+2]": 535,
725
+ "[Ru+8]": 536,
726
+ "%74": 537,
727
+ "[PH2-]": 538,
728
+ "[Si+2]": 539,
729
+ "[17OH]": 540,
730
+ "[RuH]": 541,
731
+ "[111In+3]": 542,
732
+ "[AlH+]": 543,
733
+ "%75": 544,
734
+ "%76": 545,
735
+ "[W+]": 546,
736
+ "[SbH2]": 547,
737
+ "[PoH]": 548,
738
+ "[Ru-]": 549,
739
+ "[XeH]": 550,
740
+ "[Tc+2]": 551,
741
+ "[13C-]": 552,
742
+ "[Br+]": 553,
743
+ "[Pt-2]": 554,
744
+ "[Es]": 555,
745
+ "[Cu-]": 556,
746
+ "[Mg+]": 557,
747
+ "[3HH]": 558,
748
+ "[P@H]": 559,
749
+ "[ClH2+]": 560,
750
+ "%77": 561,
751
+ "[SH]": 562,
752
+ "[Au-]": 563,
753
+ "[2HH]": 564,
754
+ "%78": 565,
755
+ "[Sn-]": 566,
756
+ "[11CH]": 567,
757
+ "[PdH2]": 568,
758
+ "0": 569,
759
+ "[Os+6]": 570,
760
+ "%79": 571,
761
+ "[Mo+]": 572,
762
+ "%80": 573,
763
+ "[al]": 574,
764
+ "[PbH2]": 575,
765
+ "[64Cu]": 576,
766
+ "[Cl]": 577,
767
+ "[12CH3]": 578,
768
+ "%81": 579,
769
+ "[Tc+7]": 580,
770
+ "[11c]": 581,
771
+ "%82": 582,
772
+ "[Li-]": 583,
773
+ "[99Tc+5]": 584,
774
+ "[He]": 585,
775
+ "[12c]": 586,
776
+ "[Kr]": 587,
777
+ "[RuH+2]": 588,
778
+ "[35Cl]": 589,
779
+ "[Pd-2]": 590,
780
+ "[GaH2]": 591,
781
+ "[4H]": 592,
782
+ "[Sg]": 593,
783
+ "[Cu-2]": 594,
784
+ "[Br+3]": 595,
785
+ "%83": 596,
786
+ "[37Cl]": 597,
787
+ "[211At]": 598,
788
+ "[IrH+2]": 599,
789
+ "[Mt]": 600,
790
+ "[Ir-2]": 601,
791
+ "[In-]": 602,
792
+ "[12cH]": 603,
793
+ "[12CH2]": 604,
794
+ "[RuH2]": 605,
795
+ "[99Tc+7]": 606,
796
+ "%84": 607,
797
+ "[15n+]": 608,
798
+ "[ClH2+2]": 609,
799
+ "[16N]": 610,
800
+ "[111In]": 611,
801
+ "[Tc+]": 612,
802
+ "[Ru-2]": 613,
803
+ "[12CH]": 614,
804
+ "[si+]": 615,
805
+ "[Tc+6]": 616,
806
+ "%85": 617,
807
+ "%86": 618,
808
+ "[90Y]": 619,
809
+ "[Pd-]": 620,
810
+ "[188Re]": 621,
811
+ "[RuH+]": 622,
812
+ "[NiH]": 623,
813
+ "[SiH3-]": 624,
814
+ "[14n]": 625,
815
+ "[CH3]": 626,
816
+ "[14N]": 627,
817
+ "[10BH2]": 628,
818
+ "%88": 629,
819
+ "%89": 630,
820
+ "%90": 631,
821
+ "[34S]": 632,
822
+ "[77Br]": 633,
823
+ "[GaH]": 634,
824
+ "[Br]": 635,
825
+ "[Ge@]": 636,
826
+ "[B@@H-]": 637,
827
+ "[CuH]": 638,
828
+ "[SiH4]": 639,
829
+ "[3H-]": 640,
830
+ "%87": 641,
831
+ "%91": 642,
832
+ "%92": 643,
833
+ "[67Cu]": 644,
834
+ "[I]": 645,
835
+ "[177Lu]": 646,
836
+ "[ReH]": 647,
837
+ "[67Ga+3]": 648,
838
+ "[Db]": 649,
839
+ "[177Lu+3]": 650,
840
+ "[AlH2-]": 651,
841
+ "[Si+3]": 652,
842
+ "[Ti-2]": 653,
843
+ "[RuH+3]": 654,
844
+ "[al+]": 655,
845
+ "[68Ga]": 656,
846
+ "[2H+]": 657,
847
+ "[B@H-]": 658,
848
+ "[WH2]": 659,
849
+ "[OsH]": 660,
850
+ "[Ir-3]": 661,
851
+ "[AlH-]": 662,
852
+ "[Bk]": 663,
853
+ "[75Se]": 664,
854
+ "[14C@]": 665,
855
+ "[Pt-]": 666,
856
+ "[N@@H+]": 667,
857
+ "[Nb-]": 668,
858
+ "[13NH2]": 669,
859
+ "%93": 670,
860
+ "[186Re]": 671,
861
+ "[Tb+4]": 672,
862
+ "[PtH]": 673,
863
+ "[IrH2]": 674,
864
+ "[Hg-2]": 675,
865
+ "[AlH3-]": 676,
866
+ "[PdH+]": 677,
867
+ "[Md]": 678,
868
+ "[RhH+2]": 679,
869
+ "[11cH]": 680,
870
+ "[Co-2]": 681,
871
+ "[15N-]": 682,
872
+ "[ZrH2]": 683,
873
+ "%94": 684,
874
+ "[Hg-]": 685,
875
+ "[127I]": 686,
876
+ "[AsH2+]": 687,
877
+ "[MoH2]": 688,
878
+ "[Te+4]": 689,
879
+ "[14C@@]": 690,
880
+ "[As+5]": 691,
881
+ "[SnH+3]": 692,
882
+ "[Ge@@]": 693,
883
+ "[6Li+]": 694,
884
+ "[WH]": 695,
885
+ "[Ne]": 696,
886
+ "[14NH2]": 697,
887
+ "[14NH]": 698,
888
+ "[12C@@H]": 699,
889
+ "[Os+7]": 700,
890
+ "[RhH]": 701,
891
+ "[Al-3]": 702,
892
+ "[SnH+]": 703,
893
+ "[15NH3+]": 704,
894
+ "[Zr+]": 705,
895
+ "[197Hg+]": 706,
896
+ "%95": 707,
897
+ "%96": 708,
898
+ "[90Y+3]": 709,
899
+ "[Os-2]": 710,
900
+ "[98Tc+5]": 711,
901
+ "[15NH3]": 712,
902
+ "[bH-]": 713,
903
+ "[33P]": 714,
904
+ "[Zr-2]": 715,
905
+ "[15O]": 716,
906
+ "[Rh-]": 717,
907
+ "[PbH3]": 718,
908
+ "[PH2]": 719,
909
+ "[Ni-]": 720,
910
+ "[CuH+]": 721,
911
+ "%97": 722,
912
+ "%98": 723,
913
+ "%99": 724,
914
+ "[Os+5]": 725,
915
+ "[PtH+]": 726,
916
+ "[ReH4]": 727,
917
+ "[16NH]": 728,
918
+ "[82Br]": 729,
919
+ "[W-]": 730,
920
+ "[18F-]": 731,
921
+ "[15NH4+]": 732,
922
+ "[Se+4]": 733,
923
+ "[SeH-]": 734,
924
+ "[67Cu+2]": 735,
925
+ "[12C@H]": 736,
926
+ "[AsH3]": 737,
927
+ "[HgH]": 738,
928
+ "[10B-]": 739,
929
+ "[99Tc+6]": 740,
930
+ "[117Sn+4]": 741,
931
+ "[Te@]": 742,
932
+ "[P@+]": 743,
933
+ "[35SH]": 744,
934
+ "[SeH+]": 745,
935
+ "[Ni-2]": 746,
936
+ "[Al-2]": 747,
937
+ "[TeH2]": 748,
938
+ "[Bh]": 749,
939
+ "[99Tc+2]": 750,
940
+ "[Os+8]": 751,
941
+ "[PH-2]": 752,
942
+ "[7Li+]": 753,
943
+ "[14nH]": 754,
944
+ "[AlH+2]": 755,
945
+ "[18FH]": 756,
946
+ "[SnH4]": 757,
947
+ "[18O-2]": 758,
948
+ "[IrH]": 759,
949
+ "[13N]": 760,
950
+ "[Te@@]": 761,
951
+ "[Rh-3]": 762,
952
+ "[15NH+]": 763,
953
+ "[AsH3+]": 764,
954
+ "[SeH2]": 765,
955
+ "[AsH+]": 766,
956
+ "[CoH2]": 767,
957
+ "[16NH2]": 768,
958
+ "[AsH-]": 769,
959
+ "[203Hg+]": 770,
960
+ "[P@@+]": 771,
961
+ "[166Ho+3]": 772,
962
+ "[60Co+3]": 773,
963
+ "[13CH2-]": 774,
964
+ "[SeH2+]": 775,
965
+ "[75Br]": 776,
966
+ "[TlH2]": 777,
967
+ "[80Br]": 778,
968
+ "[siH+]": 779,
969
+ "[Ca+]": 780,
970
+ "[153Sm+3]": 781,
971
+ "[PdH]": 782,
972
+ "[225Ac]": 783,
973
+ "[13CH3-]": 784,
974
+ "[AlH4-]": 785,
975
+ "[FeH]": 786,
976
+ "[13CH-]": 787,
977
+ "[14C-]": 788,
978
+ "[11C-]": 789,
979
+ "[153Sm]": 790,
980
+ "[Re-]": 791,
981
+ "[te+]": 792,
982
+ "[13CH4]": 793,
983
+ "[ClH+2]": 794,
984
+ "[8CH2]": 795,
985
+ "[99Mo]": 796,
986
+ "[ClH3+3]": 797,
987
+ "[SbH3]": 798,
988
+ "[25Mg+2]": 799,
989
+ "[16N+]": 800,
990
+ "[SnH2+]": 801,
991
+ "[11C@H]": 802,
992
+ "[122I]": 803,
993
+ "[Re-2]": 804,
994
+ "[RuH2+2]": 805,
995
+ "[ZrH]": 806,
996
+ "[Bi-]": 807,
997
+ "[Pr+]": 808,
998
+ "[Rn]": 809,
999
+ "[Fr]": 810,
1000
+ "[36Cl]": 811,
1001
+ "[18o]": 812,
1002
+ "[YH]": 813,
1003
+ "[79Br]": 814,
1004
+ "[121I]": 815,
1005
+ "[113In+3]": 816,
1006
+ "[TaH]": 817,
1007
+ "[RhH2]": 818,
1008
+ "[Ta-]": 819,
1009
+ "[67Ga]": 820,
1010
+ "[ZnH+]": 821,
1011
+ "[SnH2-]": 822,
1012
+ "[OsH2]": 823,
1013
+ "[16F]": 824,
1014
+ "[FeH2]": 825,
1015
+ "[14O]": 826,
1016
+ "[PbH2+2]": 827,
1017
+ "[BH2]": 828,
1018
+ "[6H]": 829,
1019
+ "[125Te]": 830,
1020
+ "[197Hg]": 831,
1021
+ "[TaH2]": 832,
1022
+ "[TaH3]": 833,
1023
+ "[76As]": 834,
1024
+ "[Nb-2]": 835,
1025
+ "[14N+]": 836,
1026
+ "[125I-]": 837,
1027
+ "[33S]": 838,
1028
+ "[IH2+2]": 839,
1029
+ "[NH2]": 840,
1030
+ "[PtH2]": 841,
1031
+ "[MnH]": 842,
1032
+ "[19C]": 843,
1033
+ "[17F]": 844,
1034
+ "[1H-]": 845,
1035
+ "[SnH4+2]": 846,
1036
+ "[Mn-2]": 847,
1037
+ "[15NH2+]": 848,
1038
+ "[TiH2]": 849,
1039
+ "[ReH7]": 850,
1040
+ "[Cd-2]": 851,
1041
+ "[Fe-3]": 852,
1042
+ "[SH2]": 853,
1043
+ "[17O-]": 854,
1044
+ "[siH-]": 855,
1045
+ "[CoH+]": 856,
1046
+ "[VH]": 857,
1047
+ "[10BH]": 858,
1048
+ "[Ru-3]": 859,
1049
+ "[13O]": 860,
1050
+ "[5H]": 861,
1051
+ "[15n-]": 862,
1052
+ "[153Gd]": 863,
1053
+ "[12C@]": 864,
1054
+ "[11CH3-]": 865,
1055
+ "[IrH3]": 866,
1056
+ "[RuH3]": 867,
1057
+ "[74Se]": 868,
1058
+ "[Se@]": 869,
1059
+ "[Hf+]": 870,
1060
+ "[77Se]": 871,
1061
+ "[166Ho]": 872,
1062
+ "[59Fe+2]": 873,
1063
+ "[203Hg]": 874,
1064
+ "[18OH-]": 875,
1065
+ "[8CH]": 876,
1066
+ "[12C@@]": 877,
1067
+ "[11CH4]": 878,
1068
+ "[15C]": 879,
1069
+ "[249Cf]": 880,
1070
+ "[PbH4]": 881,
1071
+ "[64Zn]": 882,
1072
+ "[99Tc+]": 883,
1073
+ "[14c-]": 884,
1074
+ "[149Pm]": 885,
1075
+ "[IrH4]": 886,
1076
+ "[Se@@]": 887,
1077
+ "[13OH]": 888,
1078
+ "[14CH3-]": 889,
1079
+ "[28Si]": 890,
1080
+ "[Rh-2]": 891,
1081
+ "[Fe-2]": 892,
1082
+ "[131I-]": 893,
1083
+ "[51Cr]": 894,
1084
+ "[62Cu+2]": 895,
1085
+ "[81Br]": 896,
1086
+ "[121Sb]": 897,
1087
+ "[7Li]": 898,
1088
+ "[89Zr+4]": 899,
1089
+ "[SbH3+]": 900,
1090
+ "[11C@@H]": 901,
1091
+ "[98Tc]": 902,
1092
+ "[59Fe+3]": 903,
1093
+ "[BiH2+]": 904,
1094
+ "[SbH+]": 905,
1095
+ "[TiH]": 906,
1096
+ "[14NH3]": 907,
1097
+ "[15OH]": 908,
1098
+ "[119Sn]": 909,
1099
+ "[201Hg]": 910,
1100
+ "[MnH+]": 911,
1101
+ "[201Tl]": 912,
1102
+ "[51Cr+3]": 913,
1103
+ "[123I-]": 914,
1104
+ "[MoH]": 915,
1105
+ "[AlH6-3]": 916,
1106
+ "[MnH2]": 917,
1107
+ "[WH3]": 918,
1108
+ "[213Bi+3]": 919,
1109
+ "[SnH2+2]": 920,
1110
+ "[123IH]": 921,
1111
+ "[13CH+]": 922,
1112
+ "[Zr-]": 923,
1113
+ "[74As]": 924,
1114
+ "[13C+]": 925,
1115
+ "[32P+]": 926,
1116
+ "[KrH]": 927,
1117
+ "[SiH+2]": 928,
1118
+ "[ClH3+2]": 929,
1119
+ "[13NH]": 930,
1120
+ "[9CH2]": 931,
1121
+ "[ZrH2+2]": 932,
1122
+ "[87Sr+2]": 933,
1123
+ "[35s]": 934,
1124
+ "[239Pu]": 935,
1125
+ "[198Au]": 936,
1126
+ "[241Am]": 937,
1127
+ "[203Hg+2]": 938,
1128
+ "[V+]": 939,
1129
+ "[YH2]": 940,
1130
+ "[195Pt]": 941,
1131
+ "[203Pb]": 942,
1132
+ "[RuH4]": 943,
1133
+ "[ThH2]": 944,
1134
+ "[AuH]": 945,
1135
+ "[66Ga+3]": 946,
1136
+ "[11B-]": 947,
1137
+ "[F]": 948,
1138
+ "[24Na+]": 949,
1139
+ "[85Sr+2]": 950,
1140
+ "[201Tl+]": 951,
1141
+ "[14CH4]": 952,
1142
+ "[32S]": 953,
1143
+ "[TeH2+]": 954,
1144
+ "[ClH2+3]": 955,
1145
+ "[AgH]": 956,
1146
+ "[Ge@H]": 957,
1147
+ "[44Ca+2]": 958,
1148
+ "[Os-]": 959,
1149
+ "[31P]": 960,
1150
+ "[15nH+]": 961,
1151
+ "[SbH4]": 962,
1152
+ "[TiH+]": 963,
1153
+ "[Ba+]": 964,
1154
+ "[57Co+2]": 965,
1155
+ "[Ta+]": 966,
1156
+ "[125IH]": 967,
1157
+ "[77As]": 968,
1158
+ "[129I]": 969,
1159
+ "[Fe-4]": 970,
1160
+ "[Ta-2]": 971,
1161
+ "[19O]": 972,
1162
+ "[12O]": 973,
1163
+ "[BiH3]": 974,
1164
+ "[237Np]": 975,
1165
+ "[252Cf]": 976,
1166
+ "[86Y]": 977,
1167
+ "[Cr-2]": 978,
1168
+ "[89Y]": 979,
1169
+ "[195Pt+2]": 980,
1170
+ "[si+2]": 981,
1171
+ "[58Fe+2]": 982,
1172
+ "[Hs]": 983,
1173
+ "[S@@H]": 984,
1174
+ "[8CH4]": 985,
1175
+ "[164Dy+3]": 986,
1176
+ "[47Ca+2]": 987,
1177
+ "[57Co]": 988,
1178
+ "[NbH2]": 989,
1179
+ "[ReH2]": 990,
1180
+ "[ZnH2]": 991,
1181
+ "[CrH2]": 992,
1182
+ "[17NH]": 993,
1183
+ "[ZrH3]": 994,
1184
+ "[RhH3]": 995,
1185
+ "[12C-]": 996,
1186
+ "[18O+]": 997,
1187
+ "[Bi-2]": 998,
1188
+ "[ClH4+3]": 999,
1189
+ "[Ni-3]": 1000,
1190
+ "[Ag-]": 1001,
1191
+ "[111In-]": 1002,
1192
+ "[Mo-2]": 1003,
1193
+ "[55Fe+3]": 1004,
1194
+ "[204Hg+]": 1005,
1195
+ "[35Cl-]": 1006,
1196
+ "[211Pb]": 1007,
1197
+ "[75Ge]": 1008,
1198
+ "[8B]": 1009,
1199
+ "[TeH3]": 1010,
1200
+ "[SnH3+]": 1011,
1201
+ "[Zr-3]": 1012,
1202
+ "[28F]": 1013,
1203
+ "[249Bk]": 1014,
1204
+ "[169Yb]": 1015,
1205
+ "[34SH]": 1016,
1206
+ "[6Li]": 1017,
1207
+ "[94Tc]": 1018,
1208
+ "[197Au]": 1019,
1209
+ "[195Pt+4]": 1020,
1210
+ "[169Yb+3]": 1021,
1211
+ "[32Cl]": 1022,
1212
+ "[82Se]": 1023,
1213
+ "[159Gd+3]": 1024,
1214
+ "[213Bi]": 1025,
1215
+ "[CoH+2]": 1026,
1216
+ "[36S]": 1027,
1217
+ "[35P]": 1028,
1218
+ "[Ru-4]": 1029,
1219
+ "[Cr-3]": 1030,
1220
+ "[60Co]": 1031,
1221
+ "[1H+]": 1032,
1222
+ "[18CH2]": 1033,
1223
+ "[Cd-]": 1034,
1224
+ "[152Sm+3]": 1035,
1225
+ "[106Ru]": 1036,
1226
+ "[238Pu]": 1037,
1227
+ "[220Rn]": 1038,
1228
+ "[45Ca+2]": 1039,
1229
+ "[89Sr+2]": 1040,
1230
+ "[239Np]": 1041,
1231
+ "[90Sr+2]": 1042,
1232
+ "[137Cs+]": 1043,
1233
+ "[165Dy]": 1044,
1234
+ "[68GaH3]": 1045,
1235
+ "[65Zn+2]": 1046,
1236
+ "[89Zr]": 1047,
1237
+ "[BiH2+2]": 1048,
1238
+ "[62Cu]": 1049,
1239
+ "[165Dy+3]": 1050,
1240
+ "[238U]": 1051,
1241
+ "[105Rh+3]": 1052,
1242
+ "[70Zn]": 1053,
1243
+ "[12B]": 1054,
1244
+ "[12OH]": 1055,
1245
+ "[18CH]": 1056,
1246
+ "[17CH]": 1057,
1247
+ "[42K]": 1058,
1248
+ "[76Br-]": 1059,
1249
+ "[71As]": 1060,
1250
+ "[NbH3]": 1061,
1251
+ "[ReH3]": 1062,
1252
+ "[OsH-]": 1063,
1253
+ "[WH4]": 1064,
1254
+ "[MoH3]": 1065,
1255
+ "[OsH4]": 1066,
1256
+ "[RuH6]": 1067,
1257
+ "[PtH3]": 1068,
1258
+ "[CuH2]": 1069,
1259
+ "[CoH3]": 1070,
1260
+ "[TiH4]": 1071,
1261
+ "[64Zn+2]": 1072,
1262
+ "[Si-2]": 1073,
1263
+ "[79BrH]": 1074,
1264
+ "[14CH2-]": 1075,
1265
+ "[PtH2+2]": 1076,
1266
+ "[Os-3]": 1077,
1267
+ "[29Si]": 1078,
1268
+ "[Ti-]": 1079,
1269
+ "[Se+6]": 1080,
1270
+ "[22Na+]": 1081,
1271
+ "[42K+]": 1082,
1272
+ "[131Cs+]": 1083,
1273
+ "[86Rb+]": 1084,
1274
+ "[134Cs+]": 1085,
1275
+ "[209Po]": 1086,
1276
+ "[208Po]": 1087,
1277
+ "[81Rb+]": 1088,
1278
+ "[203Tl+]": 1089,
1279
+ "[Zr-4]": 1090,
1280
+ "[148Sm]": 1091,
1281
+ "[147Sm]": 1092,
1282
+ "[37Cl-]": 1093,
1283
+ "[12CH4]": 1094,
1284
+ "[Ge@@H]": 1095,
1285
+ "[63Cu]": 1096,
1286
+ "[13CH2+]": 1097,
1287
+ "[AsH2-]": 1098,
1288
+ "[CeH]": 1099,
1289
+ "[SnH-]": 1100,
1290
+ "[UH]": 1101,
1291
+ "[9c]": 1102,
1292
+ "[21CH3]": 1103,
1293
+ "[TeH+]": 1104,
1294
+ "[57Co+3]": 1105,
1295
+ "[8BH2]": 1106,
1296
+ "[12BH2]": 1107,
1297
+ "[19BH2]": 1108,
1298
+ "[9BH2]": 1109,
1299
+ "[YbH2]": 1110,
1300
+ "[CrH+2]": 1111,
1301
+ "[208Bi]": 1112,
1302
+ "[152Gd]": 1113,
1303
+ "[61Cu]": 1114,
1304
+ "[115In]": 1115,
1305
+ "[60Co+2]": 1116,
1306
+ "[13NH2-]": 1117,
1307
+ "[120I]": 1118,
1308
+ "[18OH2]": 1119,
1309
+ "[75SeH]": 1120,
1310
+ "[SbH2+]": 1121,
1311
+ "[144Ce]": 1122,
1312
+ "[16n]": 1123,
1313
+ "[113In]": 1124,
1314
+ "[22nH]": 1125,
1315
+ "[129I-]": 1126,
1316
+ "[InH3]": 1127,
1317
+ "[32PH3]": 1128,
1318
+ "[234U]": 1129,
1319
+ "[235U]": 1130,
1320
+ "[59Fe]": 1131,
1321
+ "[82Rb+]": 1132,
1322
+ "[65Zn]": 1133,
1323
+ "[244Cm]": 1134,
1324
+ "[147Pm]": 1135,
1325
+ "[91Y]": 1136,
1326
+ "[237Pu]": 1137,
1327
+ "[231Pa]": 1138,
1328
+ "[253Cf]": 1139,
1329
+ "[127Te]": 1140,
1330
+ "[187Re]": 1141,
1331
+ "[236Np]": 1142,
1332
+ "[235Np]": 1143,
1333
+ "[72Zn]": 1144,
1334
+ "[253Es]": 1145,
1335
+ "[159Dy]": 1146,
1336
+ "[62Zn]": 1147,
1337
+ "[101Tc]": 1148,
1338
+ "[149Tb]": 1149,
1339
+ "[124I-]": 1150,
1340
+ "[SeH3+]": 1151,
1341
+ "[210Pb]": 1152,
1342
+ "[40K]": 1153,
1343
+ "[210Po]": 1154,
1344
+ "[214Pb]": 1155,
1345
+ "[218Po]": 1156,
1346
+ "[214Po]": 1157,
1347
+ "[7Be]": 1158,
1348
+ "[212Pb]": 1159,
1349
+ "[205Pb]": 1160,
1350
+ "[209Pb]": 1161,
1351
+ "[123Te]": 1162,
1352
+ "[202Pb]": 1163,
1353
+ "[72As]": 1164,
1354
+ "[201Pb]": 1165,
1355
+ "[70As]": 1166,
1356
+ "[73Ge]": 1167,
1357
+ "[200Pb]": 1168,
1358
+ "[198Pb]": 1169,
1359
+ "[66Ga]": 1170,
1360
+ "[73Se]": 1171,
1361
+ "[195Pb]": 1172,
1362
+ "[199Pb]": 1173,
1363
+ "[144Ce+3]": 1174,
1364
+ "[235U+2]": 1175,
1365
+ "[90Tc]": 1176,
1366
+ "[114In+3]": 1177,
1367
+ "[128I]": 1178,
1368
+ "[100Tc+]": 1179,
1369
+ "[82Br-]": 1180,
1370
+ "[191Pt+2]": 1181,
1371
+ "[191Pt+4]": 1182,
1372
+ "[193Pt+4]": 1183,
1373
+ "[31PH3]": 1184,
1374
+ "[125I+2]": 1185,
1375
+ "[131I+2]": 1186,
1376
+ "[125Te+4]": 1187,
1377
+ "[82Sr+2]": 1188,
1378
+ "[149Sm]": 1189,
1379
+ "[81BrH]": 1190,
1380
+ "[129Xe]": 1191,
1381
+ "[193Pt+2]": 1192,
1382
+ "[123I+2]": 1193,
1383
+ "[Cr-]": 1194,
1384
+ "[Co-]": 1195,
1385
+ "[227Th+4]": 1196,
1386
+ "[249Cf+3]": 1197,
1387
+ "[252Cf+3]": 1198,
1388
+ "[187Os]": 1199,
1389
+ "[16O-]": 1200,
1390
+ "[17O+]": 1201,
1391
+ "[16OH-]": 1202,
1392
+ "[98Tc+7]": 1203,
1393
+ "[58Co+2]": 1204,
1394
+ "[69Ga+3]": 1205,
1395
+ "[57Fe+2]": 1206,
1396
+ "[43K+]": 1207,
1397
+ "[16C]": 1208,
1398
+ "[52Fe+3]": 1209,
1399
+ "[SeH5]": 1210,
1400
+ "[194Pb]": 1211,
1401
+ "[196Pb]": 1212,
1402
+ "[197Pb]": 1213,
1403
+ "[213Pb]": 1214,
1404
+ "[9B]": 1215,
1405
+ "[19B]": 1216,
1406
+ "[11CH-]": 1217,
1407
+ "[9CH]": 1218,
1408
+ "[20OH]": 1219,
1409
+ "[25OH]": 1220,
1410
+ "[8cH]": 1221,
1411
+ "[TiH+3]": 1222,
1412
+ "[SnH6+3]": 1223,
1413
+ "[N@H+]": 1224,
1414
+ "[52Mn+2]": 1225,
1415
+ "[64Ga]": 1226,
1416
+ "[13B]": 1227,
1417
+ "[216Bi]": 1228,
1418
+ "[117Sn+2]": 1229,
1419
+ "[232Th]": 1230,
1420
+ "[SnH+2]": 1231,
1421
+ "[BiH5]": 1232,
1422
+ "[77Kr]": 1233,
1423
+ "[103Cd]": 1234,
1424
+ "[62Ni]": 1235,
1425
+ "[LaH3]": 1236,
1426
+ "[SmH3]": 1237,
1427
+ "[EuH3]": 1238,
1428
+ "[MoH5]": 1239,
1429
+ "[64Ni]": 1240,
1430
+ "[66Zn]": 1241,
1431
+ "[68Zn]": 1242,
1432
+ "[186W]": 1243,
1433
+ "[FeH4]": 1244,
1434
+ "[MoH4]": 1245,
1435
+ "[HgH2]": 1246,
1436
+ "[15NH2-]": 1247,
1437
+ "[UH2]": 1248,
1438
+ "[204Hg]": 1249,
1439
+ "[GaH4-]": 1250,
1440
+ "[ThH4]": 1251,
1441
+ "[WH6]": 1252,
1442
+ "[PtH4]": 1253,
1443
+ "[VH2]": 1254,
1444
+ "[UH3]": 1255,
1445
+ "[FeH3]": 1256,
1446
+ "[RuH5]": 1257,
1447
+ "[BiH4]": 1258,
1448
+ "[80Br-]": 1259,
1449
+ "[CeH3]": 1260,
1450
+ "[37ClH]": 1261,
1451
+ "[157Gd+3]": 1262,
1452
+ "[205Tl]": 1263,
1453
+ "[203Tl]": 1264,
1454
+ "[62Cu+]": 1265,
1455
+ "[64Cu+]": 1266,
1456
+ "[61Cu+]": 1267,
1457
+ "[37SH2]": 1268,
1458
+ "[30Si]": 1269,
1459
+ "[28Al]": 1270,
1460
+ "[19OH2]": 1271,
1461
+ "[8He]": 1272,
1462
+ "[6He]": 1273,
1463
+ "[153Pm]": 1274,
1464
+ "[209Bi]": 1275,
1465
+ "[66Zn+2]": 1276,
1466
+ "[10CH4]": 1277,
1467
+ "[191Ir]": 1278,
1468
+ "[66Cu]": 1279,
1469
+ "[16O+]": 1280,
1470
+ "[25O]": 1281,
1471
+ "[10c]": 1282,
1472
+ "[Co-3]": 1283,
1473
+ "[Sn@@]": 1284,
1474
+ "[17OH-]": 1285,
1475
+ "[206Po]": 1286,
1476
+ "[204Po]": 1287,
1477
+ "[202Po]": 1288,
1478
+ "[201Po]": 1289,
1479
+ "[200Po]": 1290,
1480
+ "[199Po]": 1291,
1481
+ "[198Po]": 1292,
1482
+ "[197Po]": 1293,
1483
+ "[196Po]": 1294,
1484
+ "[195Po]": 1295,
1485
+ "[194Po]": 1296,
1486
+ "[193Po]": 1297,
1487
+ "[192Po]": 1298,
1488
+ "[191Po]": 1299,
1489
+ "[190Po]": 1300,
1490
+ "[217Po]": 1301,
1491
+ "[BiH4-]": 1302,
1492
+ "[TeH4]": 1303,
1493
+ "[222Ra]": 1304,
1494
+ "[62Ga]": 1305,
1495
+ "[39Ar]": 1306,
1496
+ "[144Sm]": 1307,
1497
+ "[58Fe]": 1308,
1498
+ "[153Eu]": 1309,
1499
+ "[85Rb]": 1310,
1500
+ "[171Yb]": 1311,
1501
+ "[172Yb]": 1312,
1502
+ "[114Cd]": 1313,
1503
+ "[51Fe]": 1314,
1504
+ "[142Ce]": 1315,
1505
+ "[207Tl]": 1316,
1506
+ "[92Mo]": 1317,
1507
+ "[115Sn]": 1318,
1508
+ "[140Ce]": 1319,
1509
+ "[202Hg]": 1320,
1510
+ "[180W]": 1321,
1511
+ "[182W]": 1322,
1512
+ "[183W]": 1323,
1513
+ "[184W]": 1324,
1514
+ "[96Mo]": 1325,
1515
+ "[47Ti]": 1326,
1516
+ "[111Cd]": 1327,
1517
+ "[143Nd]": 1328,
1518
+ "[145Nd]": 1329,
1519
+ "[126Te]": 1330,
1520
+ "[128Te]": 1331,
1521
+ "[130Te]": 1332,
1522
+ "[185Re]": 1333,
1523
+ "[97Mo]": 1334,
1524
+ "[98Mo]": 1335,
1525
+ "[183Re]": 1336,
1526
+ "[52V]": 1337,
1527
+ "[80Se]": 1338,
1528
+ "[87Kr]": 1339,
1529
+ "[137Xe]": 1340,
1530
+ "[196Au]": 1341,
1531
+ "[146Ce]": 1342,
1532
+ "[88Kr]": 1343,
1533
+ "[51Ti]": 1344,
1534
+ "[138Xe]": 1345,
1535
+ "[112Cd]": 1346,
1536
+ "[116Sn]": 1347,
1537
+ "[120Sn]": 1348,
1538
+ "[28SiH3]": 1349,
1539
+ "[35S-]": 1350,
1540
+ "[15NH-]": 1351,
1541
+ "[13CH3+]": 1352,
1542
+ "[34S+]": 1353,
1543
+ "[34s]": 1354,
1544
+ "[SiH4-]": 1355,
1545
+ "[100Tc+5]": 1356,
1546
+ "[NiH2+2]": 1357,
1547
+ "[239Th]": 1358,
1548
+ "[186Lu]": 1359,
1549
+ "[AuH3]": 1360,
1550
+ "[I@@-]": 1361,
1551
+ "[XeH2]": 1362,
1552
+ "[B+]": 1363,
1553
+ "[16CH2]": 1364,
1554
+ "[8C]": 1365,
1555
+ "[TaH5]": 1366,
1556
+ "[FeH4-]": 1367,
1557
+ "[19C@H]": 1368,
1558
+ "[10NH]": 1369,
1559
+ "[FeH6-3]": 1370,
1560
+ "[22CH]": 1371,
1561
+ "[25N]": 1372,
1562
+ "[25N+]": 1373,
1563
+ "[25N-]": 1374,
1564
+ "[21CH2]": 1375,
1565
+ "[18cH]": 1376,
1566
+ "[113I]": 1377,
1567
+ "[ScH3]": 1378,
1568
+ "[30PH3]": 1379,
1569
+ "[43Ca+2]": 1380,
1570
+ "[41Ca+2]": 1381,
1571
+ "[106Cd]": 1382,
1572
+ "[122Sn]": 1383,
1573
+ "[18CH3]": 1384,
1574
+ "[58Co+3]": 1385,
1575
+ "[98Tc+4]": 1386,
1576
+ "[70Ge]": 1387,
1577
+ "[76Ge]": 1388,
1578
+ "[108Cd]": 1389,
1579
+ "[116Cd]": 1390,
1580
+ "[130Xe]": 1391,
1581
+ "[94Mo]": 1392,
1582
+ "[124Sn]": 1393,
1583
+ "[186Os]": 1394,
1584
+ "[188Os]": 1395,
1585
+ "[190Os]": 1396,
1586
+ "[192Os]": 1397,
1587
+ "[106Pd]": 1398,
1588
+ "[110Pd]": 1399,
1589
+ "[120Te]": 1400,
1590
+ "[132Ba]": 1401,
1591
+ "[134Ba]": 1402,
1592
+ "[136Ba]": 1403,
1593
+ "[136Ce]": 1404,
1594
+ "[138Ce]": 1405,
1595
+ "[156Dy]": 1406,
1596
+ "[158Dy]": 1407,
1597
+ "[160Dy]": 1408,
1598
+ "[163Dy]": 1409,
1599
+ "[162Er]": 1410,
1600
+ "[164Er]": 1411,
1601
+ "[167Er]": 1412,
1602
+ "[176Hf]": 1413,
1603
+ "[26Mg]": 1414,
1604
+ "[144Nd]": 1415,
1605
+ "[150Nd]": 1416,
1606
+ "[41K]": 1417,
1607
+ "[46Ti]": 1418,
1608
+ "[48Ti]": 1419,
1609
+ "[49Ti]": 1420,
1610
+ "[50Ti]": 1421,
1611
+ "[170Yb]": 1422,
1612
+ "[173Yb]": 1423,
1613
+ "[91Zr]": 1424,
1614
+ "[92Zr]": 1425,
1615
+ "[96Zr]": 1426,
1616
+ "[34S-]": 1427,
1617
+ "[CuH2-]": 1428,
1618
+ "[38Cl]": 1429,
1619
+ "[25Mg]": 1430,
1620
+ "[51V]": 1431,
1621
+ "[93Nb]": 1432,
1622
+ "[95Mo]": 1433,
1623
+ "[45Sc]": 1434,
1624
+ "[123Sb]": 1435,
1625
+ "[139La]": 1436,
1626
+ "[9Be]": 1437,
1627
+ "[99Y+3]": 1438,
1628
+ "[99Y]": 1439,
1629
+ "[156Ho]": 1440,
1630
+ "[67Zn]": 1441,
1631
+ "[144Ce+4]": 1442,
1632
+ "[210Tl]": 1443,
1633
+ "[42Ca]": 1444,
1634
+ "[54Fe]": 1445,
1635
+ "[193Ir]": 1446,
1636
+ "[92Nb]": 1447,
1637
+ "[141Cs]": 1448,
1638
+ "[52Cr]": 1449,
1639
+ "[35ClH]": 1450,
1640
+ "[46Ca]": 1451,
1641
+ "[139Cs]": 1452,
1642
+ "[65Cu]": 1453,
1643
+ "[71Ga]": 1454,
1644
+ "[60Ni]": 1455,
1645
+ "[16NH3]": 1456,
1646
+ "[148Nd]": 1457,
1647
+ "[72Ge]": 1458,
1648
+ "[161Dy]": 1459,
1649
+ "[49Ca]": 1460,
1650
+ "[43Ca]": 1461,
1651
+ "[8Be]": 1462,
1652
+ "[48Ca]": 1463,
1653
+ "[44Ca]": 1464,
1654
+ "[120Xe]": 1465,
1655
+ "[80Rb]": 1466,
1656
+ "[215At]": 1467,
1657
+ "[180Re]": 1468,
1658
+ "[146Sm]": 1469,
1659
+ "[19Ne]": 1470,
1660
+ "[74Kr]": 1471,
1661
+ "[134La]": 1472,
1662
+ "[76Kr]": 1473,
1663
+ "[219Fr]": 1474,
1664
+ "[121Xe]": 1475,
1665
+ "[220Fr]": 1476,
1666
+ "[216At]": 1477,
1667
+ "[223Ac]": 1478,
1668
+ "[218At]": 1479,
1669
+ "[37Ar]": 1480,
1670
+ "[135I]": 1481,
1671
+ "[110Cd]": 1482,
1672
+ "[94Tc+7]": 1483,
1673
+ "[86Y+3]": 1484,
1674
+ "[135I-]": 1485,
1675
+ "[15O-2]": 1486,
1676
+ "[151Eu+3]": 1487,
1677
+ "[161Tb+3]": 1488,
1678
+ "[197Hg+2]": 1489,
1679
+ "[109Cd+2]": 1490,
1680
+ "[191Os+4]": 1491,
1681
+ "[170Tm+3]": 1492,
1682
+ "[205Bi+3]": 1493,
1683
+ "[233U+4]": 1494,
1684
+ "[126Sb+3]": 1495,
1685
+ "[127Sb+3]": 1496,
1686
+ "[132Cs+]": 1497,
1687
+ "[136Eu+3]": 1498,
1688
+ "[136Eu]": 1499,
1689
+ "[125Sn+4]": 1500,
1690
+ "[175Yb+3]": 1501,
1691
+ "[100Mo]": 1502,
1692
+ "[22Ne]": 1503,
1693
+ "[13c-]": 1504,
1694
+ "[13NH4+]": 1505,
1695
+ "[17C]": 1506,
1696
+ "[9C]": 1507,
1697
+ "[31S]": 1508,
1698
+ "[31SH]": 1509,
1699
+ "[133I]": 1510,
1700
+ "[126I]": 1511,
1701
+ "[36SH]": 1512,
1702
+ "[30S]": 1513,
1703
+ "[32SH]": 1514,
1704
+ "[19CH2]": 1515,
1705
+ "[19c]": 1516,
1706
+ "[18c]": 1517,
1707
+ "[15F]": 1518,
1708
+ "[10C]": 1519,
1709
+ "[RuH-]": 1520,
1710
+ "[62Zn+2]": 1521,
1711
+ "[32ClH]": 1522,
1712
+ "[33ClH]": 1523,
1713
+ "[78BrH]": 1524,
1714
+ "[12Li+]": 1525,
1715
+ "[12Li]": 1526,
1716
+ "[233Ra]": 1527,
1717
+ "[68Ge+4]": 1528,
1718
+ "[44Sc+3]": 1529,
1719
+ "[91Y+3]": 1530,
1720
+ "[106Ru+3]": 1531,
1721
+ "[PoH2]": 1532,
1722
+ "[AtH]": 1533,
1723
+ "[55Fe]": 1534,
1724
+ "[233U]": 1535,
1725
+ "[210PoH2]": 1536,
1726
+ "[230Th]": 1537,
1727
+ "[228Th]": 1538,
1728
+ "[222Rn]": 1539,
1729
+ "[35SH2]": 1540,
1730
+ "[227Th]": 1541,
1731
+ "[192Ir]": 1542,
1732
+ "[133Xe]": 1543,
1733
+ "[81Kr]": 1544,
1734
+ "[95Zr]": 1545,
1735
+ "[240Pu]": 1546,
1736
+ "[54Mn]": 1547,
1737
+ "[103Ru]": 1548,
1738
+ "[95Nb]": 1549,
1739
+ "[109Cd]": 1550,
1740
+ "[141Ce]": 1551,
1741
+ "[85Kr]": 1552,
1742
+ "[110Ag]": 1553,
1743
+ "[58Co]": 1554,
1744
+ "[241Pu]": 1555,
1745
+ "[234Th]": 1556,
1746
+ "[140La]": 1557,
1747
+ "[63Ni]": 1558,
1748
+ "[152Eu]": 1559,
1749
+ "[132IH]": 1560,
1750
+ "[226Rn]": 1561,
1751
+ "[154Eu]": 1562,
1752
+ "[36ClH]": 1563,
1753
+ "[228Ac]": 1564,
1754
+ "[155Eu]": 1565,
1755
+ "[106Rh]": 1566,
1756
+ "[243Am]": 1567,
1757
+ "[227Ac]": 1568,
1758
+ "[243Cm]": 1569,
1759
+ "[236U]": 1570,
1760
+ "[144Pr]": 1571,
1761
+ "[232U]": 1572,
1762
+ "[32SH2]": 1573,
1763
+ "[88Y]": 1574,
1764
+ "[82BrH]": 1575,
1765
+ "[135IH]": 1576,
1766
+ "[242Cm]": 1577,
1767
+ "[115Cd]": 1578,
1768
+ "[242Pu]": 1579,
1769
+ "[46Sc]": 1580,
1770
+ "[56Mn]": 1581,
1771
+ "[234Pa]": 1582,
1772
+ "[41Ar]": 1583,
1773
+ "[147Nd]": 1584,
1774
+ "[187W]": 1585,
1775
+ "[151Sm]": 1586,
1776
+ "[59Ni]": 1587,
1777
+ "[233Pa]": 1588,
1778
+ "[52Mn]": 1589,
1779
+ "[94Nb]": 1590,
1780
+ "[219Rn]": 1591,
1781
+ "[236Pu]": 1592,
1782
+ "[13NH3]": 1593,
1783
+ "[93Zr]": 1594,
1784
+ "[51Cr+6]": 1595,
1785
+ "[TlH3]": 1596,
1786
+ "[123Xe]": 1597,
1787
+ "[160Tb]": 1598,
1788
+ "[170Tm]": 1599,
1789
+ "[182Ta]": 1600,
1790
+ "[175Yb]": 1601,
1791
+ "[93Mo]": 1602,
1792
+ "[143Ce]": 1603,
1793
+ "[191Os]": 1604,
1794
+ "[126IH]": 1605,
1795
+ "[48V]": 1606,
1796
+ "[113Cd]": 1607,
1797
+ "[47Sc]": 1608,
1798
+ "[181Hf]": 1609,
1799
+ "[185W]": 1610,
1800
+ "[143Pr]": 1611,
1801
+ "[191Pt]": 1612,
1802
+ "[181W]": 1613,
1803
+ "[33PH3]": 1614,
1804
+ "[97Ru]": 1615,
1805
+ "[97Tc]": 1616,
1806
+ "[111Ag]": 1617,
1807
+ "[169Er]": 1618,
1808
+ "[107Pd]": 1619,
1809
+ "[103Ru+2]": 1620,
1810
+ "[34SH2]": 1621,
1811
+ "[137Ce]": 1622,
1812
+ "[242Am]": 1623,
1813
+ "[117SnH2]": 1624,
1814
+ "[57Ni]": 1625,
1815
+ "[239U]": 1626,
1816
+ "[60Cu]": 1627,
1817
+ "[250Cf]": 1628,
1818
+ "[193Au]": 1629,
1819
+ "[69Zn]": 1630,
1820
+ "[55Co]": 1631,
1821
+ "[139Ce]": 1632,
1822
+ "[127Xe]": 1633,
1823
+ "[159Gd]": 1634,
1824
+ "[56Co]": 1635,
1825
+ "[177Hf]": 1636,
1826
+ "[244Pu]": 1637,
1827
+ "[38ClH]": 1638,
1828
+ "[142Pr]": 1639,
1829
+ "[199Hg]": 1640,
1830
+ "[179Hf]": 1641,
1831
+ "[178Hf]": 1642,
1832
+ "[237U]": 1643,
1833
+ "[156Eu]": 1644,
1834
+ "[157Eu]": 1645,
1835
+ "[105Ru]": 1646,
1836
+ "[171Tm]": 1647,
1837
+ "[199Au]": 1648,
1838
+ "[155Sm]": 1649,
1839
+ "[80BrH]": 1650,
1840
+ "[108Ag]": 1651,
1841
+ "[128IH]": 1652,
1842
+ "[48Sc]": 1653,
1843
+ "[45Ti]": 1654,
1844
+ "[176Lu]": 1655,
1845
+ "[121SnH2]": 1656,
1846
+ "[148Pm]": 1657,
1847
+ "[57Fe]": 1658,
1848
+ "[10BH3]": 1659,
1849
+ "[96Tc]": 1660,
1850
+ "[133IH]": 1661,
1851
+ "[143Pm]": 1662,
1852
+ "[105Rh]": 1663,
1853
+ "[130IH]": 1664,
1854
+ "[134IH]": 1665,
1855
+ "[131IH]": 1666,
1856
+ "[71Zn]": 1667,
1857
+ "[105Ag]": 1668,
1858
+ "[97Zr]": 1669,
1859
+ "[235Pu]": 1670,
1860
+ "[231Th]": 1671,
1861
+ "[109Pd]": 1672,
1862
+ "[93Y]": 1673,
1863
+ "[190Ir]": 1674,
1864
+ "[135Xe]": 1675,
1865
+ "[53Mn]": 1676,
1866
+ "[134Ce]": 1677,
1867
+ "[234Np]": 1678,
1868
+ "[240Am]": 1679,
1869
+ "[246Cf]": 1680,
1870
+ "[240Cm]": 1681,
1871
+ "[241Cm]": 1682,
1872
+ "[226Th]": 1683,
1873
+ "[39ClH]": 1684,
1874
+ "[229Th]": 1685,
1875
+ "[245Cm]": 1686,
1876
+ "[240U]": 1687,
1877
+ "[240Np]": 1688,
1878
+ "[249Cm]": 1689,
1879
+ "[243Pu]": 1690,
1880
+ "[145Pm]": 1691,
1881
+ "[199Pt]": 1692,
1882
+ "[246Bk]": 1693,
1883
+ "[193Pt]": 1694,
1884
+ "[230U]": 1695,
1885
+ "[250Cm]": 1696,
1886
+ "[44Ti]": 1697,
1887
+ "[175Hf]": 1698,
1888
+ "[254Fm]": 1699,
1889
+ "[255Fm]": 1700,
1890
+ "[257Fm]": 1701,
1891
+ "[92Y]": 1702,
1892
+ "[188Ir]": 1703,
1893
+ "[171Lu]": 1704,
1894
+ "[257Md]": 1705,
1895
+ "[247Bk]": 1706,
1896
+ "[121IH]": 1707,
1897
+ "[250Bk]": 1708,
1898
+ "[179Lu]": 1709,
1899
+ "[224Ac]": 1710,
1900
+ "[195Hg]": 1711,
1901
+ "[244Am]": 1712,
1902
+ "[246Pu]": 1713,
1903
+ "[194Au]": 1714,
1904
+ "[252Fm]": 1715,
1905
+ "[173Hf]": 1716,
1906
+ "[246Cm]": 1717,
1907
+ "[135Ce]": 1718,
1908
+ "[49Cr]": 1719,
1909
+ "[248Cf]": 1720,
1910
+ "[247Cm]": 1721,
1911
+ "[248Cm]": 1722,
1912
+ "[174Ta]": 1723,
1913
+ "[176Ta]": 1724,
1914
+ "[154Tb]": 1725,
1915
+ "[172Ta]": 1726,
1916
+ "[177Ta]": 1727,
1917
+ "[175Ta]": 1728,
1918
+ "[180Ta]": 1729,
1919
+ "[158Tb]": 1730,
1920
+ "[115Ag]": 1731,
1921
+ "[189Os]": 1732,
1922
+ "[251Cf]": 1733,
1923
+ "[145Pr]": 1734,
1924
+ "[147Pr]": 1735,
1925
+ "[76BrH]": 1736,
1926
+ "[102Rh]": 1737,
1927
+ "[238Np]": 1738,
1928
+ "[185Os]": 1739,
1929
+ "[246Am]": 1740,
1930
+ "[233Np]": 1741,
1931
+ "[166Dy]": 1742,
1932
+ "[254Es]": 1743,
1933
+ "[244Cf]": 1744,
1934
+ "[193Os]": 1745,
1935
+ "[245Am]": 1746,
1936
+ "[245Bk]": 1747,
1937
+ "[239Am]": 1748,
1938
+ "[238Am]": 1749,
1939
+ "[97Nb]": 1750,
1940
+ "[245Pu]": 1751,
1941
+ "[254Cf]": 1752,
1942
+ "[188W]": 1753,
1943
+ "[250Es]": 1754,
1944
+ "[251Es]": 1755,
1945
+ "[237Am]": 1756,
1946
+ "[182Hf]": 1757,
1947
+ "[258Md]": 1758,
1948
+ "[232Np]": 1759,
1949
+ "[238Cm]": 1760,
1950
+ "[60Fe]": 1761,
1951
+ "[109Pd+2]": 1762,
1952
+ "[234Pu]": 1763,
1953
+ "[141Ce+3]": 1764,
1954
+ "[136Nd]": 1765,
1955
+ "[136Pr]": 1766,
1956
+ "[173Ta]": 1767,
1957
+ "[110Ru]": 1768,
1958
+ "[147Tb]": 1769,
1959
+ "[253Fm]": 1770,
1960
+ "[139Nd]": 1771,
1961
+ "[178Re]": 1772,
1962
+ "[177Re]": 1773,
1963
+ "[200Au]": 1774,
1964
+ "[182Re]": 1775,
1965
+ "[156Tb]": 1776,
1966
+ "[155Tb]": 1777,
1967
+ "[157Tb]": 1778,
1968
+ "[161Tb]": 1779,
1969
+ "[161Ho]": 1780,
1970
+ "[167Tm]": 1781,
1971
+ "[173Lu]": 1782,
1972
+ "[179Ta]": 1783,
1973
+ "[171Er]": 1784,
1974
+ "[44Sc]": 1785,
1975
+ "[49Sc]": 1786,
1976
+ "[49V]": 1787,
1977
+ "[51Mn]": 1788,
1978
+ "[90Nb]": 1789,
1979
+ "[88Nb]": 1790,
1980
+ "[88Zr]": 1791,
1981
+ "[36SH2]": 1792,
1982
+ "[174Yb]": 1793,
1983
+ "[178Lu]": 1794,
1984
+ "[179W]": 1795,
1985
+ "[83BrH]": 1796,
1986
+ "[107Cd]": 1797,
1987
+ "[75BrH]": 1798,
1988
+ "[62Co]": 1799,
1989
+ "[48Cr]": 1800,
1990
+ "[63Zn]": 1801,
1991
+ "[102Ag]": 1802,
1992
+ "[154Sm]": 1803,
1993
+ "[168Er]": 1804,
1994
+ "[65Ni]": 1805,
1995
+ "[137La]": 1806,
1996
+ "[187Ir]": 1807,
1997
+ "[144Pm]": 1808,
1998
+ "[146Pm]": 1809,
1999
+ "[160Gd]": 1810,
2000
+ "[166Yb]": 1811,
2001
+ "[162Dy]": 1812,
2002
+ "[47V]": 1813,
2003
+ "[141Nd]": 1814,
2004
+ "[141Sm]": 1815,
2005
+ "[166Er]": 1816,
2006
+ "[150Sm]": 1817,
2007
+ "[146Eu]": 1818,
2008
+ "[149Eu]": 1819,
2009
+ "[174Lu]": 1820,
2010
+ "[17NH3]": 1821,
2011
+ "[102Ru]": 1822,
2012
+ "[170Hf]": 1823,
2013
+ "[188Pt]": 1824,
2014
+ "[61Ni]": 1825,
2015
+ "[56Ni]": 1826,
2016
+ "[149Gd]": 1827,
2017
+ "[151Gd]": 1828,
2018
+ "[141Pm]": 1829,
2019
+ "[147Gd]": 1830,
2020
+ "[146Gd]": 1831,
2021
+ "[161Er]": 1832,
2022
+ "[103Ag]": 1833,
2023
+ "[145Eu]": 1834,
2024
+ "[153Tb]": 1835,
2025
+ "[155Dy]": 1836,
2026
+ "[184Re]": 1837,
2027
+ "[180Os]": 1838,
2028
+ "[182Os]": 1839,
2029
+ "[186Pt]": 1840,
2030
+ "[181Os]": 1841,
2031
+ "[181Re]": 1842,
2032
+ "[151Tb]": 1843,
2033
+ "[178Ta]": 1844,
2034
+ "[178W]": 1845,
2035
+ "[189Pt]": 1846,
2036
+ "[194Hg]": 1847,
2037
+ "[145Sm]": 1848,
2038
+ "[150Tb]": 1849,
2039
+ "[132La]": 1850,
2040
+ "[158Gd]": 1851,
2041
+ "[104Ag]": 1852,
2042
+ "[193Hg]": 1853,
2043
+ "[94Ru]": 1854,
2044
+ "[137Pr]": 1855,
2045
+ "[155Ho]": 1856,
2046
+ "[117Cd]": 1857,
2047
+ "[99Ru]": 1858,
2048
+ "[146Nd]": 1859,
2049
+ "[218Rn]": 1860,
2050
+ "[95Y]": 1861,
2051
+ "[79Kr]": 1862,
2052
+ "[120IH]": 1863,
2053
+ "[138Pr]": 1864,
2054
+ "[100Pd]": 1865,
2055
+ "[166Tm]": 1866,
2056
+ "[90Mo]": 1867,
2057
+ "[151Nd]": 1868,
2058
+ "[231U]": 1869,
2059
+ "[138Nd]": 1870,
2060
+ "[89Nb]": 1871,
2061
+ "[98Nb]": 1872,
2062
+ "[162Ho]": 1873,
2063
+ "[142Sm]": 1874,
2064
+ "[186Ta]": 1875,
2065
+ "[104Tc]": 1876,
2066
+ "[184Ta]": 1877,
2067
+ "[185Ta]": 1878,
2068
+ "[170Er]": 1879,
2069
+ "[107Rh]": 1880,
2070
+ "[131La]": 1881,
2071
+ "[169Lu]": 1882,
2072
+ "[74BrH]": 1883,
2073
+ "[150Pm]": 1884,
2074
+ "[172Tm]": 1885,
2075
+ "[197Pt]": 1886,
2076
+ "[230Pu]": 1887,
2077
+ "[170Lu]": 1888,
2078
+ "[86Zr]": 1889,
2079
+ "[176W]": 1890,
2080
+ "[177W]": 1891,
2081
+ "[101Pd]": 1892,
2082
+ "[105Pd]": 1893,
2083
+ "[108Pd]": 1894,
2084
+ "[149Nd]": 1895,
2085
+ "[164Ho]": 1896,
2086
+ "[159Ho]": 1897,
2087
+ "[167Ho]": 1898,
2088
+ "[176Yb]": 1899,
2089
+ "[156Sm]": 1900,
2090
+ "[77BrH]": 1901,
2091
+ "[189Re]": 1902,
2092
+ "[99Rh]": 1903,
2093
+ "[100Rh]": 1904,
2094
+ "[151Pm]": 1905,
2095
+ "[232Pa]": 1906,
2096
+ "[228Pa]": 1907,
2097
+ "[230Pa]": 1908,
2098
+ "[66Ni]": 1909,
2099
+ "[194Os]": 1910,
2100
+ "[135La]": 1911,
2101
+ "[138La]": 1912,
2102
+ "[141La]": 1913,
2103
+ "[142La]": 1914,
2104
+ "[195Ir]": 1915,
2105
+ "[96Nb]": 1916,
2106
+ "[157Ho]": 1917,
2107
+ "[183Hf]": 1918,
2108
+ "[162Tm]": 1919,
2109
+ "[172Er]": 1920,
2110
+ "[148Eu]": 1921,
2111
+ "[150Eu]": 1922,
2112
+ "[15CH4]": 1923,
2113
+ "[89Kr]": 1924,
2114
+ "[143La]": 1925,
2115
+ "[58Ni]": 1926,
2116
+ "[61Co]": 1927,
2117
+ "[158Eu]": 1928,
2118
+ "[165Er]": 1929,
2119
+ "[167Yb]": 1930,
2120
+ "[173Tm]": 1931,
2121
+ "[175Tm]": 1932,
2122
+ "[172Hf]": 1933,
2123
+ "[172Lu]": 1934,
2124
+ "[93Tc]": 1935,
2125
+ "[177Yb]": 1936,
2126
+ "[124IH]": 1937,
2127
+ "[194Ir]": 1938,
2128
+ "[147Eu]": 1939,
2129
+ "[101Mo]": 1940,
2130
+ "[180Hf]": 1941,
2131
+ "[189Ir]": 1942,
2132
+ "[87Y]": 1943,
2133
+ "[43Sc]": 1944,
2134
+ "[195Au]": 1945,
2135
+ "[112Ag]": 1946,
2136
+ "[84BrH]": 1947,
2137
+ "[106Ag]": 1948,
2138
+ "[109Ag]": 1949,
2139
+ "[101Rh]": 1950,
2140
+ "[162Yb]": 1951,
2141
+ "[228Rn]": 1952,
2142
+ "[139Pr]": 1953,
2143
+ "[94Y]": 1954,
2144
+ "[201Au]": 1955,
2145
+ "[40PH3]": 1956,
2146
+ "[110Ag+]": 1957,
2147
+ "[104Cd]": 1958,
2148
+ "[133Ba+2]": 1959,
2149
+ "[226Ac]": 1960,
2150
+ "[145Gd]": 1961,
2151
+ "[186Ir]": 1962,
2152
+ "[184Ir]": 1963,
2153
+ "[224Rn]": 1964,
2154
+ "[185Ir]": 1965,
2155
+ "[182Ir]": 1966,
2156
+ "[184Hf]": 1967,
2157
+ "[200Pt]": 1968,
2158
+ "[227Pa]": 1969,
2159
+ "[178Yb]": 1970,
2160
+ "[72Br-]": 1971,
2161
+ "[72BrH]": 1972,
2162
+ "[248Am]": 1973,
2163
+ "[238Th]": 1974,
2164
+ "[161Gd]": 1975,
2165
+ "[35S-2]": 1976,
2166
+ "[107Ag]": 1977,
2167
+ "[FeH6-4]": 1978,
2168
+ "[89Sr]": 1979,
2169
+ "[SnH3-]": 1980,
2170
+ "[SeH3]": 1981,
2171
+ "[TeH3+]": 1982,
2172
+ "[SbH4+]": 1983,
2173
+ "[AsH4+]": 1984,
2174
+ "[4He]": 1985,
2175
+ "[AsH3-]": 1986,
2176
+ "[1HH]": 1987,
2177
+ "[3H+]": 1988,
2178
+ "[82Rb]": 1989,
2179
+ "[85Sr]": 1990,
2180
+ "[90Sr]": 1991,
2181
+ "[137Cs]": 1992,
2182
+ "[133Ba]": 1993,
2183
+ "[131Cs]": 1994,
2184
+ "[SbH5]": 1995,
2185
+ "[224Ra]": 1996,
2186
+ "[22Na]": 1997,
2187
+ "[210Bi]": 1998,
2188
+ "[214Bi]": 1999,
2189
+ "[228Ra]": 2000,
2190
+ "[127Sb]": 2001,
2191
+ "[136Cs]": 2002,
2192
+ "[125Sb]": 2003,
2193
+ "[134Cs]": 2004,
2194
+ "[140Ba]": 2005,
2195
+ "[45Ca]": 2006,
2196
+ "[206Pb]": 2007,
2197
+ "[207Pb]": 2008,
2198
+ "[24Na]": 2009,
2199
+ "[86Rb]": 2010,
2200
+ "[212Bi]": 2011,
2201
+ "[208Pb]": 2012,
2202
+ "[124Sb]": 2013,
2203
+ "[204Pb]": 2014,
2204
+ "[44K]": 2015,
2205
+ "[129Te]": 2016,
2206
+ "[113Sn]": 2017,
2207
+ "[204Tl]": 2018,
2208
+ "[87Sr]": 2019,
2209
+ "[208Tl]": 2020,
2210
+ "[87Rb]": 2021,
2211
+ "[47Ca]": 2022,
2212
+ "[135Cs]": 2023,
2213
+ "[216Po]": 2024,
2214
+ "[137Ba]": 2025,
2215
+ "[207Bi]": 2026,
2216
+ "[212Po]": 2027,
2217
+ "[79Se]": 2028,
2218
+ "[223Ra]": 2029,
2219
+ "[86Sr]": 2030,
2220
+ "[122Sb]": 2031,
2221
+ "[26Al]": 2032,
2222
+ "[32Si]": 2033,
2223
+ "[126Sn]": 2034,
2224
+ "[225Ra]": 2035,
2225
+ "[114In]": 2036,
2226
+ "[72Ga]": 2037,
2227
+ "[132Te]": 2038,
2228
+ "[10Be]": 2039,
2229
+ "[125Sn]": 2040,
2230
+ "[73As]": 2041,
2231
+ "[206Bi]": 2042,
2232
+ "[117Sn]": 2043,
2233
+ "[40Ca]": 2044,
2234
+ "[41Ca]": 2045,
2235
+ "[89Rb]": 2046,
2236
+ "[116In]": 2047,
2237
+ "[129Sb]": 2048,
2238
+ "[91Sr]": 2049,
2239
+ "[71Ge]": 2050,
2240
+ "[139Ba]": 2051,
2241
+ "[69Ga]": 2052,
2242
+ "[120Sb]": 2053,
2243
+ "[121Sn]": 2054,
2244
+ "[123Sn]": 2055,
2245
+ "[131Te]": 2056,
2246
+ "[77Ge]": 2057,
2247
+ "[135Ba]": 2058,
2248
+ "[82Sr]": 2059,
2249
+ "[43K]": 2060,
2250
+ "[131Ba]": 2061,
2251
+ "[92Sr]": 2062,
2252
+ "[88Rb]": 2063,
2253
+ "[129Cs]": 2064,
2254
+ "[144Cs]": 2065,
2255
+ "[127Cs]": 2066,
2256
+ "[200Tl]": 2067,
2257
+ "[202Tl]": 2068,
2258
+ "[141Ba]": 2069,
2259
+ "[117Sb]": 2070,
2260
+ "[116Sb]": 2071,
2261
+ "[78As]": 2072,
2262
+ "[131Sb]": 2073,
2263
+ "[126Sb]": 2074,
2264
+ "[128Sb]": 2075,
2265
+ "[130Sb]": 2076,
2266
+ "[67Ge]": 2077,
2267
+ "[68Ge]": 2078,
2268
+ "[78Ge]": 2079,
2269
+ "[66Ge]": 2080,
2270
+ "[223Fr]": 2081,
2271
+ "[132Cs]": 2082,
2272
+ "[125Cs]": 2083,
2273
+ "[138Cs]": 2084,
2274
+ "[133Te]": 2085,
2275
+ "[84Rb]": 2086,
2276
+ "[83Rb]": 2087,
2277
+ "[81Rb]": 2088,
2278
+ "[142Ba]": 2089,
2279
+ "[200Bi]": 2090,
2280
+ "[115Sb]": 2091,
2281
+ "[194Tl]": 2092,
2282
+ "[70Se]": 2093,
2283
+ "[112In]": 2094,
2284
+ "[118Sb]": 2095,
2285
+ "[70Ga]": 2096,
2286
+ "[27Mg]": 2097,
2287
+ "[202Bi]": 2098,
2288
+ "[83Se]": 2099,
2289
+ "[9Li]": 2100,
2290
+ "[69As]": 2101,
2291
+ "[79Rb]": 2102,
2292
+ "[81Sr]": 2103,
2293
+ "[83Sr]": 2104,
2294
+ "[78Se]": 2105,
2295
+ "[109In]": 2106,
2296
+ "[29Al]": 2107,
2297
+ "[118Sn]": 2108,
2298
+ "[117In]": 2109,
2299
+ "[119Sb]": 2110,
2300
+ "[114Sn]": 2111,
2301
+ "[138Ba]": 2112,
2302
+ "[69Ge]": 2113,
2303
+ "[73Ga]": 2114,
2304
+ "[74Ge]": 2115,
2305
+ "[206Tl]": 2116,
2306
+ "[199Tl]": 2117,
2307
+ "[130Cs]": 2118,
2308
+ "[28Mg]": 2119,
2309
+ "[116Te]": 2120,
2310
+ "[112Sn]": 2121,
2311
+ "[126Ba]": 2122,
2312
+ "[211Bi]": 2123,
2313
+ "[81Se]": 2124,
2314
+ "[127Sn]": 2125,
2315
+ "[143Cs]": 2126,
2316
+ "[134Te]": 2127,
2317
+ "[80Sr]": 2128,
2318
+ "[45K]": 2129,
2319
+ "[215Po]": 2130,
2320
+ "[207Po]": 2131,
2321
+ "[111Sn]": 2132,
2322
+ "[211Po]": 2133,
2323
+ "[128Ba]": 2134,
2324
+ "[198Tl]": 2135,
2325
+ "[227Ra]": 2136,
2326
+ "[213Po]": 2137,
2327
+ "[220Ra]": 2138,
2328
+ "[128Sn]": 2139,
2329
+ "[203Po]": 2140,
2330
+ "[205Po]": 2141,
2331
+ "[65Ga]": 2142,
2332
+ "[197Tl]": 2143,
2333
+ "[88Sr]": 2144,
2334
+ "[110In]": 2145,
2335
+ "[31Si]": 2146,
2336
+ "[201Bi]": 2147,
2337
+ "[121Te]": 2148,
2338
+ "[205Bi]": 2149,
2339
+ "[203Bi]": 2150,
2340
+ "[195Tl]": 2151,
2341
+ "[209Tl]": 2152,
2342
+ "[110Sn]": 2153,
2343
+ "[222Fr]": 2154,
2344
+ "[207At]": 2155,
2345
+ "[119In]": 2156,
2346
+ "[As@]": 2157,
2347
+ "[129IH]": 2158,
2348
+ "[157Dy]": 2159,
2349
+ "[111IH]": 2160,
2350
+ "[230Ra]": 2161,
2351
+ "[144Pr+3]": 2162,
2352
+ "[SiH3+]": 2163,
2353
+ "[3He]": 2164,
2354
+ "[AsH5]": 2165,
2355
+ "[72Se]": 2166,
2356
+ "[95Tc]": 2167,
2357
+ "[103Pd]": 2168,
2358
+ "[121Sn+2]": 2169,
2359
+ "[211Rn]": 2170,
2360
+ "[38SH2]": 2171,
2361
+ "[127IH]": 2172,
2362
+ "[74Br-]": 2173,
2363
+ "[133I-]": 2174,
2364
+ "[100Tc+4]": 2175,
2365
+ "[100Tc]": 2176,
2366
+ "[36Cl-]": 2177,
2367
+ "[89Y+3]": 2178,
2368
+ "[104Rh]": 2179,
2369
+ "[152Sm]": 2180,
2370
+ "[226Ra]": 2181,
2371
+ "[19FH]": 2182,
2372
+ "[104Pd]": 2183,
2373
+ "[148Gd]": 2184,
2374
+ "[157Lu]": 2185,
2375
+ "[33SH2]": 2186,
2376
+ "[121I-]": 2187,
2377
+ "[17FH]": 2188,
2378
+ "[71Se]": 2189,
2379
+ "[157Sm]": 2190,
2380
+ "[148Tb]": 2191,
2381
+ "[164Dy]": 2192,
2382
+ "[15OH2]": 2193,
2383
+ "[15O+]": 2194,
2384
+ "[39K]": 2195,
2385
+ "[40Ar]": 2196,
2386
+ "[50Cr+3]": 2197,
2387
+ "[50Cr]": 2198,
2388
+ "[52Ti]": 2199,
2389
+ "[103Pd+2]": 2200,
2390
+ "[130Ba]": 2201,
2391
+ "[142Pm]": 2202,
2392
+ "[153Gd+3]": 2203,
2393
+ "[151Eu]": 2204,
2394
+ "[103Rh]": 2205,
2395
+ "[124Xe]": 2206,
2396
+ "[152Tb]": 2207,
2397
+ "[17OH2]": 2208,
2398
+ "[20Ne]": 2209,
2399
+ "[52Fe]": 2210,
2400
+ "[94Zr+4]": 2211,
2401
+ "[94Zr]": 2212,
2402
+ "[149Pr]": 2213,
2403
+ "[16OH2]": 2214,
2404
+ "[53Cr+6]": 2215,
2405
+ "[53Cr]": 2216,
2406
+ "[81Br-]": 2217,
2407
+ "[112Pd]": 2218,
2408
+ "[125Xe]": 2219,
2409
+ "[155Gd]": 2220,
2410
+ "[157Gd]": 2221,
2411
+ "[168Yb]": 2222,
2412
+ "[184Os]": 2223,
2413
+ "[166Tb]": 2224,
2414
+ "[221Fr]": 2225,
2415
+ "[212Ra]": 2226,
2416
+ "[75Br-]": 2227,
2417
+ "[79Br-]": 2228,
2418
+ "[113Ag]": 2229,
2419
+ "[23Na]": 2230,
2420
+ "[34Cl-]": 2231,
2421
+ "[34ClH]": 2232,
2422
+ "[38Cl-]": 2233,
2423
+ "[56Fe]": 2234,
2424
+ "[68Cu]": 2235,
2425
+ "[77Br-]": 2236,
2426
+ "[90Zr+4]": 2237,
2427
+ "[90Zr]": 2238,
2428
+ "[102Pd]": 2239,
2429
+ "[154Eu+3]": 2240,
2430
+ "[57Mn]": 2241,
2431
+ "[165Tm]": 2242,
2432
+ "[152Dy]": 2243,
2433
+ "[217At]": 2244,
2434
+ "[77se]": 2245,
2435
+ "[13cH-]": 2246,
2436
+ "[122Te]": 2247,
2437
+ "[156Gd]": 2248,
2438
+ "[124Te]": 2249,
2439
+ "[53Ni]": 2250,
2440
+ "[131Xe]": 2251,
2441
+ "[174Hf+4]": 2252,
2442
+ "[174Hf]": 2253,
2443
+ "[76Se]": 2254,
2444
+ "[168Tm]": 2255,
2445
+ "[167Dy]": 2256,
2446
+ "[154Gd]": 2257,
2447
+ "[95Ru]": 2258,
2448
+ "[210At]": 2259,
2449
+ "[85Br]": 2260,
2450
+ "[59Co]": 2261,
2451
+ "[122Xe]": 2262,
2452
+ "[27Al]": 2263,
2453
+ "[54Cr]": 2264,
2454
+ "[198Hg]": 2265,
2455
+ "[85Rb+]": 2266,
2456
+ "[214Tl]": 2267,
2457
+ "[229Rn]": 2268,
2458
+ "[218Pb]": 2269,
2459
+ "[218Bi]": 2270,
2460
+ "[167Tm+3]": 2271,
2461
+ "[18o+]": 2272,
2462
+ "[P@@H+]": 2273,
2463
+ "[P@H+]": 2274,
2464
+ "[13N+]": 2275,
2465
+ "[212Pb+2]": 2276,
2466
+ "[217Bi]": 2277,
2467
+ "[249Cf+2]": 2278,
2468
+ "[18OH3+]": 2279,
2469
+ "[90Sr-]": 2280,
2470
+ "[Cf+3]": 2281,
2471
+ "[200Hg]": 2282,
2472
+ "[86Tc]": 2283,
2473
+ "[141Pr+3]": 2284,
2474
+ "[141Pr]": 2285,
2475
+ "[16nH]": 2286,
2476
+ "[14NH4+]": 2287,
2477
+ "[132Xe]": 2288,
2478
+ "[83Kr]": 2289,
2479
+ "[70Zn+2]": 2290,
2480
+ "[137Ba+2]": 2291,
2481
+ "[36Ar]": 2292,
2482
+ "[38Ar]": 2293,
2483
+ "[21Ne]": 2294,
2484
+ "[126Xe]": 2295,
2485
+ "[136Xe]": 2296,
2486
+ "[128Xe]": 2297,
2487
+ "[134Xe]": 2298,
2488
+ "[84Kr]": 2299,
2489
+ "[86Kr]": 2300,
2490
+ "[78Kr]": 2301,
2491
+ "[80Kr]": 2302,
2492
+ "[82Kr]": 2303,
2493
+ "[67Zn+2]": 2304,
2494
+ "[65Cu+2]": 2305,
2495
+ "[110Te]": 2306,
2496
+ "[58Fe+3]": 2307,
2497
+ "[142Nd]": 2308,
2498
+ "[38K]": 2309,
2499
+ "[198Au+3]": 2310,
2500
+ "[122IH]": 2311,
2501
+ "[38PH3]": 2312,
2502
+ "[130I-]": 2313,
2503
+ "[40K+]": 2314,
2504
+ "[38K+]": 2315,
2505
+ "[28Mg+2]": 2316,
2506
+ "[208Tl+]": 2317,
2507
+ "[13OH2]": 2318,
2508
+ "[198Bi]": 2319,
2509
+ "[192Bi]": 2320,
2510
+ "[194Bi]": 2321,
2511
+ "[196Bi]": 2322,
2512
+ "[132I-]": 2323,
2513
+ "[83Sr+2]": 2324,
2514
+ "[169Er+3]": 2325,
2515
+ "[122I-]": 2326,
2516
+ "[120I-]": 2327,
2517
+ "[92Sr+2]": 2328,
2518
+ "[126I-]": 2329,
2519
+ "[24Mg]": 2330,
2520
+ "[84Sr]": 2331,
2521
+ "[118Pd+2]": 2332,
2522
+ "[118Pd]": 2333,
2523
+ "[AsH4]": 2334,
2524
+ "[127I-]": 2335,
2525
+ "[9C-]": 2336,
2526
+ "[11CH3+]": 2337,
2527
+ "[17B]": 2338,
2528
+ "[7B]": 2339,
2529
+ "[4HH]": 2340,
2530
+ "[18C-]": 2341,
2531
+ "[22CH3-]": 2342,
2532
+ "[22CH4]": 2343,
2533
+ "[17C-]": 2344,
2534
+ "[15CH3]": 2345,
2535
+ "[16CH3]": 2346,
2536
+ "[11NH3]": 2347,
2537
+ "[21NH3]": 2348,
2538
+ "[11N-]": 2349,
2539
+ "[11NH]": 2350,
2540
+ "[16CH]": 2351,
2541
+ "[17CH2]": 2352,
2542
+ "[99Ru+2]": 2353,
2543
+ "[181Ta+2]": 2354,
2544
+ "[181Ta]": 2355,
2545
+ "[20CH]": 2356,
2546
+ "[32PH2]": 2357,
2547
+ "[55Fe+2]": 2358,
2548
+ "[SH3]": 2359,
2549
+ "[S@H]": 2360,
2550
+ "[UNK]": 2361
2551
+ },
2552
+ "merges": []
2553
+ }
2554
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[CLS]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[SEP]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[PAD]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[MASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "2361": {
36
+ "content": "[UNK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "extra_special_tokens": {},
47
+ "mask_token": "[MASK]",
48
+ "model_max_length": 256,
49
+ "pad_token": "[PAD]",
50
+ "sep_token": "[SEP]",
51
+ "tokenizer_class": "PreTrainedTokenizerFast",
52
+ "unk_token": "[UNK]"
53
+ }