eacortes commited on
Commit
d6a8b09
·
verified ·
1 Parent(s): b0a83f5

Upload 19 files

Browse files
Files changed (19) hide show
  1. README.md +315 -3
  2. config.json +54 -0
  3. configuration_modchembert.py +84 -0
  4. logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_bace_classification_epochs100_batch_size32_20250918_164756.log +343 -0
  5. logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_bbbp_epochs100_batch_size32_20250918_171049.log +365 -0
  6. logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_clintox_epochs100_batch_size32_20250918_191718.log +377 -0
  7. logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_hiv_epochs100_batch_size32_20250923_141720.log +323 -0
  8. logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_sider_epochs100_batch_size32_20250918_185547.log +353 -0
  9. logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_tox21_epochs100_batch_size32_20250918_173530.log +347 -0
  10. logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_bace_regression_epochs100_batch_size32_20250918_171320.log +323 -0
  11. logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_clearance_epochs100_batch_size32_20250918_194635.log +331 -0
  12. logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_delaney_epochs100_batch_size32_20250918_164756.log +371 -0
  13. logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_freesolv_epochs100_batch_size32_20250918_174904.log +383 -0
  14. logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_lipo_epochs100_batch_size32_20250918_180601.log +353 -0
  15. model.safetensors +3 -0
  16. modeling_modchembert.py +554 -0
  17. special_tokens_map.json +37 -0
  18. tokenizer.json +2554 -0
  19. tokenizer_config.json +53 -0
README.md CHANGED
@@ -1,3 +1,315 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: Derify/ModChemBERT-MLM-DAPT
4
+ datasets:
5
+ - Derify/augmented_canonical_druglike_QED_Pfizer_15M
6
+ metrics:
7
+ - roc_auc
8
+ - rmse
9
+ library_name: transformers
10
+ tags:
11
+ - modernbert
12
+ - ModChemBERT
13
+ - cheminformatics
14
+ - chemical-language-model
15
+ - molecular-property-prediction
16
+ - mergekit
17
+ - merge
18
+ pipeline_tag: fill-mask
19
+ model-index:
20
+ - name: Derify/ModChemBERT-MLM
21
+ results:
22
+ - task:
23
+ type: text-classification
24
+ name: Classification (ROC AUC)
25
+ dataset:
26
+ name: BACE
27
+ type: BACE
28
+ metrics:
29
+ - type: roc_auc
30
+ value: 0.8213
31
+ - task:
32
+ type: text-classification
33
+ name: Classification (ROC AUC)
34
+ dataset:
35
+ name: BBBP
36
+ type: BBBP
37
+ metrics:
38
+ - type: roc_auc
39
+ value: 0.7356
40
+ - task:
41
+ type: text-classification
42
+ name: Classification (ROC AUC)
43
+ dataset:
44
+ name: CLINTOX
45
+ type: CLINTOX
46
+ metrics:
47
+ - type: roc_auc
48
+ value: 0.9664
49
+ - task:
50
+ type: text-classification
51
+ name: Classification (ROC AUC)
52
+ dataset:
53
+ name: HIV
54
+ type: HIV
55
+ metrics:
56
+ - type: roc_auc
57
+ value: 0.7750
58
+ - task:
59
+ type: text-classification
60
+ name: Classification (ROC AUC)
61
+ dataset:
62
+ name: SIDER
63
+ type: SIDER
64
+ metrics:
65
+ - type: roc_auc
66
+ value: 0.6415
67
+ - task:
68
+ type: text-classification
69
+ name: Classification (ROC AUC)
70
+ dataset:
71
+ name: TOX21
72
+ type: TOX21
73
+ metrics:
74
+ - type: roc_auc
75
+ value: 0.7263
76
+ - task:
77
+ type: regression
78
+ name: Regression (RMSE)
79
+ dataset:
80
+ name: BACE
81
+ type: BACE
82
+ metrics:
83
+ - type: rmse
84
+ value: 0.9713
85
+ - task:
86
+ type: regression
87
+ name: Regression (RMSE)
88
+ dataset:
89
+ name: CLEARANCE
90
+ type: CLEARANCE
91
+ metrics:
92
+ - type: rmse
93
+ value: 42.8010
94
+ - task:
95
+ type: regression
96
+ name: Regression (RMSE)
97
+ dataset:
98
+ name: ESOL
99
+ type: ESOL
100
+ metrics:
101
+ - type: rmse
102
+ value: 0.8169
103
+ - task:
104
+ type: regression
105
+ name: Regression (RMSE)
106
+ dataset:
107
+ name: FREESOLV
108
+ type: FREESOLV
109
+ metrics:
110
+ - type: rmse
111
+ value: 0.5445
112
+ - task:
113
+ type: regression
114
+ name: Regression (RMSE)
115
+ dataset:
116
+ name: LIPO
117
+ type: LIPO
118
+ metrics:
119
+ - type: rmse
120
+ value: 0.6820
121
+ ---
122
+
123
+ # ModChemBERT: ModernBERT as a Chemical Language Model
124
+ ModChemBERT is a ModernBERT-based chemical language model (CLM), trained on SMILES strings for masked language modeling (MLM) and downstream molecular property prediction (classification & regression).
125
+
126
+ ## Usage
127
+ ### Load Model
128
+ ```python
129
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
130
+
131
+ model_id = "Derify/ModChemBERT-MLM-DAPT-TAFT"
132
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
133
+ model = AutoModelForMaskedLM.from_pretrained(
134
+ model_id,
135
+ trust_remote_code=True,
136
+ dtype="float16",
137
+ device_map="auto",
138
+ )
139
+ ```
140
+
141
+ ### Fill-Mask Pipeline
142
+ ```python
143
+ from transformers import pipeline
144
+
145
+ fill = pipeline("fill-mask", model=model, tokenizer=tokenizer)
146
+ print(fill("c1ccccc1[MASK]"))
147
+ ```
148
+
149
+ ## Intended Use
150
+ * Primary: Research and development for molecular property prediction, experimentation with pooling strategies, and as a foundational model for downstream applications.
151
+ * Appropriate for: Binary / multi-class classification (e.g., toxicity, activity) and single-task or multi-task regression (e.g., solubility, clearance) after fine-tuning.
152
+ * Not intended for generating novel molecules.
153
+
154
+ ## Limitations
155
+ - Out-of-domain performance may degrade for very long (>128 token) SMILES, inorganic / organometallic compounds, polymers, and charged / enumerated tautomers, as these are not well represented in training.
156
+ - No guarantee of synthesizability, safety, or biological efficacy.
157
+
158
+ ## Ethical Considerations & Responsible Use
159
+ - Potential biases arise from training corpora skewed to drug-like space.
160
+ - Do not deploy in clinical or regulatory settings without rigorous, domain-specific validation.
161
+
162
+ ## Architecture
163
+ - Backbone: ModernBERT
164
+ - Hidden size: 768
165
+ - Intermediate size: 1152
166
+ - Encoder Layers: 22
167
+ - Attention heads: 12
168
+ - Max sequence length: 256 tokens (MLM primarily trained with 128-token sequences)
169
+ - Vocabulary: BPE tokenizer using [MolFormer's vocab](https://github.com/emapco/ModChemBERT/blob/main/modchembert/tokenizers/molformer/vocab.json) (2362 tokens)
170
+
171
+ ## Pooling (Classifier / Regressor Head)
172
+ Kallergis et al. [1] demonstrated that the CLM embedding method prior to the prediction head can significantly impact downstream performance.
173
+
174
+ Behrendt et al. [2] noted that the last few layers contain task-specific information and that pooling methods leveraging information from multiple layers can enhance model performance. Their results further demonstrated that the `max_seq_mha` pooling method was particularly effective in low-data regimes, which is often the case for molecular property prediction tasks.
175
+
176
+ Multiple pooling strategies are supported by ModChemBERT to explore their impact on downstream performance:
177
+ - `cls`: Last layer [CLS]
178
+ - `mean`: Mean over last hidden layer
179
+ - `max_cls`: Max over last k layers of [CLS]
180
+ - `cls_mha`: MHA with [CLS] as query
181
+ - `max_seq_mha`: MHA with max pooled sequence as KV and max pooled [CLS] as query
182
+ - `sum_mean`: Sum over all layers then mean tokens
183
+ - `sum_sum`: Sum over all layers then sum tokens
184
+ - `mean_mean`: Mean over all layers then mean tokens
185
+ - `mean_sum`: Mean over all layers then sum tokens
186
+ - `max_seq_mean`: Max over last k layers then mean tokens
187
+
188
+ ## Training Pipeline
189
+ <div align="center">
190
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/656892962693fa22e18b5331/bxNbpgMkU8m60ypyEJoWQ.png" alt="ModChemBERT Training Pipeline" width="650"/>
191
+ </div>
192
+
193
+ ### Rationale for MTR Stage
194
+ Following Sultan et al. [3], multi-task regression (physicochemical properties) biases the latent space toward ADME-related representations prior to narrow TAFT specialization. Sultan et al. observed that MLM + DAPT (MTR) outperforms MLM-only, MTR-only, and MTR + DAPT (MTR).
195
+
196
+ ### Checkpoint Averaging Motivation
197
+ Inspired by ModernBERT [4], JaColBERTv2.5 [5], and Llama 3.1 [6], where results show that model merging can enhance generalization or performance while mitigating overfitting to any single fine-tune or annealing checkpoint.
198
+
199
+ ## Datasets
200
+ - Pretraining: [Derify/augmented_canonical_druglike_QED_Pfizer_15M](https://huggingface.co/datasets/Derify/augmented_canonical_druglike_QED_Pfizer_15M)
201
+ - Domain Adaptive Pretraining (DAPT) & Task Adaptive Fine-tuning (TAFT): ADME + AstraZeneca datasets (10 tasks) with scaffold splits from DA4MT pipeline (see [domain-adaptation-molecular-transformers](https://github.com/emapco/ModChemBERT/tree/main/domain-adaptation-molecular-transformers))
202
+ - Benchmarking: ChemBERTa-3 [7] tasks (BACE, BBBP, TOX21, HIV, SIDER, CLINTOX for classification; ESOL, FREESOLV, LIPO, BACE, CLEARANCE for regression)
203
+
204
+ ## Benchmarking
205
+ Benchmarks were conducted with the ChemBERTa-3 framework using DeepChem scaffold splits. Each task was trained for 100 epochs with 3 random seeds.
206
+
207
+ ### Evaluation Methodology
208
+ - Classification Metric: ROC AUC.
209
+ - Regression Metric: RMSE.
210
+ - Aggregation: Mean ± standard deviation of the triplicate results.
211
+ - Input Constraints: SMILES truncated / filtered to ≤200 tokens, following the MolFormer paper's recommendation.
212
+
213
+ ### Results
214
+ <details><summary>Click to expand</summary>
215
+
216
+ #### Classification Datasets (ROC AUC - Higher is better)
217
+
218
+ | Model | BACE↑ | BBBP↑ | CLINTOX↑ | HIV↑ | SIDER↑ | TOX21↑ | AVG† |
219
+ | ---------------------------------------------------------------------------- | ----------------- | ----------------- | --------------------- | --------------------- | --------------------- | ----------------- | ------ |
220
+ | **Tasks** | 1 | 1 | 2 | 1 | 27 | 12 | |
221
+ | [ChemBERTa-100M-MLM](https://huggingface.co/DeepChem/ChemBERTa-100M-MLM)* | 0.781 ± 0.019 | 0.700 ± 0.027 | 0.979 ± 0.022 | 0.740 ± 0.013 | 0.611 ± 0.002 | 0.718 ± 0.011 | 0.7548 |
222
+ | [c3-MoLFormer-1.1B](https://huggingface.co/DeepChem/MoLFormer-c3-1.1B)* | 0.819 ± 0.019 | 0.735 ± 0.019 | 0.839 ± 0.013 | 0.762 ± 0.005 | 0.618 ± 0.005 | 0.723 ± 0.012 | 0.7493 |
223
+ | MoLFormer-LHPC* | **0.887 ± 0.004** | **0.908 ± 0.013** | 0.993 ± 0.004 | 0.750 ± 0.003 | 0.622 ± 0.007 | **0.791 ± 0.014** | 0.8252 |
224
+ | ------------------------- | ----------------- | ----------------- | ------------------- | ------------------- | ------------------- | ----------------- | ------ |
225
+ | [MLM](https://huggingface.co/Derify/ModChemBERT-MLM) | 0.8065 ± 0.0103 | 0.7222 ± 0.0150 | 0.9709 ± 0.0227 | ***0.7800 ± 0.0133*** | 0.6419 ± 0.0113 | 0.7400 ± 0.0044 | 0.7769 |
226
+ | [MLM + DAPT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT) | 0.8224 ± 0.0156 | 0.7402 ± 0.0095 | 0.9820 ± 0.0138 | 0.7702 ± 0.0020 | 0.6303 ± 0.0039 | 0.7360 ± 0.0036 | 0.7802 |
227
+ | [MLM + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-TAFT) | 0.7924 ± 0.0155 | 0.7282 ± 0.0058 | 0.9725 ± 0.0213 | 0.7770 ± 0.0047 | 0.6542 ± 0.0128 | *0.7646 ± 0.0039* | 0.7815 |
228
+ | [MLM + DAPT + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT-TAFT) | 0.8213 ± 0.0051 | 0.7356 ± 0.0094 | 0.9664 ± 0.0202 | 0.7750 ± 0.0048 | 0.6415 ± 0.0094 | 0.7263 ± 0.0036 | 0.7777 |
229
+ | [MLM + DAPT + TAFT OPT](https://huggingface.co/Derify/ModChemBERT) | *0.8346 ± 0.0045* | *0.7573 ± 0.0120* | ***0.9938 ± 0.0017*** | 0.7737 ± 0.0034 | ***0.6600 ± 0.0061*** | 0.7518 ± 0.0047 | 0.7952 |
230
+
231
+ #### Regression Datasets (RMSE - Lower is better)
232
+
233
+ | Model | BACE↓ | CLEARANCE↓ | ESOL↓ | FREESOLV↓ | LIPO↓ | AVG‡ |
234
+ | ---------------------------------------------------------------------------- | --------------------- | ---------------------- | --------------------- | --------------------- | --------------------- | ---------------- |
235
+ | **Tasks** | 1 | 1 | 1 | 1 | 1 | |
236
+ | [ChemBERTa-100M-MLM](https://huggingface.co/DeepChem/ChemBERTa-100M-MLM)* | 1.011 ± 0.038 | 51.582 ± 3.079 | 0.920 ± 0.011 | 0.536 ± 0.016 | 0.758 ± 0.013 | 0.8063 / 10.9614 |
237
+ | [c3-MoLFormer-1.1B](https://huggingface.co/DeepChem/MoLFormer-c3-1.1B)* | 1.094 ± 0.126 | 52.058 ± 2.767 | 0.829 ± 0.019 | 0.572 ± 0.023 | 0.728 ± 0.016 | 0.8058 / 11.0562 |
238
+ | MoLFormer-LHPC* | 1.201 ± 0.100 | 45.74 ± 2.637 | 0.848 ± 0.031 | 0.683 ± 0.040 | 0.895 ± 0.080 | 0.9068 / 9.8734 |
239
+ | ------------------------- | ------------------- | -------------------- | ------------------- | ------------------- | ------------------- | ---------------- |
240
+ | [MLM](https://huggingface.co/Derify/ModChemBERT-MLM) | 1.0893 ± 0.1319 | 49.0005 ± 1.2787 | 0.8456 ± 0.0406 | 0.5491 ± 0.0134 | 0.7147 ± 0.0062 | 0.7997 / 10.4398 |
241
+ | [MLM + DAPT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT) | 0.9931 ± 0.0258 | 45.4951 ± 0.7112 | 0.9319 ± 0.0153 | 0.6049 ± 0.0666 | 0.6874 ± 0.0040 | 0.8043 / 9.7425 |
242
+ | [MLM + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-TAFT) | 1.0304 ± 0.1146 | 47.8418 ± 0.4070 | ***0.7669 ± 0.0024*** | 0.5293 ± 0.0267 | 0.6708 ± 0.0074 | 0.7493 / 10.1678 |
243
+ | [MLM + DAPT + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT-TAFT) | 0.9713 ± 0.0224 | ***42.8010 ± 3.3475*** | 0.8169 ± 0.0268 | 0.5445 ± 0.0257 | 0.6820 ± 0.0028 | 0.7537 / 9.1631 |
244
+ | [MLM + DAPT + TAFT OPT](https://huggingface.co/Derify/ModChemBERT) | ***0.9665 ± 0.0250*** | 44.0137 ± 1.1110 | 0.8158 ± 0.0115 | ***0.4979 ± 0.0158*** | ***0.6505 ± 0.0126*** | 0.7327 / 9.3889 |
245
+
246
+ **Bold** indicates the best result in the column; *italic* indicates the best result among ModChemBERT checkpoints.<br/>
247
+ \* Published results from the ChemBERTa-3 [7] paper for optimized chemical language models using DeepChem scaffold splits.<br/>
248
+ † AVG column shows the mean score across all classification tasks.<br/>
249
+ ‡ AVG column shows the mean scores across all regression tasks without and with the clearance score.
250
+
251
+ </details>
252
+
253
+ ## Optimized ModChemBERT Hyperparameters
254
+
255
+ <details><summary>Click to expand</summary>
256
+
257
+ ### TAFT Datasets
258
+ Optimal parameters (per dataset) for the `MLM + DAPT + TAFT OPT` merged model:
259
+
260
+ | Dataset | Learning Rate | Batch Size | Warmup Ratio | Classifier Pooling | Last k Layers |
261
+ | ---------------------- | ------------- | ---------- | ------------ | ------------------ | ------------- |
262
+ | adme_microsom_stab_h | 3e-5 | 8 | 0.0 | max_seq_mean | 5 |
263
+ | adme_microsom_stab_r | 3e-5 | 16 | 0.2 | max_cls | 3 |
264
+ | adme_permeability | 3e-5 | 8 | 0.0 | max_cls | 3 |
265
+ | adme_ppb_h | 1e-5 | 32 | 0.1 | max_seq_mean | 5 |
266
+ | adme_ppb_r | 1e-5 | 32 | 0.0 | sum_mean | N/A |
267
+ | adme_solubility | 3e-5 | 32 | 0.0 | sum_mean | N/A |
268
+ | astrazeneca_CL | 3e-5 | 8 | 0.1 | max_seq_mha | 3 |
269
+ | astrazeneca_LogD74 | 1e-5 | 8 | 0.0 | max_seq_mean | 5 |
270
+ | astrazeneca_PPB | 1e-5 | 32 | 0.0 | max_cls | 3 |
271
+ | astrazeneca_Solubility | 1e-5 | 32 | 0.0 | max_seq_mean | 5 |
272
+
273
+ ### Benchmarking Datasets
274
+ Optimal parameters (per dataset) for the `MLM + DAPT + TAFT OPT` merged model:
275
+
276
+ | Dataset | Batch Size | Classifier Pooling | Last k Layers | Pooling Attention Dropout | Classifier Dropout | Embedding Dropout |
277
+ | ------------------- | ---------- | ------------------ | ------------- | ------------------------- | ------------------ | ----------------- |
278
+ | bace_classification | 32 | max_seq_mha | 3 | 0.0 | 0.0 | 0.0 |
279
+ | bbbp | 64 | max_cls | 3 | 0.1 | 0.0 | 0.0 |
280
+ | clintox | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
281
+ | hiv | 32 | max_seq_mha | 3 | 0.0 | 0.0 | 0.0 |
282
+ | sider | 32 | mean | N/A | 0.1 | 0.0 | 0.1 |
283
+ | tox21 | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
284
+ | bace_regression | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
285
+ | clearance | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
286
+ | esol | 64 | sum_mean | N/A | 0.1 | 0.0 | 0.1 |
287
+ | freesolv | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
288
+ | lipo | 32 | max_seq_mha | 3 | 0.1 | 0.1 | 0.1 |
289
+
290
+ </details>
291
+
292
+ ## Hardware
293
+ Training and experiments were performed on 2 NVIDIA RTX 3090 GPUs.
294
+
295
+ ## Citation
296
+ If you use ModChemBERT in your research, please cite the checkpoint and the following:
297
+ ```
298
+ @software{cortes-2025-modchembert,
299
+ author = {Emmanuel Cortes},
300
+ title = {ModChemBERT: ModernBERT as a Chemical Language Model},
301
+ year = {2025},
302
+ publisher = {GitHub},
303
+ howpublished = {GitHub repository},
304
+ url = {https://github.com/emapco/ModChemBERT}
305
+ }
306
+ ```
307
+
308
+ ## References
309
+ 1. Kallergis, Georgios, et al. "Domain adaptable language modeling of chemical compounds identifies potent pathoblockers for Pseudomonas aeruginosa." Communications Chemistry 8.1 (2025): 114.
310
+ 2. Behrendt, Maike, Stefan Sylvius Wagner, and Stefan Harmeling. "MaxPoolBERT: Enhancing BERT Classification via Layer- and Token-Wise Aggregation." arXiv preprint arXiv:2505.15696 (2025).
311
+ 3. Sultan, Afnan, et al. "Transformers for molecular property prediction: Domain adaptation efficiently improves performance." arXiv preprint arXiv:2503.03360 (2025).
312
+ 4. Warner, Benjamin, et al. "Smarter, better, faster, longer: A modern bidirectional encoder for fast, memory efficient, and long context finetuning and inference." arXiv preprint arXiv:2412.13663 (2024).
313
+ 5. Clavié, Benjamin. "JaColBERTv2.5: Optimising Multi-Vector Retrievers to Create State-of-the-Art Japanese Retrievers with Constrained Resources." Journal of Natural Language Processing 32.1 (2025): 176-218.
314
+ 6. Grattafiori, Aaron, et al. "The llama 3 herd of models." arXiv preprint arXiv:2407.21783 (2024).
315
+ 7. Singh, Riya, et al. "ChemBERTa-3: An Open Source Training Framework for Chemical Foundation Models." (2025).
config.json ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "ModChemBertForMaskedLM",
4
+ "ModChemBertForSequenceClassification"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.1,
8
+ "auto_map": {
9
+ "AutoConfig": "configuration_modchembert.ModChemBertConfig",
10
+ "AutoModelForMaskedLM": "modeling_modchembert.ModChemBertForMaskedLM",
11
+ "AutoModelForSequenceClassification": "modeling_modchembert.ModChemBertForSequenceClassification"
12
+ },
13
+ "bos_token_id": 0,
14
+ "classifier_activation": "gelu",
15
+ "classifier_bias": false,
16
+ "classifier_dropout": 0.0,
17
+ "classifier_pooling": "max_seq_mha",
18
+ "classifier_pooling_attention_dropout": 0.1,
19
+ "classifier_pooling_last_k": 3,
20
+ "classifier_pooling_num_attention_heads": 4,
21
+ "cls_token_id": 0,
22
+ "decoder_bias": true,
23
+ "deterministic_flash_attn": false,
24
+ "dtype": "float32",
25
+ "embedding_dropout": 0.1,
26
+ "eos_token_id": 1,
27
+ "global_attn_every_n_layers": 3,
28
+ "global_rope_theta": 160000.0,
29
+ "hidden_activation": "gelu",
30
+ "hidden_size": 768,
31
+ "initializer_cutoff_factor": 2.0,
32
+ "initializer_range": 0.02,
33
+ "intermediate_size": 1152,
34
+ "layer_norm_eps": 1e-05,
35
+ "local_attention": 8,
36
+ "local_rope_theta": 10000.0,
37
+ "max_position_embeddings": 256,
38
+ "mlp_bias": false,
39
+ "mlp_dropout": 0.1,
40
+ "model_type": "modchembert",
41
+ "norm_bias": false,
42
+ "norm_eps": 1e-05,
43
+ "num_attention_heads": 12,
44
+ "num_hidden_layers": 22,
45
+ "num_labels": 1,
46
+ "pad_token_id": 2,
47
+ "position_embedding_type": "absolute",
48
+ "repad_logits_with_grad": false,
49
+ "sep_token_id": 1,
50
+ "sparse_pred_ignore_index": -100,
51
+ "sparse_prediction": false,
52
+ "transformers_version": "4.56.1",
53
+ "vocab_size": 2362
54
+ }
configuration_modchembert.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 Emmanuel Cortes, All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Literal
16
+
17
+ from transformers.models.modernbert.configuration_modernbert import ModernBertConfig
18
+
19
+
20
+ class ModChemBertConfig(ModernBertConfig):
21
+ """
22
+ Configuration class for ModChemBert models.
23
+
24
+ This configuration class extends ModernBertConfig with additional parameters specific to
25
+ chemical molecule modeling and custom pooling strategies for classification/regression tasks.
26
+ It accepts all arguments and keyword arguments from ModernBertConfig.
27
+
28
+ Args:
29
+ classifier_pooling (str, optional): Pooling strategy for sequence classification.
30
+ Available options:
31
+ - "cls": Use CLS token representation
32
+ - "mean": Attention-weighted average pooling
33
+ - "sum_mean": Sum all hidden states across layers, then mean pool over sequence (ChemLM approach)
34
+ - "sum_sum": Sum all hidden states across layers, then sum pool over sequence
35
+ - "mean_mean": Mean all hidden states across layers, then mean pool over sequence
36
+ - "mean_sum": Mean all hidden states across layers, then sum pool over sequence
37
+ - "max_cls": Element-wise max pooling over last k hidden states, then take CLS token
38
+ - "cls_mha": Multi-head attention with CLS token as query and full sequence as keys/values
39
+ - "max_seq_mha": Max pooling over last k states + multi-head attention with CLS as query
40
+ - "max_seq_mean": Max pooling over last k hidden states, then mean pooling over sequence
41
+ Defaults to "max_seq_mha".
42
+ classifier_pooling_num_attention_heads (int, optional): Number of attention heads for multi-head attention
43
+ pooling strategies (cls_mha, max_seq_mha). Defaults to 4.
44
+ classifier_pooling_attention_dropout (float, optional): Dropout probability for multi-head attention
45
+ pooling strategies (cls_mha, max_seq_mha). Defaults to 0.0.
46
+ classifier_pooling_last_k (int, optional): Number of last hidden layers to use for max pooling
47
+ strategies (max_cls, max_seq_mha, max_seq_mean). Defaults to 8.
48
+ *args: Variable length argument list passed to ModernBertConfig.
49
+ **kwargs: Arbitrary keyword arguments passed to ModernBertConfig.
50
+
51
+ Note:
52
+ This class inherits all configuration parameters from ModernBertConfig including
53
+ hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, etc.
54
+ """
55
+
56
+ model_type = "modchembert"
57
+
58
+ def __init__(
59
+ self,
60
+ *args,
61
+ classifier_pooling: Literal[
62
+ "cls",
63
+ "mean",
64
+ "sum_mean",
65
+ "sum_sum",
66
+ "mean_mean",
67
+ "mean_sum",
68
+ "max_cls",
69
+ "cls_mha",
70
+ "max_seq_mha",
71
+ "max_seq_mean",
72
+ ] = "max_seq_mha",
73
+ classifier_pooling_num_attention_heads: int = 4,
74
+ classifier_pooling_attention_dropout: float = 0.0,
75
+ classifier_pooling_last_k: int = 8,
76
+ **kwargs,
77
+ ):
78
+ # Pass classifier_pooling="cls" to circumvent ValueError in ModernBertConfig init
79
+ super().__init__(*args, classifier_pooling="cls", **kwargs)
80
+ # Override with custom value
81
+ self.classifier_pooling = classifier_pooling
82
+ self.classifier_pooling_num_attention_heads = classifier_pooling_num_attention_heads
83
+ self.classifier_pooling_attention_dropout = classifier_pooling_attention_dropout
84
+ self.classifier_pooling_last_k = classifier_pooling_last_k
logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_bace_classification_epochs100_batch_size32_20250918_164756.log ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 16:47:56,600 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Running benchmark for dataset: bace_classification
2
+ 2025-09-18 16:47:56,600 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - dataset: bace_classification, tasks: ['Class'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 16:47:56,606 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bace_classification at 2025-09-18_16-47-56
4
+ 2025-09-18 16:48:04,704 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5230 | Val mean-roc_auc_score: 0.6868
5
+ 2025-09-18 16:48:04,704 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
6
+ 2025-09-18 16:48:05,592 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6868
7
+ 2025-09-18 16:48:07,720 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3717 | Val mean-roc_auc_score: 0.6990
8
+ 2025-09-18 16:48:07,890 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
9
+ 2025-09-18 16:48:08,476 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.6990
10
+ 2025-09-18 16:48:13,091 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3504 | Val mean-roc_auc_score: 0.6950
11
+ 2025-09-18 16:48:17,982 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2697 | Val mean-roc_auc_score: 0.7095
12
+ 2025-09-18 16:48:18,167 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 152
13
+ 2025-09-18 16:48:18,775 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7095
14
+ 2025-09-18 16:48:23,885 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2270 | Val mean-roc_auc_score: 0.7119
15
+ 2025-09-18 16:48:24,086 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 190
16
+ 2025-09-18 16:48:24,649 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7119
17
+ 2025-09-18 16:48:29,211 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2188 | Val mean-roc_auc_score: 0.7563
18
+ 2025-09-18 16:48:29,764 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 228
19
+ 2025-09-18 16:48:30,416 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7563
20
+ 2025-09-18 16:48:35,782 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1776 | Val mean-roc_auc_score: 0.7058
21
+ 2025-09-18 16:48:38,535 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1875 | Val mean-roc_auc_score: 0.7059
22
+ 2025-09-18 16:48:43,135 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1612 | Val mean-roc_auc_score: 0.7198
23
+ 2025-09-18 16:48:48,183 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1826 | Val mean-roc_auc_score: 0.7424
24
+ 2025-09-18 16:48:52,726 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1458 | Val mean-roc_auc_score: 0.7269
25
+ 2025-09-18 16:48:57,884 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.7329
26
+ 2025-09-18 16:49:02,287 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1349 | Val mean-roc_auc_score: 0.7276
27
+ 2025-09-18 16:49:06,730 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1011 | Val mean-roc_auc_score: 0.7206
28
+ 2025-09-18 16:49:09,099 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0876 | Val mean-roc_auc_score: 0.7161
29
+ 2025-09-18 16:49:14,199 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0620 | Val mean-roc_auc_score: 0.7140
30
+ 2025-09-18 16:49:19,611 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1184 | Val mean-roc_auc_score: 0.7337
31
+ 2025-09-18 16:49:24,334 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7150
32
+ 2025-09-18 16:49:28,869 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0490 | Val mean-roc_auc_score: 0.7138
33
+ 2025-09-18 16:49:33,693 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0430 | Val mean-roc_auc_score: 0.6874
34
+ 2025-09-18 16:49:35,666 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0872 | Val mean-roc_auc_score: 0.7527
35
+ 2025-09-18 16:49:40,749 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0694 | Val mean-roc_auc_score: 0.7343
36
+ 2025-09-18 16:49:45,611 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0580 | Val mean-roc_auc_score: 0.7273
37
+ 2025-09-18 16:49:50,245 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0381 | Val mean-roc_auc_score: 0.7320
38
+ 2025-09-18 16:49:55,188 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0409 | Val mean-roc_auc_score: 0.7157
39
+ 2025-09-18 16:49:59,930 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0483 | Val mean-roc_auc_score: 0.7348
40
+ 2025-09-18 16:50:05,920 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0577 | Val mean-roc_auc_score: 0.7351
41
+ 2025-09-18 16:50:08,294 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0524 | Val mean-roc_auc_score: 0.7225
42
+ 2025-09-18 16:50:13,481 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0240 | Val mean-roc_auc_score: 0.7334
43
+ 2025-09-18 16:50:18,606 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0438 | Val mean-roc_auc_score: 0.7180
44
+ 2025-09-18 16:50:23,618 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0278 | Val mean-roc_auc_score: 0.7330
45
+ 2025-09-18 16:50:28,953 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0461 | Val mean-roc_auc_score: 0.7419
46
+ 2025-09-18 16:50:33,639 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0325 | Val mean-roc_auc_score: 0.7091
47
+ 2025-09-18 16:50:36,162 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0164 | Val mean-roc_auc_score: 0.7253
48
+ 2025-09-18 16:50:41,453 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0246 | Val mean-roc_auc_score: 0.7071
49
+ 2025-09-18 16:50:46,406 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0223 | Val mean-roc_auc_score: 0.7487
50
+ 2025-09-18 16:50:51,504 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0387 | Val mean-roc_auc_score: 0.7409
51
+ 2025-09-18 16:50:55,668 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0442 | Val mean-roc_auc_score: 0.7604
52
+ 2025-09-18 16:50:55,818 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 1444
53
+ 2025-09-18 16:50:56,342 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 38 with val mean-roc_auc_score: 0.7604
54
+ 2025-09-18 16:51:01,162 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0535 | Val mean-roc_auc_score: 0.7544
55
+ 2025-09-18 16:51:05,751 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0414 | Val mean-roc_auc_score: 0.7438
56
+ 2025-09-18 16:51:07,691 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0214 | Val mean-roc_auc_score: 0.7432
57
+ 2025-09-18 16:51:12,643 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.7474
58
+ 2025-09-18 16:51:17,026 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0234 | Val mean-roc_auc_score: 0.7449
59
+ 2025-09-18 16:51:21,879 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0711 | Val mean-roc_auc_score: 0.7273
60
+ 2025-09-18 16:51:26,652 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0404 | Val mean-roc_auc_score: 0.7263
61
+ 2025-09-18 16:51:31,726 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.7275
62
+ 2025-09-18 16:51:36,945 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.7309
63
+ 2025-09-18 16:51:39,445 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0123 | Val mean-roc_auc_score: 0.7257
64
+ 2025-09-18 16:51:44,745 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.7296
65
+ 2025-09-18 16:51:49,676 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.7227
66
+ 2025-09-18 16:51:54,631 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7189
67
+ 2025-09-18 16:51:59,632 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.7273
68
+ 2025-09-18 16:52:05,379 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0123 | Val mean-roc_auc_score: 0.7142
69
+ 2025-09-18 16:52:07,912 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7225
70
+ 2025-09-18 16:52:12,441 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7207
71
+ 2025-09-18 16:52:17,077 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7188
72
+ 2025-09-18 16:52:22,169 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7172
73
+ 2025-09-18 16:52:26,285 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.7238
74
+ 2025-09-18 16:52:30,758 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.7193
75
+ 2025-09-18 16:52:35,395 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7221
76
+ 2025-09-18 16:52:37,367 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.7231
77
+ 2025-09-18 16:52:41,837 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0222 | Val mean-roc_auc_score: 0.7113
78
+ 2025-09-18 16:52:46,541 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0393 | Val mean-roc_auc_score: 0.7206
79
+ 2025-09-18 16:52:51,382 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0189 | Val mean-roc_auc_score: 0.6977
80
+ 2025-09-18 16:52:56,041 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0175 | Val mean-roc_auc_score: 0.7253
81
+ 2025-09-18 16:53:00,700 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.7277
82
+ 2025-09-18 16:53:05,558 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.7084
83
+ 2025-09-18 16:53:07,656 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.7234
84
+ 2025-09-18 16:53:12,439 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.7047
85
+ 2025-09-18 16:53:17,316 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.6947
86
+ 2025-09-18 16:53:22,474 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0286 | Val mean-roc_auc_score: 0.7482
87
+ 2025-09-18 16:53:27,728 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0362 | Val mean-roc_auc_score: 0.7146
88
+ 2025-09-18 16:53:32,631 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0244 | Val mean-roc_auc_score: 0.7062
89
+ 2025-09-18 16:53:40,456 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0146 | Val mean-roc_auc_score: 0.7117
90
+ 2025-09-18 16:53:39,904 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0185 | Val mean-roc_auc_score: 0.7051
91
+ 2025-09-18 16:53:44,431 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0151 | Val mean-roc_auc_score: 0.7131
92
+ 2025-09-18 16:53:49,503 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.7258
93
+ 2025-09-18 16:53:54,139 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7244
94
+ 2025-09-18 16:53:59,755 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7189
95
+ 2025-09-18 16:54:04,396 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.7201
96
+ 2025-09-18 16:54:06,339 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0339 | Val mean-roc_auc_score: 0.7135
97
+ 2025-09-18 16:54:11,393 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0270 | Val mean-roc_auc_score: 0.6976
98
+ 2025-09-18 16:54:16,135 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.7181
99
+ 2025-09-18 16:54:21,126 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7185
100
+ 2025-09-18 16:54:25,641 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7182
101
+ 2025-09-18 16:54:30,035 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7170
102
+ 2025-09-18 16:54:34,819 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.7211
103
+ 2025-09-18 16:54:37,452 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.7222
104
+ 2025-09-18 16:54:42,125 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0200 | Val mean-roc_auc_score: 0.7166
105
+ 2025-09-18 16:54:46,993 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0200 | Val mean-roc_auc_score: 0.7043
106
+ 2025-09-18 16:54:51,671 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0214 | Val mean-roc_auc_score: 0.7265
107
+ 2025-09-18 16:54:56,797 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0175 | Val mean-roc_auc_score: 0.7126
108
+ 2025-09-18 16:55:01,827 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.6979
109
+ 2025-09-18 16:55:06,693 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7102
110
+ 2025-09-18 16:55:09,208 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0395 | Val mean-roc_auc_score: 0.6922
111
+ 2025-09-18 16:55:14,201 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0267 | Val mean-roc_auc_score: 0.7078
112
+ 2025-09-18 16:55:19,358 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0166 | Val mean-roc_auc_score: 0.7078
113
+ 2025-09-18 16:55:23,943 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.7126
114
+ 2025-09-18 16:55:28,591 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7114
115
+ 2025-09-18 16:55:33,605 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7110
116
+ 2025-09-18 16:55:34,042 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8162
117
+ 2025-09-18 16:55:34,351 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bace_classification at 2025-09-18_16-55-34
118
+ 2025-09-18 16:55:36,011 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5592 | Val mean-roc_auc_score: 0.7005
119
+ 2025-09-18 16:55:36,011 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
120
+ 2025-09-18 16:55:36,789 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7005
121
+ 2025-09-18 16:55:41,614 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3980 | Val mean-roc_auc_score: 0.7028
122
+ 2025-09-18 16:55:41,791 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
123
+ 2025-09-18 16:55:42,328 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7028
124
+ 2025-09-18 16:55:47,263 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3281 | Val mean-roc_auc_score: 0.7126
125
+ 2025-09-18 16:55:47,440 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
126
+ 2025-09-18 16:55:48,006 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7126
127
+ 2025-09-18 16:55:52,806 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2829 | Val mean-roc_auc_score: 0.7114
128
+ 2025-09-18 16:55:57,321 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2368 | Val mean-roc_auc_score: 0.6982
129
+ 2025-09-18 16:56:02,266 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2143 | Val mean-roc_auc_score: 0.6855
130
+ 2025-09-18 16:56:07,325 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1842 | Val mean-roc_auc_score: 0.6963
131
+ 2025-09-18 16:56:09,268 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1328 | Val mean-roc_auc_score: 0.6979
132
+ 2025-09-18 16:56:14,246 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1727 | Val mean-roc_auc_score: 0.7342
133
+ 2025-09-18 16:56:14,430 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 342
134
+ 2025-09-18 16:56:15,011 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.7342
135
+ 2025-09-18 16:56:19,526 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1431 | Val mean-roc_auc_score: 0.7064
136
+ 2025-09-18 16:56:24,422 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1476 | Val mean-roc_auc_score: 0.6910
137
+ 2025-09-18 16:56:29,456 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6951
138
+ 2025-09-18 16:56:34,198 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1209 | Val mean-roc_auc_score: 0.7048
139
+ 2025-09-18 16:56:36,422 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0996 | Val mean-roc_auc_score: 0.6780
140
+ 2025-09-18 16:56:41,362 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7003
141
+ 2025-09-18 16:56:45,936 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1226 | Val mean-roc_auc_score: 0.6686
142
+ 2025-09-18 16:56:51,162 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0703 | Val mean-roc_auc_score: 0.7008
143
+ 2025-09-18 16:56:56,233 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0880 | Val mean-roc_auc_score: 0.6847
144
+ 2025-09-18 16:57:01,450 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0593 | Val mean-roc_auc_score: 0.6532
145
+ 2025-09-18 16:57:06,295 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0748 | Val mean-roc_auc_score: 0.6780
146
+ 2025-09-18 16:57:09,009 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0814 | Val mean-roc_auc_score: 0.6773
147
+ 2025-09-18 16:57:14,658 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0369 | Val mean-roc_auc_score: 0.6947
148
+ 2025-09-18 16:57:19,872 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0263 | Val mean-roc_auc_score: 0.6883
149
+ 2025-09-18 16:57:24,882 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0410 | Val mean-roc_auc_score: 0.6739
150
+ 2025-09-18 16:57:29,908 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0333 | Val mean-roc_auc_score: 0.6882
151
+ 2025-09-18 16:57:35,040 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0261 | Val mean-roc_auc_score: 0.6990
152
+ 2025-09-18 16:57:38,873 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0219 | Val mean-roc_auc_score: 0.6783
153
+ 2025-09-18 16:57:43,955 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0189 | Val mean-roc_auc_score: 0.6751
154
+ 2025-09-18 16:57:48,817 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0393 | Val mean-roc_auc_score: 0.6599
155
+ 2025-09-18 16:57:54,218 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0524 | Val mean-roc_auc_score: 0.6919
156
+ 2025-09-18 16:57:59,365 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0592 | Val mean-roc_auc_score: 0.6726
157
+ 2025-09-18 16:58:04,757 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0933 | Val mean-roc_auc_score: 0.6900
158
+ 2025-09-18 16:58:06,852 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0512 | Val mean-roc_auc_score: 0.6981
159
+ 2025-09-18 16:58:11,832 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0720 | Val mean-roc_auc_score: 0.6724
160
+ 2025-09-18 16:58:16,523 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0211 | Val mean-roc_auc_score: 0.6790
161
+ 2025-09-18 16:58:21,265 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.6728
162
+ 2025-09-18 16:58:26,533 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0156 | Val mean-roc_auc_score: 0.6718
163
+ 2025-09-18 16:58:31,566 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0153 | Val mean-roc_auc_score: 0.6734
164
+ 2025-09-18 16:58:36,189 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.6760
165
+ 2025-09-18 16:58:38,326 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0134 | Val mean-roc_auc_score: 0.6736
166
+ 2025-09-18 16:58:43,240 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.6748
167
+ 2025-09-18 16:58:48,501 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.6734
168
+ 2025-09-18 16:58:53,601 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0104 | Val mean-roc_auc_score: 0.6895
169
+ 2025-09-18 16:58:58,388 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.6811
170
+ 2025-09-18 16:59:03,440 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0183 | Val mean-roc_auc_score: 0.6725
171
+ 2025-09-18 16:59:05,968 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0347 | Val mean-roc_auc_score: 0.6671
172
+ 2025-09-18 16:59:11,547 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0818 | Val mean-roc_auc_score: 0.7142
173
+ 2025-09-18 16:59:16,587 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0326 | Val mean-roc_auc_score: 0.6925
174
+ 2025-09-18 16:59:21,727 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0221 | Val mean-roc_auc_score: 0.6981
175
+ 2025-09-18 16:59:26,819 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0181 | Val mean-roc_auc_score: 0.7037
176
+ 2025-09-18 16:59:31,834 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0347 | Val mean-roc_auc_score: 0.6872
177
+ 2025-09-18 16:59:36,806 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0438 | Val mean-roc_auc_score: 0.6887
178
+ 2025-09-18 16:59:40,657 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0385 | Val mean-roc_auc_score: 0.6836
179
+ 2025-09-18 16:59:45,407 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.6816
180
+ 2025-09-18 16:59:50,420 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.6792
181
+ 2025-09-18 16:59:55,193 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.6892
182
+ 2025-09-18 17:00:00,192 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0315 | Val mean-roc_auc_score: 0.6795
183
+ 2025-09-18 17:00:04,551 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0334 | Val mean-roc_auc_score: 0.6816
184
+ 2025-09-18 17:00:06,841 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.7009
185
+ 2025-09-18 17:00:11,666 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.6941
186
+ 2025-09-18 17:00:16,267 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.6928
187
+ 2025-09-18 17:00:21,268 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.6860
188
+ 2025-09-18 17:00:25,938 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.6884
189
+ 2025-09-18 17:00:30,680 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.6905
190
+ 2025-09-18 17:00:35,365 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.6927
191
+ 2025-09-18 17:00:37,628 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.6892
192
+ 2025-09-18 17:00:42,769 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.6882
193
+ 2025-09-18 17:00:47,730 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.6887
194
+ 2025-09-18 17:00:52,493 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.6826
195
+ 2025-09-18 17:00:57,606 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.6829
196
+ 2025-09-18 17:01:02,545 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0198 | Val mean-roc_auc_score: 0.6868
197
+ 2025-09-18 17:01:05,639 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0142 | Val mean-roc_auc_score: 0.6855
198
+ 2025-09-18 17:01:10,813 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0802 | Val mean-roc_auc_score: 0.7067
199
+ 2025-09-18 17:01:15,983 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0521 | Val mean-roc_auc_score: 0.6873
200
+ 2025-09-18 17:01:21,707 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0273 | Val mean-roc_auc_score: 0.6635
201
+ 2025-09-18 17:01:26,840 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.6645
202
+ 2025-09-18 17:01:32,134 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0174 | Val mean-roc_auc_score: 0.6517
203
+ 2025-09-18 17:01:37,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.6525
204
+ 2025-09-18 17:01:40,570 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.6549
205
+ 2025-09-18 17:01:45,555 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.6559
206
+ 2025-09-18 17:01:50,030 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.6563
207
+ 2025-09-18 17:01:54,683 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.6514
208
+ 2025-09-18 17:01:59,726 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.6528
209
+ 2025-09-18 17:02:04,512 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.6536
210
+ 2025-09-18 17:02:06,877 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.6536
211
+ 2025-09-18 17:02:11,949 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.6488
212
+ 2025-09-18 17:02:17,145 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.6514
213
+ 2025-09-18 17:02:21,807 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0391 | Val mean-roc_auc_score: 0.6910
214
+ 2025-09-18 17:02:26,322 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0199 | Val mean-roc_auc_score: 0.6839
215
+ 2025-09-18 17:02:31,160 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0523 | Val mean-roc_auc_score: 0.7002
216
+ 2025-09-18 17:02:35,858 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0399 | Val mean-roc_auc_score: 0.6902
217
+ 2025-09-18 17:02:37,955 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0146 | Val mean-roc_auc_score: 0.6939
218
+ 2025-09-18 17:02:43,076 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0137 | Val mean-roc_auc_score: 0.6768
219
+ 2025-09-18 17:02:47,762 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0110 | Val mean-roc_auc_score: 0.6905
220
+ 2025-09-18 17:02:52,530 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.6836
221
+ 2025-09-18 17:02:56,752 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0389 | Val mean-roc_auc_score: 0.6913
222
+ 2025-09-18 17:03:02,029 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0364 | Val mean-roc_auc_score: 0.6917
223
+ 2025-09-18 17:03:06,603 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0298 | Val mean-roc_auc_score: 0.6929
224
+ 2025-09-18 17:03:09,005 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.6903
225
+ 2025-09-18 17:03:13,758 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.6943
226
+ 2025-09-18 17:03:14,296 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8283
227
+ 2025-09-18 17:03:14,648 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bace_classification at 2025-09-18_17-03-14
228
+ 2025-09-18 17:03:18,636 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5296 | Val mean-roc_auc_score: 0.6934
229
+ 2025-09-18 17:03:18,636 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
230
+ 2025-09-18 17:03:19,381 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6934
231
+ 2025-09-18 17:03:24,217 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3947 | Val mean-roc_auc_score: 0.6942
232
+ 2025-09-18 17:03:24,396 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
233
+ 2025-09-18 17:03:24,947 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.6942
234
+ 2025-09-18 17:03:30,113 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3705 | Val mean-roc_auc_score: 0.7191
235
+ 2025-09-18 17:03:30,328 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
236
+ 2025-09-18 17:03:30,907 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7191
237
+ 2025-09-18 17:03:35,923 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2829 | Val mean-roc_auc_score: 0.7127
238
+ 2025-09-18 17:03:38,847 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2632 | Val mean-roc_auc_score: 0.7307
239
+ 2025-09-18 17:03:39,050 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 190
240
+ 2025-09-18 17:03:39,632 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7307
241
+ 2025-09-18 17:03:44,856 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2254 | Val mean-roc_auc_score: 0.6998
242
+ 2025-09-18 17:03:50,164 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1850 | Val mean-roc_auc_score: 0.7071
243
+ 2025-09-18 17:03:55,150 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2363 | Val mean-roc_auc_score: 0.6827
244
+ 2025-09-18 17:03:59,807 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1768 | Val mean-roc_auc_score: 0.7285
245
+ 2025-09-18 17:04:04,556 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1513 | Val mean-roc_auc_score: 0.7004
246
+ 2025-09-18 17:04:06,414 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1484 | Val mean-roc_auc_score: 0.7416
247
+ 2025-09-18 17:04:06,953 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 418
248
+ 2025-09-18 17:04:07,548 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.7416
249
+ 2025-09-18 17:04:12,589 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1110 | Val mean-roc_auc_score: 0.6608
250
+ 2025-09-18 17:04:17,292 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1275 | Val mean-roc_auc_score: 0.7156
251
+ 2025-09-18 17:04:22,468 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0972 | Val mean-roc_auc_score: 0.6981
252
+ 2025-09-18 17:04:27,619 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0979 | Val mean-roc_auc_score: 0.7032
253
+ 2025-09-18 17:04:32,362 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1157 | Val mean-roc_auc_score: 0.7393
254
+ 2025-09-18 17:04:37,143 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0724 | Val mean-roc_auc_score: 0.6610
255
+ 2025-09-18 17:04:39,189 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1028 | Val mean-roc_auc_score: 0.7219
256
+ 2025-09-18 17:04:43,755 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0565 | Val mean-roc_auc_score: 0.7204
257
+ 2025-09-18 17:04:48,684 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0463 | Val mean-roc_auc_score: 0.7109
258
+ 2025-09-18 17:04:53,291 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0715 | Val mean-roc_auc_score: 0.7266
259
+ 2025-09-18 17:04:58,313 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0629 | Val mean-roc_auc_score: 0.7046
260
+ 2025-09-18 17:05:02,925 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0715 | Val mean-roc_auc_score: 0.7171
261
+ 2025-09-18 17:05:07,939 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0251 | Val mean-roc_auc_score: 0.6974
262
+ 2025-09-18 17:05:10,395 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7236
263
+ 2025-09-18 17:05:15,175 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0481 | Val mean-roc_auc_score: 0.7052
264
+ 2025-09-18 17:05:22,021 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0374 | Val mean-roc_auc_score: 0.7256
265
+ 2025-09-18 17:05:27,022 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0498 | Val mean-roc_auc_score: 0.6994
266
+ 2025-09-18 17:05:31,612 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.7420
267
+ 2025-09-18 17:05:31,770 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 1102
268
+ 2025-09-18 17:05:32,319 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 29 with val mean-roc_auc_score: 0.7420
269
+ 2025-09-18 17:05:37,139 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0863 | Val mean-roc_auc_score: 0.6942
270
+ 2025-09-18 17:05:38,986 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0356 | Val mean-roc_auc_score: 0.7102
271
+ 2025-09-18 17:05:43,822 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0172 | Val mean-roc_auc_score: 0.7067
272
+ 2025-09-18 17:05:48,891 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0222 | Val mean-roc_auc_score: 0.7006
273
+ 2025-09-18 17:05:53,981 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0284 | Val mean-roc_auc_score: 0.7238
274
+ 2025-09-18 17:05:58,745 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0311 | Val mean-roc_auc_score: 0.7096
275
+ 2025-09-18 17:06:03,438 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0206 | Val mean-roc_auc_score: 0.6977
276
+ 2025-09-18 17:06:06,025 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0256 | Val mean-roc_auc_score: 0.7120
277
+ 2025-09-18 17:06:10,852 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0222 | Val mean-roc_auc_score: 0.6959
278
+ 2025-09-18 17:06:15,932 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.6892
279
+ 2025-09-18 17:06:20,594 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0242 | Val mean-roc_auc_score: 0.6866
280
+ 2025-09-18 17:06:25,293 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.6998
281
+ 2025-09-18 17:06:30,078 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.6942
282
+ 2025-09-18 17:06:34,710 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0485 | Val mean-roc_auc_score: 0.6823
283
+ 2025-09-18 17:06:36,902 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1291 | Val mean-roc_auc_score: 0.7050
284
+ 2025-09-18 17:06:41,370 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0398 | Val mean-roc_auc_score: 0.7120
285
+ 2025-09-18 17:06:45,913 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0364 | Val mean-roc_auc_score: 0.7062
286
+ 2025-09-18 17:06:50,592 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0257 | Val mean-roc_auc_score: 0.7106
287
+ 2025-09-18 17:06:55,264 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0148 | Val mean-roc_auc_score: 0.7045
288
+ 2025-09-18 17:06:59,399 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0154 | Val mean-roc_auc_score: 0.7052
289
+ 2025-09-18 17:07:04,449 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.7066
290
+ 2025-09-18 17:07:07,354 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.7074
291
+ 2025-09-18 17:07:12,776 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.7176
292
+ 2025-09-18 17:07:18,535 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0262 | Val mean-roc_auc_score: 0.7219
293
+ 2025-09-18 17:07:23,599 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.7110
294
+ 2025-09-18 17:07:28,627 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.7069
295
+ 2025-09-18 17:07:33,660 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.7092
296
+ 2025-09-18 17:07:41,620 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.7045
297
+ 2025-09-18 17:07:41,128 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0483 | Val mean-roc_auc_score: 0.7394
298
+ 2025-09-18 17:07:46,390 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0822 | Val mean-roc_auc_score: 0.7349
299
+ 2025-09-18 17:07:51,700 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0233 | Val mean-roc_auc_score: 0.7315
300
+ 2025-09-18 17:07:56,898 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0131 | Val mean-roc_auc_score: 0.7253
301
+ 2025-09-18 17:08:02,588 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.7249
302
+ 2025-09-18 17:08:07,805 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.7219
303
+ 2025-09-18 17:08:10,073 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7188
304
+ 2025-09-18 17:08:14,975 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7176
305
+ 2025-09-18 17:08:20,062 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.7167
306
+ 2025-09-18 17:08:25,228 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7162
307
+ 2025-09-18 17:08:30,049 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7115
308
+ 2025-09-18 17:08:34,756 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7205
309
+ 2025-09-18 17:08:36,946 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.7258
310
+ 2025-09-18 17:08:41,544 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0113 | Val mean-roc_auc_score: 0.7439
311
+ 2025-09-18 17:08:42,000 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 2698
312
+ 2025-09-18 17:08:42,669 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 71 with val mean-roc_auc_score: 0.7439
313
+ 2025-09-18 17:08:47,527 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7236
314
+ 2025-09-18 17:08:52,298 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7191
315
+ 2025-09-18 17:08:56,904 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.7194
316
+ 2025-09-18 17:09:01,773 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.7223
317
+ 2025-09-18 17:09:06,375 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.7126
318
+ 2025-09-18 17:09:08,445 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.7214
319
+ 2025-09-18 17:09:12,869 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.7213
320
+ 2025-09-18 17:09:18,347 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0283 | Val mean-roc_auc_score: 0.7225
321
+ 2025-09-18 17:09:23,201 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.7245
322
+ 2025-09-18 17:09:28,359 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.7111
323
+ 2025-09-18 17:09:33,120 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.1074 | Val mean-roc_auc_score: 0.7347
324
+ 2025-09-18 17:09:37,936 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0403 | Val mean-roc_auc_score: 0.7062
325
+ 2025-09-18 17:09:39,923 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0186 | Val mean-roc_auc_score: 0.7207
326
+ 2025-09-18 17:09:44,354 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.7169
327
+ 2025-09-18 17:09:49,031 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.7132
328
+ 2025-09-18 17:09:53,810 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7153
329
+ 2025-09-18 17:09:58,474 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.7147
330
+ 2025-09-18 17:10:03,071 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.7122
331
+ 2025-09-18 17:10:07,609 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.7186
332
+ 2025-09-18 17:10:09,758 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.7184
333
+ 2025-09-18 17:10:14,338 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7187
334
+ 2025-09-18 17:10:19,156 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.7233
335
+ 2025-09-18 17:10:23,733 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.7238
336
+ 2025-09-18 17:10:28,240 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7162
337
+ 2025-09-18 17:10:32,450 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.7177
338
+ 2025-09-18 17:10:37,028 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.7188
339
+ 2025-09-18 17:10:38,733 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.7187
340
+ 2025-09-18 17:10:43,505 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.7177
341
+ 2025-09-18 17:10:48,410 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.7155
342
+ 2025-09-18 17:10:48,872 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8195
343
+ 2025-09-18 17:10:49,208 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.8213, Std Dev: 0.0051
logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_bbbp_epochs100_batch_size32_20250918_171049.log ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 17:10:49,229 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Running benchmark for dataset: bbbp
2
+ 2025-09-18 17:10:49,230 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - dataset: bbbp, tasks: ['p_np'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 17:10:49,234 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bbbp at 2025-09-18_17-10-49
4
+ 2025-09-18 17:10:53,496 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2452 | Val mean-roc_auc_score: 0.9863
5
+ 2025-09-18 17:10:53,497 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
6
+ 2025-09-18 17:10:54,260 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9863
7
+ 2025-09-18 17:10:59,765 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0889 | Val mean-roc_auc_score: 0.9783
8
+ 2025-09-18 17:11:05,716 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1214 | Val mean-roc_auc_score: 0.9881
9
+ 2025-09-18 17:11:05,885 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 156
10
+ 2025-09-18 17:11:06,491 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9881
11
+ 2025-09-18 17:11:10,147 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.9854
12
+ 2025-09-18 17:11:15,683 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0493 | Val mean-roc_auc_score: 0.9886
13
+ 2025-09-18 17:11:15,859 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 260
14
+ 2025-09-18 17:11:16,409 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9886
15
+ 2025-09-18 17:11:21,948 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0661 | Val mean-roc_auc_score: 0.9799
16
+ 2025-09-18 17:11:28,033 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0409 | Val mean-roc_auc_score: 0.9850
17
+ 2025-09-18 17:11:33,682 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0334 | Val mean-roc_auc_score: 0.9852
18
+ 2025-09-18 17:11:37,312 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0454 | Val mean-roc_auc_score: 0.9848
19
+ 2025-09-18 17:11:43,150 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0146 | Val mean-roc_auc_score: 0.9864
20
+ 2025-09-18 17:11:48,864 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.9878
21
+ 2025-09-18 17:11:55,367 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.9899
22
+ 2025-09-18 17:11:55,514 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 624
23
+ 2025-09-18 17:11:56,154 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9899
24
+ 2025-09-18 17:12:02,756 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9898
25
+ 2025-09-18 17:12:09,059 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9895
26
+ 2025-09-18 17:12:13,084 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.9896
27
+ 2025-09-18 17:12:19,095 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0972 | Val mean-roc_auc_score: 0.9813
28
+ 2025-09-18 17:12:25,673 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0724 | Val mean-roc_auc_score: 0.9829
29
+ 2025-09-18 17:12:31,798 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0264 | Val mean-roc_auc_score: 0.9867
30
+ 2025-09-18 17:12:37,621 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.9857
31
+ 2025-09-18 17:12:42,249 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.9868
32
+ 2025-09-18 17:12:48,371 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9876
33
+ 2025-09-18 17:12:54,669 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9874
34
+ 2025-09-18 17:13:00,522 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9868
35
+ 2025-09-18 17:13:06,291 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9878
36
+ 2025-09-18 17:13:09,636 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9876
37
+ 2025-09-18 17:13:15,380 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0110 | Val mean-roc_auc_score: 0.9873
38
+ 2025-09-18 17:13:21,432 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9860
39
+ 2025-09-18 17:13:26,805 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9874
40
+ 2025-09-18 17:13:32,759 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9854
41
+ 2025-09-18 17:13:38,538 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9859
42
+ 2025-09-18 17:13:41,689 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9871
43
+ 2025-09-18 17:13:47,697 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9868
44
+ 2025-09-18 17:13:53,234 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9853
45
+ 2025-09-18 17:13:58,880 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9846
46
+ 2025-09-18 17:14:04,633 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9864
47
+ 2025-09-18 17:14:07,792 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9865
48
+ 2025-09-18 17:14:13,915 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9849
49
+ 2025-09-18 17:14:19,679 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9839
50
+ 2025-09-18 17:14:26,417 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9867
51
+ 2025-09-18 17:14:32,060 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9780
52
+ 2025-09-18 17:14:37,526 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9818
53
+ 2025-09-18 17:14:41,194 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9896
54
+ 2025-09-18 17:14:46,928 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.9888
55
+ 2025-09-18 17:14:52,513 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9882
56
+ 2025-09-18 17:14:58,146 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9878
57
+ 2025-09-18 17:15:04,068 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9867
58
+ 2025-09-18 17:15:07,764 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9872
59
+ 2025-09-18 17:15:13,350 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9869
60
+ 2025-09-18 17:15:19,330 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9868
61
+ 2025-09-18 17:15:25,080 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9865
62
+ 2025-09-18 17:15:30,863 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9860
63
+ 2025-09-18 17:15:36,916 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9868
64
+ 2025-09-18 17:15:40,061 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9871
65
+ 2025-09-18 17:15:45,892 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9863
66
+ 2025-09-18 17:15:51,531 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9859
67
+ 2025-09-18 17:15:57,501 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9845
68
+ 2025-09-18 17:16:03,743 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9858
69
+ 2025-09-18 17:16:08,219 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9834
70
+ 2025-09-18 17:16:13,910 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9848
71
+ 2025-09-18 17:16:19,665 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9830
72
+ 2025-09-18 17:16:25,347 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9847
73
+ 2025-09-18 17:16:31,132 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9893
74
+ 2025-09-18 17:16:36,694 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9878
75
+ 2025-09-18 17:16:39,672 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9900
76
+ 2025-09-18 17:16:39,816 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3328
77
+ 2025-09-18 17:16:40,378 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 64 with val mean-roc_auc_score: 0.9900
78
+ 2025-09-18 17:16:46,316 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9899
79
+ 2025-09-18 17:16:51,876 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9888
80
+ 2025-09-18 17:16:57,906 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9892
81
+ 2025-09-18 17:17:03,668 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9891
82
+ 2025-09-18 17:17:12,419 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9894
83
+ 2025-09-18 17:17:13,176 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9895
84
+ 2025-09-18 17:17:19,464 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9875
85
+ 2025-09-18 17:17:25,903 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9884
86
+ 2025-09-18 17:17:31,663 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9885
87
+ 2025-09-18 17:17:37,487 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9868
88
+ 2025-09-18 17:17:40,666 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9882
89
+ 2025-09-18 17:17:46,933 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9874
90
+ 2025-09-18 17:17:54,506 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.9868
91
+ 2025-09-18 17:18:00,040 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9871
92
+ 2025-09-18 17:18:05,288 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.9872
93
+ 2025-09-18 17:18:08,080 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9873
94
+ 2025-09-18 17:18:13,272 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.9868
95
+ 2025-09-18 17:18:19,237 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9862
96
+ 2025-09-18 17:18:24,558 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9866
97
+ 2025-09-18 17:18:30,195 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9868
98
+ 2025-09-18 17:18:35,855 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9883
99
+ 2025-09-18 17:18:38,713 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9873
100
+ 2025-09-18 17:18:44,534 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9870
101
+ 2025-09-18 17:18:49,585 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9871
102
+ 2025-09-18 17:18:55,089 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9876
103
+ 2025-09-18 17:19:00,861 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9869
104
+ 2025-09-18 17:19:06,584 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.9875
105
+ 2025-09-18 17:19:10,059 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9875
106
+ 2025-09-18 17:19:15,785 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9878
107
+ 2025-09-18 17:19:21,643 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9877
108
+ 2025-09-18 17:19:27,315 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9859
109
+ 2025-09-18 17:19:32,972 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9859
110
+ 2025-09-18 17:19:37,739 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9869
111
+ 2025-09-18 17:19:43,453 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9864
112
+ 2025-09-18 17:19:48,993 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9881
113
+ 2025-09-18 17:19:53,790 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9871
114
+ 2025-09-18 17:19:54,231 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7438
115
+ 2025-09-18 17:19:54,567 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bbbp at 2025-09-18_17-19-54
116
+ 2025-09-18 17:19:59,322 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2224 | Val mean-roc_auc_score: 0.9789
117
+ 2025-09-18 17:19:59,322 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
118
+ 2025-09-18 17:19:59,923 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9789
119
+ 2025-09-18 17:20:05,617 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1680 | Val mean-roc_auc_score: 0.9838
120
+ 2025-09-18 17:20:05,783 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 104
121
+ 2025-09-18 17:20:06,323 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9838
122
+ 2025-09-18 17:20:09,541 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1214 | Val mean-roc_auc_score: 0.9823
123
+ 2025-09-18 17:20:15,273 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1426 | Val mean-roc_auc_score: 0.9825
124
+ 2025-09-18 17:20:20,767 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0829 | Val mean-roc_auc_score: 0.9849
125
+ 2025-09-18 17:20:20,959 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 260
126
+ 2025-09-18 17:20:21,535 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9849
127
+ 2025-09-18 17:20:27,131 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0658 | Val mean-roc_auc_score: 0.9835
128
+ 2025-09-18 17:20:32,980 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0320 | Val mean-roc_auc_score: 0.9855
129
+ 2025-09-18 17:20:33,162 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 364
130
+ 2025-09-18 17:20:33,694 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.9855
131
+ 2025-09-18 17:20:39,312 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0388 | Val mean-roc_auc_score: 0.9854
132
+ 2025-09-18 17:20:42,414 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0300 | Val mean-roc_auc_score: 0.9871
133
+ 2025-09-18 17:20:42,587 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 468
134
+ 2025-09-18 17:20:43,120 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.9871
135
+ 2025-09-18 17:20:48,976 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0154 | Val mean-roc_auc_score: 0.9875
136
+ 2025-09-18 17:20:49,147 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 520
137
+ 2025-09-18 17:20:49,677 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9875
138
+ 2025-09-18 17:20:55,211 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0110 | Val mean-roc_auc_score: 0.9887
139
+ 2025-09-18 17:20:55,739 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 572
140
+ 2025-09-18 17:20:56,290 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9887
141
+ 2025-09-18 17:21:02,067 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.9884
142
+ 2025-09-18 17:21:07,847 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.9861
143
+ 2025-09-18 17:21:11,050 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9869
144
+ 2025-09-18 17:21:16,525 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9848
145
+ 2025-09-18 17:21:22,047 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.9891
146
+ 2025-09-18 17:21:22,542 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 832
147
+ 2025-09-18 17:21:23,107 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val mean-roc_auc_score: 0.9891
148
+ 2025-09-18 17:21:29,007 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0185 | Val mean-roc_auc_score: 0.9770
149
+ 2025-09-18 17:21:34,407 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0981 | Val mean-roc_auc_score: 0.9888
150
+ 2025-09-18 17:21:39,672 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0478 | Val mean-roc_auc_score: 0.9865
151
+ 2025-09-18 17:21:43,201 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0217 | Val mean-roc_auc_score: 0.9868
152
+ 2025-09-18 17:21:48,240 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.9870
153
+ 2025-09-18 17:21:54,207 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.9860
154
+ 2025-09-18 17:21:59,517 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9871
155
+ 2025-09-18 17:22:05,148 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9872
156
+ 2025-09-18 17:22:08,698 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9876
157
+ 2025-09-18 17:22:14,812 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9877
158
+ 2025-09-18 17:22:20,892 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9877
159
+ 2025-09-18 17:22:26,671 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9873
160
+ 2025-09-18 17:22:32,714 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9873
161
+ 2025-09-18 17:22:38,554 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9877
162
+ 2025-09-18 17:22:41,847 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0282 | Val mean-roc_auc_score: 0.9880
163
+ 2025-09-18 17:22:48,198 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0255 | Val mean-roc_auc_score: 0.9877
164
+ 2025-09-18 17:22:54,388 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0276 | Val mean-roc_auc_score: 0.9862
165
+ 2025-09-18 17:23:00,400 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.9859
166
+ 2025-09-18 17:23:06,133 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.9852
167
+ 2025-09-18 17:23:09,706 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9854
168
+ 2025-09-18 17:23:16,301 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9861
169
+ 2025-09-18 17:23:22,481 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9862
170
+ 2025-09-18 17:23:29,060 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9863
171
+ 2025-09-18 17:23:34,757 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9863
172
+ 2025-09-18 17:23:37,772 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9864
173
+ 2025-09-18 17:23:43,718 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9863
174
+ 2025-09-18 17:23:48,761 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9860
175
+ 2025-09-18 17:23:54,410 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9864
176
+ 2025-09-18 17:24:00,010 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9862
177
+ 2025-09-18 17:24:05,489 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9868
178
+ 2025-09-18 17:24:08,635 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9871
179
+ 2025-09-18 17:24:14,227 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9871
180
+ 2025-09-18 17:24:19,782 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9872
181
+ 2025-09-18 17:24:25,551 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9869
182
+ 2025-09-18 17:24:31,031 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9878
183
+ 2025-09-18 17:24:37,036 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9877
184
+ 2025-09-18 17:24:40,249 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9879
185
+ 2025-09-18 17:24:45,726 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9874
186
+ 2025-09-18 17:24:51,005 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9872
187
+ 2025-09-18 17:24:56,457 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9870
188
+ 2025-09-18 17:25:02,568 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9872
189
+ 2025-09-18 17:25:08,755 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9871
190
+ 2025-09-18 17:25:11,125 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9876
191
+ 2025-09-18 17:25:15,576 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9875
192
+ 2025-09-18 17:25:19,756 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9877
193
+ 2025-09-18 17:25:24,798 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9881
194
+ 2025-09-18 17:25:29,553 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9881
195
+ 2025-09-18 17:25:33,764 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9882
196
+ 2025-09-18 17:25:38,219 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9880
197
+ 2025-09-18 17:25:39,828 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9883
198
+ 2025-09-18 17:25:44,537 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9874
199
+ 2025-09-18 17:25:49,214 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9880
200
+ 2025-09-18 17:25:53,579 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9879
201
+ 2025-09-18 17:25:57,972 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9875
202
+ 2025-09-18 17:26:02,261 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9881
203
+ 2025-09-18 17:26:06,747 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9878
204
+ 2025-09-18 17:26:08,758 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9882
205
+ 2025-09-18 17:26:12,957 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9877
206
+ 2025-09-18 17:26:17,084 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9879
207
+ 2025-09-18 17:26:21,376 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9880
208
+ 2025-09-18 17:26:27,035 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.9882
209
+ 2025-09-18 17:26:31,523 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9877
210
+ 2025-09-18 17:26:35,704 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9883
211
+ 2025-09-18 17:26:39,863 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9885
212
+ 2025-09-18 17:26:41,564 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9881
213
+ 2025-09-18 17:26:45,961 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9878
214
+ 2025-09-18 17:26:50,120 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9884
215
+ 2025-09-18 17:26:54,153 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9881
216
+ 2025-09-18 17:26:58,220 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0273 | Val mean-roc_auc_score: 0.9861
217
+ 2025-09-18 17:27:02,953 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0233 | Val mean-roc_auc_score: 0.9886
218
+ 2025-09-18 17:27:08,724 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0174 | Val mean-roc_auc_score: 0.9847
219
+ 2025-09-18 17:27:10,918 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0177 | Val mean-roc_auc_score: 0.9877
220
+ 2025-09-18 17:27:15,786 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0483 | Val mean-roc_auc_score: 0.9883
221
+ 2025-09-18 17:27:20,340 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0161 | Val mean-roc_auc_score: 0.9900
222
+ 2025-09-18 17:27:20,489 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 4680
223
+ 2025-09-18 17:27:21,090 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 90 with val mean-roc_auc_score: 0.9900
224
+ 2025-09-18 17:27:25,802 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.9895
225
+ 2025-09-18 17:27:30,777 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9890
226
+ 2025-09-18 17:27:35,276 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9886
227
+ 2025-09-18 17:27:39,909 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9882
228
+ 2025-09-18 17:27:41,903 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9884
229
+ 2025-09-18 17:27:46,397 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9878
230
+ 2025-09-18 17:27:52,282 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9884
231
+ 2025-09-18 17:27:57,003 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9878
232
+ 2025-09-18 17:28:01,205 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9887
233
+ 2025-09-18 17:28:05,492 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9886
234
+ 2025-09-18 17:28:05,995 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7404
235
+ 2025-09-18 17:28:06,427 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bbbp at 2025-09-18_17-28-06
236
+ 2025-09-18 17:28:10,195 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2115 | Val mean-roc_auc_score: 0.9861
237
+ 2025-09-18 17:28:10,195 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
238
+ 2025-09-18 17:28:08,359 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9861
239
+ 2025-09-18 17:28:12,554 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5000 | Val mean-roc_auc_score: 0.9833
240
+ 2025-09-18 17:28:16,991 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0992 | Val mean-roc_auc_score: 0.9837
241
+ 2025-09-18 17:28:21,243 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.9889
242
+ 2025-09-18 17:28:21,399 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 208
243
+ 2025-09-18 17:28:21,904 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9889
244
+ 2025-09-18 17:28:26,271 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0499 | Val mean-roc_auc_score: 0.9876
245
+ 2025-09-18 17:28:30,808 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0475 | Val mean-roc_auc_score: 0.9900
246
+ 2025-09-18 17:28:31,282 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 312
247
+ 2025-09-18 17:28:31,798 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9900
248
+ 2025-09-18 17:28:36,660 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0556 | Val mean-roc_auc_score: 0.9845
249
+ 2025-09-18 17:28:38,765 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0564 | Val mean-roc_auc_score: 0.9855
250
+ 2025-09-18 17:28:43,541 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0261 | Val mean-roc_auc_score: 0.9865
251
+ 2025-09-18 17:28:48,256 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0131 | Val mean-roc_auc_score: 0.9898
252
+ 2025-09-18 17:28:52,409 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.9890
253
+ 2025-09-18 17:28:56,904 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0123 | Val mean-roc_auc_score: 0.9901
254
+ 2025-09-18 17:28:57,062 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 624
255
+ 2025-09-18 17:28:57,581 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9901
256
+ 2025-09-18 17:29:02,168 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.9899
257
+ 2025-09-18 17:29:06,842 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9882
258
+ 2025-09-18 17:29:08,754 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0128 | Val mean-roc_auc_score: 0.9907
259
+ 2025-09-18 17:29:08,932 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 780
260
+ 2025-09-18 17:29:09,461 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val mean-roc_auc_score: 0.9907
261
+ 2025-09-18 17:29:14,172 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0603 | Val mean-roc_auc_score: 0.9898
262
+ 2025-09-18 17:29:19,373 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0245 | Val mean-roc_auc_score: 0.9911
263
+ 2025-09-18 17:29:19,547 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 884
264
+ 2025-09-18 17:29:20,157 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val mean-roc_auc_score: 0.9911
265
+ 2025-09-18 17:29:24,898 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0104 | Val mean-roc_auc_score: 0.9886
266
+ 2025-09-18 17:29:29,376 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.9893
267
+ 2025-09-18 17:29:34,838 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9912
268
+ 2025-09-18 17:29:35,011 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1040
269
+ 2025-09-18 17:29:35,560 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val mean-roc_auc_score: 0.9912
270
+ 2025-09-18 17:29:39,999 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9898
271
+ 2025-09-18 17:29:42,499 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9884
272
+ 2025-09-18 17:29:46,986 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9870
273
+ 2025-09-18 17:29:51,498 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9905
274
+ 2025-09-18 17:29:56,206 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9894
275
+ 2025-09-18 17:30:00,905 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9888
276
+ 2025-09-18 17:30:05,997 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9917
277
+ 2025-09-18 17:30:06,156 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1404
278
+ 2025-09-18 17:30:06,709 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val mean-roc_auc_score: 0.9917
279
+ 2025-09-18 17:30:08,775 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9913
280
+ 2025-09-18 17:30:13,321 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0408 | Val mean-roc_auc_score: 0.9823
281
+ 2025-09-18 17:30:17,568 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0326 | Val mean-roc_auc_score: 0.9850
282
+ 2025-09-18 17:30:21,951 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0409 | Val mean-roc_auc_score: 0.9932
283
+ 2025-09-18 17:30:22,478 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1612
284
+ 2025-09-18 17:30:23,026 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 31 with val mean-roc_auc_score: 0.9932
285
+ 2025-09-18 17:30:27,558 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0227 | Val mean-roc_auc_score: 0.9922
286
+ 2025-09-18 17:30:31,849 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9922
287
+ 2025-09-18 17:30:36,261 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9927
288
+ 2025-09-18 17:30:40,509 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9924
289
+ 2025-09-18 17:30:42,417 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9925
290
+ 2025-09-18 17:30:47,120 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9926
291
+ 2025-09-18 17:30:51,466 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9929
292
+ 2025-09-18 17:30:56,531 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9925
293
+ 2025-09-18 17:31:00,554 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9929
294
+ 2025-09-18 17:31:04,668 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9931
295
+ 2025-09-18 17:31:09,554 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9932
296
+ 2025-09-18 17:31:09,695 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 2184
297
+ 2025-09-18 17:31:10,208 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 42 with val mean-roc_auc_score: 0.9932
298
+ 2025-09-18 17:31:12,020 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9929
299
+ 2025-09-18 17:31:16,062 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9932
300
+ 2025-09-18 17:31:16,230 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 2288
301
+ 2025-09-18 17:31:16,748 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 44 with val mean-roc_auc_score: 0.9932
302
+ 2025-09-18 17:31:20,863 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9930
303
+ 2025-09-18 17:31:24,985 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9934
304
+ 2025-09-18 17:31:25,469 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 2392
305
+ 2025-09-18 17:31:25,992 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 46 with val mean-roc_auc_score: 0.9934
306
+ 2025-09-18 17:31:30,535 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9929
307
+ 2025-09-18 17:31:34,834 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9931
308
+ 2025-09-18 17:31:38,965 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9933
309
+ 2025-09-18 17:31:40,568 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9934
310
+ 2025-09-18 17:31:40,708 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 2600
311
+ 2025-09-18 17:31:41,237 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 50 with val mean-roc_auc_score: 0.9934
312
+ 2025-09-18 17:31:45,497 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.9932
313
+ 2025-09-18 17:31:50,055 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0173 | Val mean-roc_auc_score: 0.9938
314
+ 2025-09-18 17:31:50,228 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 2704
315
+ 2025-09-18 17:31:50,747 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 52 with val mean-roc_auc_score: 0.9938
316
+ 2025-09-18 17:31:55,226 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9933
317
+ 2025-09-18 17:31:59,336 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9936
318
+ 2025-09-18 17:32:03,418 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9924
319
+ 2025-09-18 17:32:07,566 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9927
320
+ 2025-09-18 17:32:09,655 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9931
321
+ 2025-09-18 17:32:14,589 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9931
322
+ 2025-09-18 17:32:18,785 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9932
323
+ 2025-09-18 17:32:23,118 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9930
324
+ 2025-09-18 17:32:27,484 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9930
325
+ 2025-09-18 17:32:32,262 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9930
326
+ 2025-09-18 17:32:36,600 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9934
327
+ 2025-09-18 17:32:38,298 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9932
328
+ 2025-09-18 17:32:42,719 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9932
329
+ 2025-09-18 17:32:47,208 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9932
330
+ 2025-09-18 17:32:51,988 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9931
331
+ 2025-09-18 17:32:56,616 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9930
332
+ 2025-09-18 17:33:00,851 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9929
333
+ 2025-09-18 17:33:05,127 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9930
334
+ 2025-09-18 17:33:09,350 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9931
335
+ 2025-09-18 17:33:11,642 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9910
336
+ 2025-09-18 17:33:15,929 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9929
337
+ 2025-09-18 17:33:20,194 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9907
338
+ 2025-09-18 17:33:24,263 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0177 | Val mean-roc_auc_score: 0.9904
339
+ 2025-09-18 17:33:28,403 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0595 | Val mean-roc_auc_score: 0.9909
340
+ 2025-09-18 17:33:33,800 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9920
341
+ 2025-09-18 17:33:38,175 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.9931
342
+ 2025-09-18 17:33:40,070 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9919
343
+ 2025-09-18 17:33:44,463 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9914
344
+ 2025-09-18 17:33:49,282 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9914
345
+ 2025-09-18 17:33:53,994 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9913
346
+ 2025-09-18 17:33:58,231 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9912
347
+ 2025-09-18 17:34:02,739 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9914
348
+ 2025-09-18 17:34:07,428 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9912
349
+ 2025-09-18 17:34:09,634 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9912
350
+ 2025-09-18 17:34:14,772 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9914
351
+ 2025-09-18 17:34:19,628 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9913
352
+ 2025-09-18 17:34:24,794 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9914
353
+ 2025-09-18 17:34:30,938 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9915
354
+ 2025-09-18 17:34:37,428 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9915
355
+ 2025-09-18 17:34:41,361 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9914
356
+ 2025-09-18 17:34:47,519 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9914
357
+ 2025-09-18 17:34:53,747 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9916
358
+ 2025-09-18 17:34:59,666 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9915
359
+ 2025-09-18 17:35:05,943 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9913
360
+ 2025-09-18 17:35:10,816 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9913
361
+ 2025-09-18 17:35:17,094 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9916
362
+ 2025-09-18 17:35:23,425 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9915
363
+ 2025-09-18 17:35:29,368 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9914
364
+ 2025-09-18 17:35:29,982 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7225
365
+ 2025-09-18 17:35:30,398 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7356, Std Dev: 0.0094
logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_clintox_epochs100_batch_size32_20250918_191718.log ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 19:17:18,198 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Running benchmark for dataset: clintox
2
+ 2025-09-18 19:17:18,198 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - dataset: clintox, tasks: ['FDA_APPROVED', 'CT_TOX'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 19:17:18,203 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset clintox at 2025-09-18_19-17-18
4
+ 2025-09-18 19:17:16,773 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1182 | Val mean-roc_auc_score: 0.9676
5
+ 2025-09-18 19:17:16,773 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
6
+ 2025-09-18 19:17:17,648 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9676
7
+ 2025-09-18 19:17:21,337 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0378 | Val mean-roc_auc_score: 0.9851
8
+ 2025-09-18 19:17:21,539 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
9
+ 2025-09-18 19:17:22,085 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9851
10
+ 2025-09-18 19:17:27,643 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0204 | Val mean-roc_auc_score: 0.9299
11
+ 2025-09-18 19:17:31,522 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0374 | Val mean-roc_auc_score: 0.9813
12
+ 2025-09-18 19:17:35,405 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0224 | Val mean-roc_auc_score: 0.9815
13
+ 2025-09-18 19:17:39,337 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0241 | Val mean-roc_auc_score: 0.9856
14
+ 2025-09-18 19:17:39,934 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 222
15
+ 2025-09-18 19:17:40,523 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9856
16
+ 2025-09-18 19:17:44,979 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0150 | Val mean-roc_auc_score: 0.9830
17
+ 2025-09-18 19:17:49,967 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0171 | Val mean-roc_auc_score: 0.9858
18
+ 2025-09-18 19:17:50,151 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 296
19
+ 2025-09-18 19:17:50,793 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.9858
20
+ 2025-09-18 19:17:55,455 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0170 | Val mean-roc_auc_score: 0.9823
21
+ 2025-09-18 19:17:59,760 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0166 | Val mean-roc_auc_score: 0.9870
22
+ 2025-09-18 19:17:59,951 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 370
23
+ 2025-09-18 19:18:00,666 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9870
24
+ 2025-09-18 19:18:04,741 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.9885
25
+ 2025-09-18 19:18:05,419 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 407
26
+ 2025-09-18 19:18:06,037 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9885
27
+ 2025-09-18 19:18:10,288 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0152 | Val mean-roc_auc_score: 0.9869
28
+ 2025-09-18 19:18:14,359 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.9891
29
+ 2025-09-18 19:18:14,537 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 481
30
+ 2025-09-18 19:18:15,078 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val mean-roc_auc_score: 0.9891
31
+ 2025-09-18 19:18:19,263 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0208 | Val mean-roc_auc_score: 0.9889
32
+ 2025-09-18 19:18:28,158 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0132 | Val mean-roc_auc_score: 0.9909
33
+ 2025-09-18 19:18:22,806 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 555
34
+ 2025-09-18 19:18:23,366 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val mean-roc_auc_score: 0.9909
35
+ 2025-09-18 19:18:33,163 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9888
36
+ 2025-09-18 19:18:32,552 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0253 | Val mean-roc_auc_score: 0.9870
37
+ 2025-09-18 19:18:36,473 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.9888
38
+ 2025-09-18 19:18:40,656 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0143 | Val mean-roc_auc_score: 0.9896
39
+ 2025-09-18 19:18:44,581 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.9607
40
+ 2025-09-18 19:18:48,521 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0386 | Val mean-roc_auc_score: 0.9933
41
+ 2025-09-18 19:18:49,061 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 777
42
+ 2025-09-18 19:18:49,609 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val mean-roc_auc_score: 0.9933
43
+ 2025-09-18 19:18:53,124 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.9938
44
+ 2025-09-18 19:18:53,307 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 814
45
+ 2025-09-18 19:18:53,824 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 22 with val mean-roc_auc_score: 0.9938
46
+ 2025-09-18 19:18:57,968 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.9923
47
+ 2025-09-18 19:19:02,393 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.9923
48
+ 2025-09-18 19:19:06,664 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9917
49
+ 2025-09-18 19:19:09,945 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.9906
50
+ 2025-09-18 19:19:15,391 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9901
51
+ 2025-09-18 19:19:19,432 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.9917
52
+ 2025-09-18 19:19:23,308 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9917
53
+ 2025-09-18 19:19:27,744 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0133 | Val mean-roc_auc_score: 0.9933
54
+ 2025-09-18 19:19:31,732 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9923
55
+ 2025-09-18 19:19:36,274 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9923
56
+ 2025-09-18 19:19:39,954 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9896
57
+ 2025-09-18 19:19:43,999 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9719
58
+ 2025-09-18 19:19:48,056 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.9923
59
+ 2025-09-18 19:19:52,744 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9912
60
+ 2025-09-18 19:19:57,151 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9938
61
+ 2025-09-18 19:20:01,233 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9911
62
+ 2025-09-18 19:20:05,250 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9901
63
+ 2025-09-18 19:20:09,049 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9911
64
+ 2025-09-18 19:20:18,418 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9955
65
+ 2025-09-18 19:20:13,463 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1517
66
+ 2025-09-18 19:20:14,095 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 41 with val mean-roc_auc_score: 0.9955
67
+ 2025-09-18 19:20:18,156 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9948
68
+ 2025-09-18 19:20:22,649 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9938
69
+ 2025-09-18 19:20:26,299 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9910
70
+ 2025-09-18 19:20:30,296 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9923
71
+ 2025-09-18 19:20:34,310 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0003 | Val mean-roc_auc_score: 0.9954
72
+ 2025-09-18 19:20:39,017 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9917
73
+ 2025-09-18 19:20:43,215 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9911
74
+ 2025-09-18 19:20:47,132 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9895
75
+ 2025-09-18 19:20:51,138 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9884
76
+ 2025-09-18 19:20:55,060 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9934
77
+ 2025-09-18 19:20:59,550 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9945
78
+ 2025-09-18 19:21:03,608 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9933
79
+ 2025-09-18 19:21:08,115 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9955
80
+ 2025-09-18 19:21:12,687 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9945
81
+ 2025-09-18 19:21:16,678 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9953
82
+ 2025-09-18 19:21:21,213 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0003 | Val mean-roc_auc_score: 0.9955
83
+ 2025-09-18 19:21:25,202 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9966
84
+ 2025-09-18 19:21:25,376 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2146
85
+ 2025-09-18 19:21:25,911 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 58 with val mean-roc_auc_score: 0.9966
86
+ 2025-09-18 19:21:30,001 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9918
87
+ 2025-09-18 19:21:34,010 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9934
88
+ 2025-09-18 19:21:43,527 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9955
89
+ 2025-09-18 19:21:42,642 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9966
90
+ 2025-09-18 19:21:46,689 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9950
91
+ 2025-09-18 19:21:50,606 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9293
92
+ 2025-09-18 19:21:54,607 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.9945
93
+ 2025-09-18 19:21:58,255 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9960
94
+ 2025-09-18 19:22:03,189 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9960
95
+ 2025-09-18 19:22:07,139 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9966
96
+ 2025-09-18 19:22:11,171 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9950
97
+ 2025-09-18 19:22:14,979 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9906
98
+ 2025-09-18 19:22:19,087 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.9883
99
+ 2025-09-18 19:22:23,600 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9922
100
+ 2025-09-18 19:22:27,711 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0001 | Val mean-roc_auc_score: 0.9906
101
+ 2025-09-18 19:22:31,753 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9916
102
+ 2025-09-18 19:22:35,828 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.9933
103
+ 2025-09-18 19:22:39,922 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9916
104
+ 2025-09-18 19:22:44,419 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9933
105
+ 2025-09-18 19:22:48,386 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9939
106
+ 2025-09-18 19:22:52,840 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9916
107
+ 2025-09-18 19:22:56,778 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9906
108
+ 2025-09-18 19:23:00,323 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9928
109
+ 2025-09-18 19:23:05,695 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9934
110
+ 2025-09-18 19:23:09,830 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9885
111
+ 2025-09-18 19:23:13,810 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9926
112
+ 2025-09-18 19:23:23,634 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9903
113
+ 2025-09-18 19:23:22,718 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9879
114
+ 2025-09-18 19:23:27,067 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9912
115
+ 2025-09-18 19:23:30,868 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9955
116
+ 2025-09-18 19:23:34,947 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9920
117
+ 2025-09-18 19:23:38,901 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9923
118
+ 2025-09-18 19:23:43,288 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9911
119
+ 2025-09-18 19:23:47,872 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.9922
120
+ 2025-09-18 19:23:51,862 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9945
121
+ 2025-09-18 19:23:56,090 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9901
122
+ 2025-09-18 19:23:59,729 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9922
123
+ 2025-09-18 19:24:03,686 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9914
124
+ 2025-09-18 19:24:08,499 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9909
125
+ 2025-09-18 19:24:12,321 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9884
126
+ 2025-09-18 19:24:15,710 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9916
127
+ 2025-09-18 19:24:19,897 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9890
128
+ 2025-09-18 19:24:20,321 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9765
129
+ 2025-09-18 19:24:20,751 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset clintox at 2025-09-18_19-24-20
130
+ 2025-09-18 19:24:24,348 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1698 | Val mean-roc_auc_score: 0.9104
131
+ 2025-09-18 19:24:24,348 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
132
+ 2025-09-18 19:24:25,186 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9104
133
+ 2025-09-18 19:24:30,207 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0469 | Val mean-roc_auc_score: 0.9686
134
+ 2025-09-18 19:24:30,379 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
135
+ 2025-09-18 19:24:30,967 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9686
136
+ 2025-09-18 19:24:35,561 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0256 | Val mean-roc_auc_score: 0.9818
137
+ 2025-09-18 19:24:35,755 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 111
138
+ 2025-09-18 19:24:36,359 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9818
139
+ 2025-09-18 19:24:40,369 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0268 | Val mean-roc_auc_score: 0.9819
140
+ 2025-09-18 19:24:40,545 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 148
141
+ 2025-09-18 19:24:41,091 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9819
142
+ 2025-09-18 19:24:45,058 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0285 | Val mean-roc_auc_score: 0.9820
143
+ 2025-09-18 19:24:45,241 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 185
144
+ 2025-09-18 19:24:45,868 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9820
145
+ 2025-09-18 19:24:49,975 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0223 | Val mean-roc_auc_score: 0.9850
146
+ 2025-09-18 19:24:50,568 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 222
147
+ 2025-09-18 19:24:51,122 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9850
148
+ 2025-09-18 19:24:55,343 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0171 | Val mean-roc_auc_score: 0.9859
149
+ 2025-09-18 19:24:55,523 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 259
150
+ 2025-09-18 19:24:56,098 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.9859
151
+ 2025-09-18 19:24:59,962 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0128 | Val mean-roc_auc_score: 0.9830
152
+ 2025-09-18 19:25:03,874 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0131 | Val mean-roc_auc_score: 0.9830
153
+ 2025-09-18 19:25:08,372 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.9827
154
+ 2025-09-18 19:25:12,296 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9849
155
+ 2025-09-18 19:25:16,286 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.9876
156
+ 2025-09-18 19:25:16,473 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 444
157
+ 2025-09-18 19:25:17,023 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.9876
158
+ 2025-09-18 19:25:21,047 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0183 | Val mean-roc_auc_score: 0.9829
159
+ 2025-09-18 19:25:24,643 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0177 | Val mean-roc_auc_score: 0.9931
160
+ 2025-09-18 19:25:24,829 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 518
161
+ 2025-09-18 19:25:25,408 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val mean-roc_auc_score: 0.9931
162
+ 2025-09-18 19:25:29,528 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.9925
163
+ 2025-09-18 19:25:33,735 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9894
164
+ 2025-09-18 19:25:38,162 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0108 | Val mean-roc_auc_score: 0.9894
165
+ 2025-09-18 19:25:42,170 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9872
166
+ 2025-09-18 19:25:46,024 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9878
167
+ 2025-09-18 19:25:49,947 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.9884
168
+ 2025-09-18 19:25:53,964 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0188 | Val mean-roc_auc_score: 0.9859
169
+ 2025-09-18 19:26:04,012 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0246 | Val mean-roc_auc_score: 0.9847
170
+ 2025-09-18 19:26:02,600 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.9852
171
+ 2025-09-18 19:26:06,612 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9869
172
+ 2025-09-18 19:26:10,517 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.9872
173
+ 2025-09-18 19:26:14,609 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9875
174
+ 2025-09-18 19:26:19,941 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.9887
175
+ 2025-09-18 19:26:23,931 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.9883
176
+ 2025-09-18 19:26:28,332 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.9882
177
+ 2025-09-18 19:26:31,934 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.9858
178
+ 2025-09-18 19:26:35,955 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0194 | Val mean-roc_auc_score: 0.9870
179
+ 2025-09-18 19:26:40,596 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0189 | Val mean-roc_auc_score: 0.9836
180
+ 2025-09-18 19:26:44,299 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0190 | Val mean-roc_auc_score: 0.9849
181
+ 2025-09-18 19:26:48,226 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9882
182
+ 2025-09-18 19:26:52,303 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9877
183
+ 2025-09-18 19:26:56,506 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.9894
184
+ 2025-09-18 19:27:00,928 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.9926
185
+ 2025-09-18 19:27:04,732 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9910
186
+ 2025-09-18 19:27:08,791 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9900
187
+ 2025-09-18 19:27:13,356 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9886
188
+ 2025-09-18 19:27:17,175 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.9911
189
+ 2025-09-18 19:27:21,742 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9905
190
+ 2025-09-18 19:27:25,721 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9889
191
+ 2025-09-18 19:27:29,696 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9884
192
+ 2025-09-18 19:27:33,723 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9900
193
+ 2025-09-18 19:27:38,011 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9900
194
+ 2025-09-18 19:27:42,362 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9901
195
+ 2025-09-18 19:27:46,496 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9900
196
+ 2025-09-18 19:27:50,293 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0171 | Val mean-roc_auc_score: 0.9487
197
+ 2025-09-18 19:27:54,262 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0269 | Val mean-roc_auc_score: 0.9911
198
+ 2025-09-18 19:27:58,820 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0167 | Val mean-roc_auc_score: 0.9888
199
+ 2025-09-18 19:28:03,070 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9889
200
+ 2025-09-18 19:28:06,756 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9900
201
+ 2025-09-18 19:28:10,841 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9905
202
+ 2025-09-18 19:28:15,625 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9883
203
+ 2025-09-18 19:28:19,139 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9902
204
+ 2025-09-18 19:28:23,675 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.9900
205
+ 2025-09-18 19:28:28,170 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9944
206
+ 2025-09-18 19:28:28,326 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2146
207
+ 2025-09-18 19:28:28,914 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 58 with val mean-roc_auc_score: 0.9944
208
+ 2025-09-18 19:28:33,124 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9927
209
+ 2025-09-18 19:28:36,996 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9906
210
+ 2025-09-18 19:28:41,026 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9901
211
+ 2025-09-18 19:28:45,505 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9917
212
+ 2025-09-18 19:28:49,207 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9885
213
+ 2025-09-18 19:28:53,958 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9895
214
+ 2025-09-18 19:28:57,879 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9906
215
+ 2025-09-18 19:29:01,980 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9928
216
+ 2025-09-18 19:29:06,272 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9917
217
+ 2025-09-18 19:29:10,226 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9907
218
+ 2025-09-18 19:29:14,301 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9893
219
+ 2025-09-18 19:29:18,369 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9885
220
+ 2025-09-18 19:29:21,802 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9867
221
+ 2025-09-18 19:29:26,201 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9885
222
+ 2025-09-18 19:29:30,220 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9901
223
+ 2025-09-18 19:29:34,273 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9912
224
+ 2025-09-18 19:29:38,793 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9912
225
+ 2025-09-18 19:29:42,852 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9893
226
+ 2025-09-18 19:29:47,249 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9895
227
+ 2025-09-18 19:29:51,069 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9950
228
+ 2025-09-18 19:29:51,219 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2886
229
+ 2025-09-18 19:29:51,761 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 78 with val mean-roc_auc_score: 0.9950
230
+ 2025-09-18 19:29:55,959 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.9913
231
+ 2025-09-18 19:30:00,057 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9885
232
+ 2025-09-18 19:30:04,071 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9879
233
+ 2025-09-18 19:30:09,450 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9856
234
+ 2025-09-18 19:30:13,875 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9896
235
+ 2025-09-18 19:30:17,882 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.9874
236
+ 2025-09-18 19:30:21,382 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9837
237
+ 2025-09-18 19:30:25,284 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9827
238
+ 2025-09-18 19:30:29,774 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9837
239
+ 2025-09-18 19:30:39,229 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9816
240
+ 2025-09-18 19:30:37,576 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9885
241
+ 2025-09-18 19:30:41,742 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9864
242
+ 2025-09-18 19:30:45,837 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9857
243
+ 2025-09-18 19:30:50,248 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0003 | Val mean-roc_auc_score: 0.9842
244
+ 2025-09-18 19:30:54,187 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9875
245
+ 2025-09-18 19:30:58,940 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9892
246
+ 2025-09-18 19:31:02,962 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9876
247
+ 2025-09-18 19:31:06,679 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9851
248
+ 2025-09-18 19:31:11,043 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9870
249
+ 2025-09-18 19:31:15,097 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9825
250
+ 2025-09-18 19:31:19,246 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9901
251
+ 2025-09-18 19:31:23,335 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9888
252
+ 2025-09-18 19:31:23,893 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9382
253
+ 2025-09-18 19:31:23,915 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset clintox at 2025-09-18_19-31-23
254
+ 2025-09-18 19:31:28,194 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1242 | Val mean-roc_auc_score: 0.9179
255
+ 2025-09-18 19:31:28,194 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
256
+ 2025-09-18 19:31:28,950 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9179
257
+ 2025-09-18 19:31:33,942 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0452 | Val mean-roc_auc_score: 0.9833
258
+ 2025-09-18 19:31:34,111 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
259
+ 2025-09-18 19:31:34,247 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9833
260
+ 2025-09-18 19:31:38,961 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0146 | Val mean-roc_auc_score: 0.9700
261
+ 2025-09-18 19:31:43,494 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0332 | Val mean-roc_auc_score: 0.9840
262
+ 2025-09-18 19:31:43,681 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 148
263
+ 2025-09-18 19:31:49,315 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9840
264
+ 2025-09-18 19:31:48,544 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0262 | Val mean-roc_auc_score: 0.9858
265
+ 2025-09-18 19:31:48,757 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 185
266
+ 2025-09-18 19:31:54,380 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9858
267
+ 2025-09-18 19:31:53,021 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0192 | Val mean-roc_auc_score: 0.9849
268
+ 2025-09-18 19:31:57,691 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0216 | Val mean-roc_auc_score: 0.9802
269
+ 2025-09-18 19:32:01,586 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0215 | Val mean-roc_auc_score: 0.9891
270
+ 2025-09-18 19:32:01,774 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 296
271
+ 2025-09-18 19:32:02,497 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.9891
272
+ 2025-09-18 19:32:06,406 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0193 | Val mean-roc_auc_score: 0.9916
273
+ 2025-09-18 19:32:06,585 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 333
274
+ 2025-09-18 19:32:07,150 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.9916
275
+ 2025-09-18 19:32:11,283 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.9893
276
+ 2025-09-18 19:32:15,429 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9928
277
+ 2025-09-18 19:32:16,012 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 407
278
+ 2025-09-18 19:32:16,565 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9928
279
+ 2025-09-18 19:32:21,004 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0108 | Val mean-roc_auc_score: 0.9915
280
+ 2025-09-18 19:32:24,975 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.9889
281
+ 2025-09-18 19:32:29,110 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0225 | Val mean-roc_auc_score: 0.9931
282
+ 2025-09-18 19:32:29,280 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 518
283
+ 2025-09-18 19:32:29,873 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val mean-roc_auc_score: 0.9931
284
+ 2025-09-18 19:32:33,980 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.9907
285
+ 2025-09-18 19:32:38,104 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.9865
286
+ 2025-09-18 19:32:42,451 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0119 | Val mean-roc_auc_score: 0.9878
287
+ 2025-09-18 19:32:46,520 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.9904
288
+ 2025-09-18 19:32:50,497 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0244 | Val mean-roc_auc_score: 0.9888
289
+ 2025-09-18 19:32:54,746 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0177 | Val mean-roc_auc_score: 0.9824
290
+ 2025-09-18 19:32:59,025 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.9830
291
+ 2025-09-18 19:33:03,986 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0165 | Val mean-roc_auc_score: 0.9837
292
+ 2025-09-18 19:33:07,548 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.9882
293
+ 2025-09-18 19:33:11,482 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.9865
294
+ 2025-09-18 19:33:15,696 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0130 | Val mean-roc_auc_score: 0.9865
295
+ 2025-09-18 19:33:19,655 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9859
296
+ 2025-09-18 19:33:24,573 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9871
297
+ 2025-09-18 19:33:29,091 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9916
298
+ 2025-09-18 19:33:33,116 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9872
299
+ 2025-09-18 19:33:37,030 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9862
300
+ 2025-09-18 19:33:40,985 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9851
301
+ 2025-09-18 19:33:45,513 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9925
302
+ 2025-09-18 19:33:49,566 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9899
303
+ 2025-09-18 19:33:53,533 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9916
304
+ 2025-09-18 19:33:57,551 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9916
305
+ 2025-09-18 19:34:01,614 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.9908
306
+ 2025-09-18 19:34:05,924 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9907
307
+ 2025-09-18 19:34:09,839 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9910
308
+ 2025-09-18 19:34:14,423 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9867
309
+ 2025-09-18 19:34:18,514 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9905
310
+ 2025-09-18 19:34:22,312 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0087 | Val mean-roc_auc_score: 0.9921
311
+ 2025-09-18 19:34:26,655 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9899
312
+ 2025-09-18 19:34:30,185 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9877
313
+ 2025-09-18 19:34:39,716 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.9914
314
+ 2025-09-18 19:34:38,630 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.9889
315
+ 2025-09-18 19:34:42,569 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0288 | Val mean-roc_auc_score: 0.9888
316
+ 2025-09-18 19:34:47,208 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0137 | Val mean-roc_auc_score: 0.9899
317
+ 2025-09-18 19:34:51,333 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.9917
318
+ 2025-09-18 19:34:54,802 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9888
319
+ 2025-09-18 19:35:04,614 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9911
320
+ 2025-09-18 19:35:03,483 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9897
321
+ 2025-09-18 19:35:08,010 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9912
322
+ 2025-09-18 19:35:12,401 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9895
323
+ 2025-09-18 19:35:16,499 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9917
324
+ 2025-09-18 19:35:21,601 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9912
325
+ 2025-09-18 19:35:25,219 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.9926
326
+ 2025-09-18 19:35:29,696 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.9943
327
+ 2025-09-18 19:35:29,847 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2109
328
+ 2025-09-18 19:35:30,434 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 57 with val mean-roc_auc_score: 0.9943
329
+ 2025-09-18 19:35:34,509 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9898
330
+ 2025-09-18 19:35:38,668 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.9915
331
+ 2025-09-18 19:35:42,622 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9926
332
+ 2025-09-18 19:35:46,339 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9899
333
+ 2025-09-18 19:35:50,773 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9921
334
+ 2025-09-18 19:35:54,392 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9921
335
+ 2025-09-18 19:35:58,945 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9921
336
+ 2025-09-18 19:36:02,934 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9921
337
+ 2025-09-18 19:36:06,882 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9916
338
+ 2025-09-18 19:36:11,050 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9916
339
+ 2025-09-18 19:36:15,108 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9910
340
+ 2025-09-18 19:36:19,614 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9898
341
+ 2025-09-18 19:36:23,577 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9927
342
+ 2025-09-18 19:36:27,483 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9927
343
+ 2025-09-18 19:36:32,057 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9927
344
+ 2025-09-18 19:36:36,135 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9931
345
+ 2025-09-18 19:36:39,764 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9916
346
+ 2025-09-18 19:36:44,150 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9916
347
+ 2025-09-18 19:36:48,103 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9916
348
+ 2025-09-18 19:36:52,490 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9916
349
+ 2025-09-18 19:36:55,908 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.9911
350
+ 2025-09-18 19:36:59,845 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.9949
351
+ 2025-09-18 19:36:59,992 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2923
352
+ 2025-09-18 19:37:00,570 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 79 with val mean-roc_auc_score: 0.9949
353
+ 2025-09-18 19:37:09,852 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9943
354
+ 2025-09-18 19:37:08,803 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9940
355
+ 2025-09-18 19:37:14,143 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9938
356
+ 2025-09-18 19:37:18,156 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9916
357
+ 2025-09-18 19:37:22,165 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9916
358
+ 2025-09-18 19:37:25,873 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9916
359
+ 2025-09-18 19:37:29,863 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9910
360
+ 2025-09-18 19:37:39,876 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9916
361
+ 2025-09-18 19:37:38,812 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9910
362
+ 2025-09-18 19:37:42,730 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9916
363
+ 2025-09-18 19:37:46,661 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9905
364
+ 2025-09-18 19:37:50,907 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9916
365
+ 2025-09-18 19:37:55,032 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9910
366
+ 2025-09-18 19:37:59,412 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9921
367
+ 2025-09-18 19:38:03,475 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9921
368
+ 2025-09-18 19:38:07,430 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9905
369
+ 2025-09-18 19:38:10,882 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9927
370
+ 2025-09-18 19:38:15,389 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9918
371
+ 2025-09-18 19:38:19,754 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.9971
372
+ 2025-09-18 19:38:24,970 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 3626
373
+ 2025-09-18 19:38:19,994 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 98 with val mean-roc_auc_score: 0.9971
374
+ 2025-09-18 19:38:24,519 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9971
375
+ 2025-09-18 19:38:28,129 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9949
376
+ 2025-09-18 19:38:28,495 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9846
377
+ 2025-09-18 19:38:28,938 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.9664, Std Dev: 0.0202
logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_hiv_epochs100_batch_size32_20250923_141720.log ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-23 14:17:20,052 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Running benchmark for dataset: hiv
2
+ 2025-09-23 14:17:20,052 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - dataset: hiv, tasks: ['HIV_active'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-23 14:17:20,057 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset hiv at 2025-09-23_14-17-20
4
+ 2025-09-23 14:18:16,670 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1256 | Val mean-roc_auc_score: 0.8281
5
+ 2025-09-23 14:18:16,670 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
6
+ 2025-09-23 14:18:17,186 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.8281
7
+ 2025-09-23 14:19:18,351 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1013 | Val mean-roc_auc_score: 0.8160
8
+ 2025-09-23 14:20:20,135 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1188 | Val mean-roc_auc_score: 0.8265
9
+ 2025-09-23 14:21:21,362 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0796 | Val mean-roc_auc_score: 0.8280
10
+ 2025-09-23 14:22:23,227 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0629 | Val mean-roc_auc_score: 0.8219
11
+ 2025-09-23 14:23:24,742 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0532 | Val mean-roc_auc_score: 0.8135
12
+ 2025-09-23 14:24:26,933 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0418 | Val mean-roc_auc_score: 0.7861
13
+ 2025-09-23 14:25:28,499 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0215 | Val mean-roc_auc_score: 0.8122
14
+ 2025-09-23 14:26:30,419 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0322 | Val mean-roc_auc_score: 0.7835
15
+ 2025-09-23 14:27:31,948 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0214 | Val mean-roc_auc_score: 0.7852
16
+ 2025-09-23 14:28:33,852 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0130 | Val mean-roc_auc_score: 0.7632
17
+ 2025-09-23 14:29:35,784 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.7953
18
+ 2025-09-23 14:30:37,618 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0211 | Val mean-roc_auc_score: 0.7984
19
+ 2025-09-23 14:31:39,306 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0173 | Val mean-roc_auc_score: 0.7914
20
+ 2025-09-23 14:32:40,772 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.8091
21
+ 2025-09-23 14:33:42,514 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0173 | Val mean-roc_auc_score: 0.8252
22
+ 2025-09-23 14:34:44,228 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.8182
23
+ 2025-09-23 14:35:45,779 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.8092
24
+ 2025-09-23 14:36:47,773 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0132 | Val mean-roc_auc_score: 0.7999
25
+ 2025-09-23 14:37:49,287 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0121 | Val mean-roc_auc_score: 0.8071
26
+ 2025-09-23 14:38:51,132 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.8117
27
+ 2025-09-23 14:39:53,053 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.8028
28
+ 2025-09-23 14:40:54,584 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0124 | Val mean-roc_auc_score: 0.7986
29
+ 2025-09-23 14:41:56,909 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.7920
30
+ 2025-09-23 14:42:58,594 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.7796
31
+ 2025-09-23 14:44:00,300 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0354 | Val mean-roc_auc_score: 0.7873
32
+ 2025-09-23 14:45:02,002 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0132 | Val mean-roc_auc_score: 0.7898
33
+ 2025-09-23 14:46:03,395 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.7861
34
+ 2025-09-23 14:47:05,757 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.7928
35
+ 2025-09-23 14:48:07,585 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.7936
36
+ 2025-09-23 14:49:09,476 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.7932
37
+ 2025-09-23 14:50:11,447 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.7818
38
+ 2025-09-23 14:51:13,116 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8042
39
+ 2025-09-23 14:52:14,830 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7896
40
+ 2025-09-23 14:53:16,435 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.7855
41
+ 2025-09-23 14:54:18,392 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7916
42
+ 2025-09-23 14:55:21,422 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7898
43
+ 2025-09-23 14:56:23,000 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.7919
44
+ 2025-09-23 14:57:25,079 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.7865
45
+ 2025-09-23 14:58:26,579 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7880
46
+ 2025-09-23 14:59:28,359 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7946
47
+ 2025-09-23 15:00:30,113 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.7954
48
+ 2025-09-23 15:01:31,754 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.7919
49
+ 2025-09-23 15:02:33,617 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.7915
50
+ 2025-09-23 15:03:35,151 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.7934
51
+ 2025-09-23 15:04:37,039 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7944
52
+ 2025-09-23 15:05:38,863 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7972
53
+ 2025-09-23 15:06:40,314 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7942
54
+ 2025-09-23 15:07:42,206 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.7924
55
+ 2025-09-23 15:08:43,787 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.7883
56
+ 2025-09-23 15:09:45,601 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.8006
57
+ 2025-09-23 15:10:47,394 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.8001
58
+ 2025-09-23 15:11:49,175 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.7981
59
+ 2025-09-23 15:12:51,318 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.8000
60
+ 2025-09-23 15:14:10,241 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8075
61
+ 2025-09-23 15:15:26,591 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.7993
62
+ 2025-09-23 15:16:42,619 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8048
63
+ 2025-09-23 15:17:58,524 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8034
64
+ 2025-09-23 15:19:24,463 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.8022
65
+ 2025-09-23 15:20:44,406 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8060
66
+ 2025-09-23 15:22:12,379 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8041
67
+ 2025-09-23 15:23:44,137 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8044
68
+ 2025-09-23 15:25:05,459 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.8032
69
+ 2025-09-23 15:26:29,902 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8011
70
+ 2025-09-23 15:27:36,547 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.8021
71
+ 2025-09-23 15:28:38,737 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8012
72
+ 2025-09-23 15:29:42,259 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7991
73
+ 2025-09-23 15:30:44,208 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.7985
74
+ 2025-09-23 15:31:46,499 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.7984
75
+ 2025-09-23 15:32:52,005 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.7983
76
+ 2025-09-23 15:34:21,854 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.7956
77
+ 2025-09-23 15:35:51,489 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7979
78
+ 2025-09-23 15:37:21,358 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.7983
79
+ 2025-09-23 15:38:51,360 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7992
80
+ 2025-09-23 15:40:23,391 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.7963
81
+ 2025-09-23 15:41:53,152 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7994
82
+ 2025-09-23 15:43:22,743 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.7953
83
+ 2025-09-23 15:44:54,063 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.7936
84
+ 2025-09-23 15:46:24,488 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7931
85
+ 2025-09-23 15:47:54,210 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7954
86
+ 2025-09-23 15:49:25,480 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.7935
87
+ 2025-09-23 15:50:55,487 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.7959
88
+ 2025-09-23 15:52:25,520 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.7929
89
+ 2025-09-23 15:53:56,076 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.7991
90
+ 2025-09-23 15:55:26,183 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.7975
91
+ 2025-09-23 15:56:56,155 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.7971
92
+ 2025-09-23 15:58:27,029 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7976
93
+ 2025-09-23 15:59:57,284 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7965
94
+ 2025-09-23 16:01:27,466 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.7944
95
+ 2025-09-23 16:02:57,321 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7971
96
+ 2025-09-23 16:04:27,272 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.7960
97
+ 2025-09-23 16:05:57,748 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.7965
98
+ 2025-09-23 16:07:26,704 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.7968
99
+ 2025-09-23 16:08:56,127 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.7933
100
+ 2025-09-23 16:10:25,529 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.7924
101
+ 2025-09-23 16:11:55,423 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.7954
102
+ 2025-09-23 16:13:26,275 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7903
103
+ 2025-09-23 16:14:54,967 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7947
104
+ 2025-09-23 16:16:24,345 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7946
105
+ 2025-09-23 16:17:54,362 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7965
106
+ 2025-09-23 16:17:59,404 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7770
107
+ 2025-09-23 16:17:59,999 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset hiv at 2025-09-23_16-17-59
108
+ 2025-09-23 16:19:21,234 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1140 | Val mean-roc_auc_score: 0.8283
109
+ 2025-09-23 16:19:21,234 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
110
+ 2025-09-23 16:19:21,827 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.8283
111
+ 2025-09-23 16:20:51,626 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1065 | Val mean-roc_auc_score: 0.8377
112
+ 2025-09-23 16:20:51,764 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 2054
113
+ 2025-09-23 16:20:52,299 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.8377
114
+ 2025-09-23 16:22:22,553 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0675 | Val mean-roc_auc_score: 0.8419
115
+ 2025-09-23 16:22:22,699 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 3081
116
+ 2025-09-23 16:22:23,256 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.8419
117
+ 2025-09-23 16:23:52,567 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1270 | Val mean-roc_auc_score: 0.8392
118
+ 2025-09-23 16:25:22,551 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.8259
119
+ 2025-09-23 16:26:52,338 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0491 | Val mean-roc_auc_score: 0.8394
120
+ 2025-09-23 16:28:21,967 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0316 | Val mean-roc_auc_score: 0.8386
121
+ 2025-09-23 16:29:51,269 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0275 | Val mean-roc_auc_score: 0.8197
122
+ 2025-09-23 16:31:21,351 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0227 | Val mean-roc_auc_score: 0.8304
123
+ 2025-09-23 16:32:51,464 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0192 | Val mean-roc_auc_score: 0.8181
124
+ 2025-09-23 16:34:21,440 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.8160
125
+ 2025-09-23 16:35:52,600 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0194 | Val mean-roc_auc_score: 0.8226
126
+ 2025-09-23 16:37:21,742 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.8107
127
+ 2025-09-23 16:38:49,040 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.8112
128
+ 2025-09-23 16:40:18,372 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8175
129
+ 2025-09-23 16:41:47,577 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8115
130
+ 2025-09-23 16:43:17,209 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0128 | Val mean-roc_auc_score: 0.8171
131
+ 2025-09-23 16:44:47,623 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.8157
132
+ 2025-09-23 16:46:16,983 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.8164
133
+ 2025-09-23 16:47:44,595 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8121
134
+ 2025-09-23 16:49:14,822 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.8110
135
+ 2025-09-23 16:50:43,896 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8091
136
+ 2025-09-23 16:52:13,012 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8100
137
+ 2025-09-23 16:53:41,635 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.8071
138
+ 2025-09-23 16:55:10,727 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.8026
139
+ 2025-09-23 16:56:39,114 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.8013
140
+ 2025-09-23 16:58:06,509 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.8025
141
+ 2025-09-23 16:59:34,873 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.8074
142
+ 2025-09-23 17:01:03,943 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.8105
143
+ 2025-09-23 17:02:32,081 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.8088
144
+ 2025-09-23 17:04:01,152 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.8079
145
+ 2025-09-23 17:05:30,175 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8096
146
+ 2025-09-23 17:06:56,415 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.8083
147
+ 2025-09-23 17:08:24,756 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.8088
148
+ 2025-09-23 17:09:52,373 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8099
149
+ 2025-09-23 17:11:20,493 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.8078
150
+ 2025-09-23 17:12:49,704 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8044
151
+ 2025-09-23 17:14:16,731 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.8064
152
+ 2025-09-23 17:15:45,571 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8106
153
+ 2025-09-23 17:17:12,948 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8069
154
+ 2025-09-23 17:18:41,625 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0003 | Val mean-roc_auc_score: 0.8071
155
+ 2025-09-23 17:20:09,802 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8040
156
+ 2025-09-23 17:21:37,226 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8028
157
+ 2025-09-23 17:23:05,279 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.8058
158
+ 2025-09-23 17:24:33,087 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.8065
159
+ 2025-09-23 17:25:59,337 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8022
160
+ 2025-09-23 17:27:27,080 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8080
161
+ 2025-09-23 17:28:55,060 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8086
162
+ 2025-09-23 17:30:21,686 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8004
163
+ 2025-09-23 17:31:48,875 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8069
164
+ 2025-09-23 17:33:16,616 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8073
165
+ 2025-09-23 17:34:45,499 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8059
166
+ 2025-09-23 17:36:13,632 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.8085
167
+ 2025-09-23 17:37:42,936 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8076
168
+ 2025-09-23 17:39:11,004 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.8080
169
+ 2025-09-23 17:40:39,470 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8120
170
+ 2025-09-23 17:42:07,593 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8125
171
+ 2025-09-23 17:43:34,859 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8117
172
+ 2025-09-23 17:45:03,210 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.8127
173
+ 2025-09-23 17:46:30,342 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8120
174
+ 2025-09-23 17:47:59,182 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.8126
175
+ 2025-09-23 17:49:27,963 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.8122
176
+ 2025-09-23 17:50:54,715 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0001 | Val mean-roc_auc_score: 0.8116
177
+ 2025-09-23 17:52:22,358 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.8110
178
+ 2025-09-23 17:53:50,200 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.8108
179
+ 2025-09-23 17:55:17,922 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8098
180
+ 2025-09-23 17:56:45,906 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8118
181
+ 2025-09-23 17:58:13,663 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8119
182
+ 2025-09-23 17:59:40,910 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8114
183
+ 2025-09-23 18:01:08,078 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8112
184
+ 2025-09-23 18:02:36,465 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8099
185
+ 2025-09-23 18:04:04,085 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8099
186
+ 2025-09-23 18:05:32,030 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8090
187
+ 2025-09-23 18:06:59,469 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8116
188
+ 2025-09-23 18:08:28,100 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.8105
189
+ 2025-09-23 18:09:55,686 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8104
190
+ 2025-09-23 18:11:23,806 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8081
191
+ 2025-09-23 18:12:51,042 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.8090
192
+ 2025-09-23 18:14:19,071 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.8091
193
+ 2025-09-23 18:15:46,520 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8098
194
+ 2025-09-23 18:17:14,596 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8114
195
+ 2025-09-23 18:18:42,426 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8123
196
+ 2025-09-23 18:20:09,478 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.8113
197
+ 2025-09-23 18:21:37,258 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8124
198
+ 2025-09-23 18:23:04,147 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.8105
199
+ 2025-09-23 18:24:31,784 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8074
200
+ 2025-09-23 18:26:00,386 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8098
201
+ 2025-09-23 18:27:27,153 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8078
202
+ 2025-09-23 18:28:54,636 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0000 | Val mean-roc_auc_score: 0.8082
203
+ 2025-09-23 18:30:22,124 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.8105
204
+ 2025-09-23 18:31:49,980 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.8089
205
+ 2025-09-23 18:33:17,676 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8092
206
+ 2025-09-23 18:34:45,538 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.8103
207
+ 2025-09-23 18:36:12,630 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8100
208
+ 2025-09-23 18:37:39,741 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.8094
209
+ 2025-09-23 18:39:07,811 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8088
210
+ 2025-09-23 18:40:35,186 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8077
211
+ 2025-09-23 18:42:02,338 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.8088
212
+ 2025-09-23 18:43:30,699 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8093
213
+ 2025-09-23 18:44:57,488 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.8081
214
+ 2025-09-23 18:45:02,213 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7795
215
+ 2025-09-23 18:45:02,868 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset hiv at 2025-09-23_18-45-02
216
+ 2025-09-23 18:46:25,375 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1053 | Val mean-roc_auc_score: 0.7946
217
+ 2025-09-23 18:46:25,375 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
218
+ 2025-09-23 18:46:25,923 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7946
219
+ 2025-09-23 18:47:54,176 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0984 | Val mean-roc_auc_score: 0.8056
220
+ 2025-09-23 18:47:54,312 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 2054
221
+ 2025-09-23 18:47:54,840 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.8056
222
+ 2025-09-23 18:49:21,933 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1150 | Val mean-roc_auc_score: 0.8052
223
+ 2025-09-23 18:50:49,453 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0334 | Val mean-roc_auc_score: 0.8371
224
+ 2025-09-23 18:50:49,599 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 4108
225
+ 2025-09-23 18:50:50,133 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.8371
226
+ 2025-09-23 18:52:18,706 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0565 | Val mean-roc_auc_score: 0.8082
227
+ 2025-09-23 18:53:45,941 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0484 | Val mean-roc_auc_score: 0.8160
228
+ 2025-09-23 18:55:13,906 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0381 | Val mean-roc_auc_score: 0.8204
229
+ 2025-09-23 18:56:41,699 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0339 | Val mean-roc_auc_score: 0.7994
230
+ 2025-09-23 18:58:10,186 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0233 | Val mean-roc_auc_score: 0.8052
231
+ 2025-09-23 18:59:37,092 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0177 | Val mean-roc_auc_score: 0.8101
232
+ 2025-09-23 19:01:04,976 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0205 | Val mean-roc_auc_score: 0.7883
233
+ 2025-09-23 19:02:33,411 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0142 | Val mean-roc_auc_score: 0.7945
234
+ 2025-09-23 19:04:00,451 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0267 | Val mean-roc_auc_score: 0.7996
235
+ 2025-09-23 19:05:28,025 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.7819
236
+ 2025-09-23 19:06:55,581 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.7789
237
+ 2025-09-23 19:08:06,684 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.7926
238
+ 2025-09-23 19:09:11,244 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0095 | Val mean-roc_auc_score: 0.7883
239
+ 2025-09-23 19:10:14,950 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7933
240
+ 2025-09-23 19:11:19,561 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.7823
241
+ 2025-09-23 19:12:23,062 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.7910
242
+ 2025-09-23 19:13:27,294 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.7851
243
+ 2025-09-23 19:14:31,603 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.7896
244
+ 2025-09-23 19:15:35,565 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.7871
245
+ 2025-09-23 19:16:39,907 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.7891
246
+ 2025-09-23 19:17:43,684 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.7846
247
+ 2025-09-23 19:18:48,229 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0006 | Val mean-roc_auc_score: 0.7863
248
+ 2025-09-23 19:19:52,903 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7869
249
+ 2025-09-23 19:20:56,504 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.7813
250
+ 2025-09-23 19:22:00,899 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7887
251
+ 2025-09-23 19:23:04,438 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.7908
252
+ 2025-09-23 19:24:08,828 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7886
253
+ 2025-09-23 19:25:13,496 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7885
254
+ 2025-09-23 19:26:17,486 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.7942
255
+ 2025-09-23 19:27:21,714 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.7955
256
+ 2025-09-23 19:28:25,874 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.7953
257
+ 2025-09-23 19:29:30,158 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.7916
258
+ 2025-09-23 19:30:35,530 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7920
259
+ 2025-09-23 19:31:39,102 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7987
260
+ 2025-09-23 19:32:43,624 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.8124
261
+ 2025-09-23 19:33:47,202 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8060
262
+ 2025-09-23 19:34:51,546 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.8062
263
+ 2025-09-23 19:35:55,979 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.8042
264
+ 2025-09-23 19:36:59,486 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8040
265
+ 2025-09-23 19:38:03,808 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8025
266
+ 2025-09-23 19:39:07,427 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.8006
267
+ 2025-09-23 19:40:11,947 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.8032
268
+ 2025-09-23 19:41:16,399 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.7978
269
+ 2025-09-23 19:42:19,830 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.7992
270
+ 2025-09-23 19:43:24,153 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7978
271
+ 2025-09-23 19:44:27,967 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.7997
272
+ 2025-09-23 19:45:32,240 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.7951
273
+ 2025-09-23 19:46:36,568 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.8002
274
+ 2025-09-23 19:47:40,320 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7998
275
+ 2025-09-23 19:48:44,632 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7975
276
+ 2025-09-23 19:49:48,537 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.7966
277
+ 2025-09-23 19:50:52,868 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.7951
278
+ 2025-09-23 19:51:57,474 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.7916
279
+ 2025-09-23 19:53:01,535 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.7944
280
+ 2025-09-23 19:54:06,021 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7930
281
+ 2025-09-23 19:55:09,672 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.7920
282
+ 2025-09-23 19:56:13,980 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.7897
283
+ 2025-09-23 19:57:18,362 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.7915
284
+ 2025-09-23 19:58:22,004 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0000 | Val mean-roc_auc_score: 0.7936
285
+ 2025-09-23 19:59:26,070 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7933
286
+ 2025-09-23 20:00:29,838 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7969
287
+ 2025-09-23 20:01:34,199 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7935
288
+ 2025-09-23 20:02:38,695 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.7941
289
+ 2025-09-23 20:03:42,507 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.7972
290
+ 2025-09-23 20:04:46,805 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.7955
291
+ 2025-09-23 20:05:50,424 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7958
292
+ 2025-09-23 20:06:54,984 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.7966
293
+ 2025-09-23 20:07:59,363 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.7956
294
+ 2025-09-23 20:09:03,061 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.7958
295
+ 2025-09-23 20:10:07,390 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7966
296
+ 2025-09-23 20:11:12,285 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7991
297
+ 2025-09-23 20:12:16,791 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.7989
298
+ 2025-09-23 20:13:21,132 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.7988
299
+ 2025-09-23 20:14:24,840 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.7958
300
+ 2025-09-23 20:15:29,333 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.7953
301
+ 2025-09-23 20:16:32,756 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7965
302
+ 2025-09-23 20:17:37,253 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.7955
303
+ 2025-09-23 20:18:41,543 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.7969
304
+ 2025-09-23 20:19:45,194 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.7990
305
+ 2025-09-23 20:20:49,488 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.7979
306
+ 2025-09-23 20:21:53,419 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.7958
307
+ 2025-09-23 20:22:57,876 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.8001
308
+ 2025-09-23 20:24:02,217 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8014
309
+ 2025-09-23 20:25:05,743 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.7990
310
+ 2025-09-23 20:26:10,037 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.7976
311
+ 2025-09-23 20:27:13,321 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7977
312
+ 2025-09-23 20:28:17,754 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7984
313
+ 2025-09-23 20:29:22,131 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.7974
314
+ 2025-09-23 20:30:25,846 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.7986
315
+ 2025-09-23 20:31:30,447 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.7971
316
+ 2025-09-23 20:32:34,234 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7984
317
+ 2025-09-23 20:33:38,740 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.7969
318
+ 2025-09-23 20:34:42,833 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.7969
319
+ 2025-09-23 20:35:46,291 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.7974
320
+ 2025-09-23 20:36:50,770 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.8015
321
+ 2025-09-23 20:37:54,654 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.7992
322
+ 2025-09-23 20:37:58,259 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7684
323
+ 2025-09-23 20:37:59,094 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7750, Std Dev: 0.0048
logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_sider_epochs100_batch_size32_20250918_185547.log ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 18:55:47,014 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Running benchmark for dataset: sider
2
+ 2025-09-18 18:55:47,014 - logs_modchembert_sider_epochs100_batch_size32 - INFO - dataset: sider, tasks: ['Hepatobiliary disorders', 'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders', 'Investigations', 'Musculoskeletal and connective tissue disorders', 'Gastrointestinal disorders', 'Social circumstances', 'Immune system disorders', 'Reproductive system and breast disorders', 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)', 'General disorders and administration site conditions', 'Endocrine disorders', 'Surgical and medical procedures', 'Vascular disorders', 'Blood and lymphatic system disorders', 'Skin and subcutaneous tissue disorders', 'Congenital, familial and genetic disorders', 'Infections and infestations', 'Respiratory, thoracic and mediastinal disorders', 'Psychiatric disorders', 'Renal and urinary disorders', 'Pregnancy, puerperium and perinatal conditions', 'Ear and labyrinth disorders', 'Cardiac disorders', 'Nervous system disorders', 'Injury, poisoning and procedural complications'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 18:55:47,019 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset sider at 2025-09-18_18-55-47
4
+ 2025-09-18 18:55:51,204 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5429 | Val mean-roc_auc_score: 0.5435
5
+ 2025-09-18 18:55:51,204 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
6
+ 2025-09-18 18:55:51,822 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5435
7
+ 2025-09-18 18:55:55,189 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5071 | Val mean-roc_auc_score: 0.5624
8
+ 2025-09-18 18:55:55,375 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
9
+ 2025-09-18 18:55:55,947 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5624
10
+ 2025-09-18 18:56:01,578 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4875 | Val mean-roc_auc_score: 0.5764
11
+ 2025-09-18 18:56:01,800 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
12
+ 2025-09-18 18:56:02,658 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5764
13
+ 2025-09-18 18:56:06,822 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4679 | Val mean-roc_auc_score: 0.5990
14
+ 2025-09-18 18:56:06,990 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
15
+ 2025-09-18 18:56:07,518 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5990
16
+ 2025-09-18 18:56:12,513 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4500 | Val mean-roc_auc_score: 0.5837
17
+ 2025-09-18 18:56:16,455 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4250 | Val mean-roc_auc_score: 0.5893
18
+ 2025-09-18 18:56:21,270 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3875 | Val mean-roc_auc_score: 0.6154
19
+ 2025-09-18 18:56:21,411 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 245
20
+ 2025-09-18 18:56:21,938 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.6154
21
+ 2025-09-18 18:56:26,104 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3554 | Val mean-roc_auc_score: 0.6204
22
+ 2025-09-18 18:56:26,275 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 280
23
+ 2025-09-18 18:56:26,842 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.6204
24
+ 2025-09-18 18:56:28,218 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3312 | Val mean-roc_auc_score: 0.6249
25
+ 2025-09-18 18:56:28,391 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 315
26
+ 2025-09-18 18:56:28,974 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.6249
27
+ 2025-09-18 18:56:33,507 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.2964 | Val mean-roc_auc_score: 0.6286
28
+ 2025-09-18 18:56:33,693 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 350
29
+ 2025-09-18 18:56:34,251 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.6286
30
+ 2025-09-18 18:56:39,370 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2750 | Val mean-roc_auc_score: 0.6142
31
+ 2025-09-18 18:56:44,195 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2672 | Val mean-roc_auc_score: 0.6135
32
+ 2025-09-18 18:56:48,510 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2429 | Val mean-roc_auc_score: 0.6187
33
+ 2025-09-18 18:56:52,951 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2339 | Val mean-roc_auc_score: 0.6138
34
+ 2025-09-18 18:56:57,139 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2263 | Val mean-roc_auc_score: 0.6272
35
+ 2025-09-18 18:56:58,666 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2054 | Val mean-roc_auc_score: 0.6187
36
+ 2025-09-18 18:57:03,476 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1955 | Val mean-roc_auc_score: 0.6076
37
+ 2025-09-18 18:57:07,839 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1875 | Val mean-roc_auc_score: 0.6246
38
+ 2025-09-18 18:57:11,903 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1768 | Val mean-roc_auc_score: 0.6121
39
+ 2025-09-18 18:57:16,250 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1741 | Val mean-roc_auc_score: 0.5986
40
+ 2025-09-18 18:57:20,509 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1652 | Val mean-roc_auc_score: 0.6066
41
+ 2025-09-18 18:57:25,637 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1589 | Val mean-roc_auc_score: 0.6097
42
+ 2025-09-18 18:57:26,687 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1672 | Val mean-roc_auc_score: 0.6179
43
+ 2025-09-18 18:57:30,921 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1491 | Val mean-roc_auc_score: 0.6063
44
+ 2025-09-18 18:57:35,149 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1455 | Val mean-roc_auc_score: 0.6161
45
+ 2025-09-18 18:57:39,649 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1398 | Val mean-roc_auc_score: 0.6140
46
+ 2025-09-18 18:57:44,471 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1348 | Val mean-roc_auc_score: 0.6131
47
+ 2025-09-18 18:57:49,111 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1295 | Val mean-roc_auc_score: 0.6112
48
+ 2025-09-18 18:57:54,563 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1323 | Val mean-roc_auc_score: 0.6087
49
+ 2025-09-18 18:57:56,000 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1268 | Val mean-roc_auc_score: 0.6135
50
+ 2025-09-18 18:58:00,326 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1223 | Val mean-roc_auc_score: 0.6121
51
+ 2025-09-18 18:58:05,217 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1234 | Val mean-roc_auc_score: 0.6190
52
+ 2025-09-18 18:58:09,745 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1161 | Val mean-roc_auc_score: 0.6077
53
+ 2025-09-18 18:58:14,525 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.6107
54
+ 2025-09-18 18:58:19,347 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1138 | Val mean-roc_auc_score: 0.6027
55
+ 2025-09-18 18:58:24,416 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1098 | Val mean-roc_auc_score: 0.6106
56
+ 2025-09-18 18:58:29,893 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.6038
57
+ 2025-09-18 18:58:34,949 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1109 | Val mean-roc_auc_score: 0.6099
58
+ 2025-09-18 18:58:37,140 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1058 | Val mean-roc_auc_score: 0.6024
59
+ 2025-09-18 18:58:41,608 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1009 | Val mean-roc_auc_score: 0.6037
60
+ 2025-09-18 18:58:51,415 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1013 | Val mean-roc_auc_score: 0.6098
61
+ 2025-09-18 18:58:51,125 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.6091
62
+ 2025-09-18 18:58:55,459 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1133 | Val mean-roc_auc_score: 0.6024
63
+ 2025-09-18 18:58:59,393 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0996 | Val mean-roc_auc_score: 0.6106
64
+ 2025-09-18 18:59:03,102 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0982 | Val mean-roc_auc_score: 0.6030
65
+ 2025-09-18 18:59:06,794 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0973 | Val mean-roc_auc_score: 0.6016
66
+ 2025-09-18 18:59:11,470 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0924 | Val mean-roc_auc_score: 0.6070
67
+ 2025-09-18 18:59:16,045 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6060
68
+ 2025-09-18 18:59:20,648 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0979 | Val mean-roc_auc_score: 0.6076
69
+ 2025-09-18 18:59:24,882 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6049
70
+ 2025-09-18 18:59:29,395 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0929 | Val mean-roc_auc_score: 0.6026
71
+ 2025-09-18 18:59:34,240 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0961 | Val mean-roc_auc_score: 0.6072
72
+ 2025-09-18 18:59:38,758 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.6040
73
+ 2025-09-18 18:59:42,998 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.6096
74
+ 2025-09-18 18:59:44,452 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0856 | Val mean-roc_auc_score: 0.6058
75
+ 2025-09-18 18:59:48,727 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0871 | Val mean-roc_auc_score: 0.6022
76
+ 2025-09-18 18:59:53,704 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.6065
77
+ 2025-09-18 18:59:59,155 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.6093
78
+ 2025-09-18 19:00:03,664 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.6021
79
+ 2025-09-18 19:00:08,327 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0835 | Val mean-roc_auc_score: 0.6018
80
+ 2025-09-18 19:00:12,668 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.5975
81
+ 2025-09-18 19:00:15,188 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.6038
82
+ 2025-09-18 19:00:19,684 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0824 | Val mean-roc_auc_score: 0.6030
83
+ 2025-09-18 19:00:23,347 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.6063
84
+ 2025-09-18 19:00:26,888 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0808 | Val mean-roc_auc_score: 0.6082
85
+ 2025-09-18 19:00:31,393 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6047
86
+ 2025-09-18 19:00:36,132 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0786 | Val mean-roc_auc_score: 0.6013
87
+ 2025-09-18 19:00:40,383 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.6012
88
+ 2025-09-18 19:00:44,322 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0865 | Val mean-roc_auc_score: 0.6107
89
+ 2025-09-18 19:00:48,417 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.6090
90
+ 2025-09-18 19:00:52,558 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.6080
91
+ 2025-09-18 19:00:58,129 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0789 | Val mean-roc_auc_score: 0.6122
92
+ 2025-09-18 19:01:02,199 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.6031
93
+ 2025-09-18 19:01:06,754 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.6058
94
+ 2025-09-18 19:01:12,297 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0769 | Val mean-roc_auc_score: 0.6010
95
+ 2025-09-18 19:01:17,418 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6028
96
+ 2025-09-18 19:01:22,479 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.6072
97
+ 2025-09-18 19:01:23,898 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0766 | Val mean-roc_auc_score: 0.6068
98
+ 2025-09-18 19:01:27,584 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.6084
99
+ 2025-09-18 19:01:31,472 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.6007
100
+ 2025-09-18 19:01:35,938 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.6116
101
+ 2025-09-18 19:01:40,936 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.6099
102
+ 2025-09-18 19:01:44,938 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0820 | Val mean-roc_auc_score: 0.6059
103
+ 2025-09-18 19:01:49,487 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.6042
104
+ 2025-09-18 19:01:53,408 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.5991
105
+ 2025-09-18 19:01:58,921 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0730 | Val mean-roc_auc_score: 0.6026
106
+ 2025-09-18 19:02:04,089 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0714 | Val mean-roc_auc_score: 0.6039
107
+ 2025-09-18 19:02:08,605 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.6102
108
+ 2025-09-18 19:02:13,353 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0740 | Val mean-roc_auc_score: 0.6046
109
+ 2025-09-18 19:02:15,052 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0714 | Val mean-roc_auc_score: 0.6059
110
+ 2025-09-18 19:02:18,972 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.6095
111
+ 2025-09-18 19:02:23,479 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.5978
112
+ 2025-09-18 19:02:27,843 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.6096
113
+ 2025-09-18 19:02:32,249 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0696 | Val mean-roc_auc_score: 0.6025
114
+ 2025-09-18 19:02:37,205 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0691 | Val mean-roc_auc_score: 0.6025
115
+ 2025-09-18 19:02:41,198 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.6062
116
+ 2025-09-18 19:02:46,227 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0688 | Val mean-roc_auc_score: 0.5999
117
+ 2025-09-18 19:02:47,682 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.6088
118
+ 2025-09-18 19:02:51,520 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.6021
119
+ 2025-09-18 19:02:55,564 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0705 | Val mean-roc_auc_score: 0.6080
120
+ 2025-09-18 19:02:56,294 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6403
121
+ 2025-09-18 19:03:01,850 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset sider at 2025-09-18_19-03-01
122
+ 2025-09-18 19:03:00,587 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5464 | Val mean-roc_auc_score: 0.5763
123
+ 2025-09-18 19:03:00,587 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
124
+ 2025-09-18 19:03:01,196 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5763
125
+ 2025-09-18 19:03:06,763 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5036 | Val mean-roc_auc_score: 0.5748
126
+ 2025-09-18 19:03:10,875 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4875 | Val mean-roc_auc_score: 0.5911
127
+ 2025-09-18 19:03:11,068 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
128
+ 2025-09-18 19:03:11,691 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5911
129
+ 2025-09-18 19:03:15,830 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4714 | Val mean-roc_auc_score: 0.5972
130
+ 2025-09-18 19:03:16,008 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
131
+ 2025-09-18 19:03:16,556 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5972
132
+ 2025-09-18 19:03:20,571 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4500 | Val mean-roc_auc_score: 0.6033
133
+ 2025-09-18 19:03:20,761 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 175
134
+ 2025-09-18 19:03:21,333 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.6033
135
+ 2025-09-18 19:03:25,436 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4188 | Val mean-roc_auc_score: 0.6105
136
+ 2025-09-18 19:03:26,037 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 210
137
+ 2025-09-18 19:03:26,592 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.6105
138
+ 2025-09-18 19:03:30,609 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3875 | Val mean-roc_auc_score: 0.5980
139
+ 2025-09-18 19:03:34,696 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3607 | Val mean-roc_auc_score: 0.6364
140
+ 2025-09-18 19:03:34,878 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 280
141
+ 2025-09-18 19:03:35,452 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.6364
142
+ 2025-09-18 19:03:39,503 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3375 | Val mean-roc_auc_score: 0.6265
143
+ 2025-09-18 19:03:45,379 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3036 | Val mean-roc_auc_score: 0.6172
144
+ 2025-09-18 19:03:49,271 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2821 | Val mean-roc_auc_score: 0.6182
145
+ 2025-09-18 19:03:53,801 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2687 | Val mean-roc_auc_score: 0.6282
146
+ 2025-09-18 19:03:57,672 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2464 | Val mean-roc_auc_score: 0.6202
147
+ 2025-09-18 19:04:02,027 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2375 | Val mean-roc_auc_score: 0.6214
148
+ 2025-09-18 19:04:06,539 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2263 | Val mean-roc_auc_score: 0.6129
149
+ 2025-09-18 19:04:11,755 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2134 | Val mean-roc_auc_score: 0.6305
150
+ 2025-09-18 19:04:16,799 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.2000 | Val mean-roc_auc_score: 0.6214
151
+ 2025-09-18 19:04:21,256 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1917 | Val mean-roc_auc_score: 0.6276
152
+ 2025-09-18 19:04:25,728 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1804 | Val mean-roc_auc_score: 0.6146
153
+ 2025-09-18 19:04:30,189 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1750 | Val mean-roc_auc_score: 0.6138
154
+ 2025-09-18 19:04:33,618 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1723 | Val mean-roc_auc_score: 0.6194
155
+ 2025-09-18 19:04:38,157 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1670 | Val mean-roc_auc_score: 0.6028
156
+ 2025-09-18 19:04:42,427 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1680 | Val mean-roc_auc_score: 0.6256
157
+ 2025-09-18 19:04:46,758 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1571 | Val mean-roc_auc_score: 0.6228
158
+ 2025-09-18 19:04:50,665 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1536 | Val mean-roc_auc_score: 0.6129
159
+ 2025-09-18 19:04:54,704 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1516 | Val mean-roc_auc_score: 0.6054
160
+ 2025-09-18 19:04:59,415 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1411 | Val mean-roc_auc_score: 0.6121
161
+ 2025-09-18 19:05:03,361 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1384 | Val mean-roc_auc_score: 0.6047
162
+ 2025-09-18 19:05:08,174 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1385 | Val mean-roc_auc_score: 0.6155
163
+ 2025-09-18 19:05:12,659 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1268 | Val mean-roc_auc_score: 0.6033
164
+ 2025-09-18 19:05:16,698 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1277 | Val mean-roc_auc_score: 0.6177
165
+ 2025-09-18 19:05:21,343 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1320 | Val mean-roc_auc_score: 0.6041
166
+ 2025-09-18 19:05:25,723 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1259 | Val mean-roc_auc_score: 0.6148
167
+ 2025-09-18 19:05:29,695 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1214 | Val mean-roc_auc_score: 0.6111
168
+ 2025-09-18 19:05:33,886 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1175 | Val mean-roc_auc_score: 0.6076
169
+ 2025-09-18 19:05:37,436 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6144
170
+ 2025-09-18 19:05:42,307 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1196 | Val mean-roc_auc_score: 0.6147
171
+ 2025-09-18 19:05:46,683 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1135 | Val mean-roc_auc_score: 0.6143
172
+ 2025-09-18 19:05:50,649 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1112 | Val mean-roc_auc_score: 0.6118
173
+ 2025-09-18 19:05:54,414 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.6175
174
+ 2025-09-18 19:05:58,445 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.6125
175
+ 2025-09-18 19:06:03,548 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1040 | Val mean-roc_auc_score: 0.6109
176
+ 2025-09-18 19:06:07,583 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1047 | Val mean-roc_auc_score: 0.6087
177
+ 2025-09-18 19:06:11,843 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1031 | Val mean-roc_auc_score: 0.6082
178
+ 2025-09-18 19:06:15,812 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.1054 | Val mean-roc_auc_score: 0.6125
179
+ 2025-09-18 19:06:20,015 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1016 | Val mean-roc_auc_score: 0.6147
180
+ 2025-09-18 19:06:24,445 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0982 | Val mean-roc_auc_score: 0.6185
181
+ 2025-09-18 19:06:28,271 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0973 | Val mean-roc_auc_score: 0.6108
182
+ 2025-09-18 19:06:32,025 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0979 | Val mean-roc_auc_score: 0.6102
183
+ 2025-09-18 19:06:36,403 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0946 | Val mean-roc_auc_score: 0.6070
184
+ 2025-09-18 19:06:40,398 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6107
185
+ 2025-09-18 19:06:44,919 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0965 | Val mean-roc_auc_score: 0.6201
186
+ 2025-09-18 19:06:49,091 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0924 | Val mean-roc_auc_score: 0.6088
187
+ 2025-09-18 19:06:53,038 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0929 | Val mean-roc_auc_score: 0.6093
188
+ 2025-09-18 19:07:02,251 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0925 | Val mean-roc_auc_score: 0.6124
189
+ 2025-09-18 19:07:01,115 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0929 | Val mean-roc_auc_score: 0.6042
190
+ 2025-09-18 19:07:05,731 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0897 | Val mean-roc_auc_score: 0.6113
191
+ 2025-09-18 19:07:10,749 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0901 | Val mean-roc_auc_score: 0.6061
192
+ 2025-09-18 19:07:14,049 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6112
193
+ 2025-09-18 19:07:17,810 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.6194
194
+ 2025-09-18 19:07:27,266 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0871 | Val mean-roc_auc_score: 0.6144
195
+ 2025-09-18 19:07:27,069 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0906 | Val mean-roc_auc_score: 0.6114
196
+ 2025-09-18 19:07:30,825 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0973 | Val mean-roc_auc_score: 0.6087
197
+ 2025-09-18 19:07:35,003 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.6151
198
+ 2025-09-18 19:07:39,970 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.6091
199
+ 2025-09-18 19:07:45,319 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0836 | Val mean-roc_auc_score: 0.6160
200
+ 2025-09-18 19:07:50,299 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0821 | Val mean-roc_auc_score: 0.6092
201
+ 2025-09-18 19:07:54,464 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0821 | Val mean-roc_auc_score: 0.6161
202
+ 2025-09-18 19:07:58,687 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0818 | Val mean-roc_auc_score: 0.6138
203
+ 2025-09-18 19:08:02,500 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0808 | Val mean-roc_auc_score: 0.6135
204
+ 2025-09-18 19:08:07,135 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.6118
205
+ 2025-09-18 19:08:11,714 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.6144
206
+ 2025-09-18 19:08:15,845 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.6132
207
+ 2025-09-18 19:08:19,634 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.6067
208
+ 2025-09-18 19:08:23,958 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0806 | Val mean-roc_auc_score: 0.6038
209
+ 2025-09-18 19:08:28,038 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.5992
210
+ 2025-09-18 19:08:32,528 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.6062
211
+ 2025-09-18 19:08:37,020 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0786 | Val mean-roc_auc_score: 0.6082
212
+ 2025-09-18 19:08:40,787 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.6043
213
+ 2025-09-18 19:08:44,666 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.6110
214
+ 2025-09-18 19:08:48,429 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.6102
215
+ 2025-09-18 19:08:52,900 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.6043
216
+ 2025-09-18 19:09:02,365 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0789 | Val mean-roc_auc_score: 0.6070
217
+ 2025-09-18 19:09:01,183 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.6148
218
+ 2025-09-18 19:09:04,783 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.6103
219
+ 2025-09-18 19:09:09,650 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.6134
220
+ 2025-09-18 19:09:14,348 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6091
221
+ 2025-09-18 19:09:18,325 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6056
222
+ 2025-09-18 19:09:22,236 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.6128
223
+ 2025-09-18 19:09:26,397 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0759 | Val mean-roc_auc_score: 0.6127
224
+ 2025-09-18 19:09:30,585 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0759 | Val mean-roc_auc_score: 0.6068
225
+ 2025-09-18 19:09:35,335 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.6104
226
+ 2025-09-18 19:09:39,700 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.6109
227
+ 2025-09-18 19:09:43,758 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.6005
228
+ 2025-09-18 19:09:47,958 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.6087
229
+ 2025-09-18 19:09:52,029 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6127
230
+ 2025-09-18 19:09:56,321 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.6102
231
+ 2025-09-18 19:09:59,929 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.6090
232
+ 2025-09-18 19:10:03,922 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.6153
233
+ 2025-09-18 19:10:07,606 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.6110
234
+ 2025-09-18 19:10:08,399 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6536
235
+ 2025-09-18 19:10:08,862 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset sider at 2025-09-18_19-10-08
236
+ 2025-09-18 19:10:12,807 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5429 | Val mean-roc_auc_score: 0.5532
237
+ 2025-09-18 19:10:12,808 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
238
+ 2025-09-18 19:10:13,742 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5532
239
+ 2025-09-18 19:10:20,672 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5036 | Val mean-roc_auc_score: 0.5578
240
+ 2025-09-18 19:10:20,846 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
241
+ 2025-09-18 19:10:21,698 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5578
242
+ 2025-09-18 19:10:26,758 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.5031 | Val mean-roc_auc_score: 0.5905
243
+ 2025-09-18 19:10:26,948 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
244
+ 2025-09-18 19:10:27,537 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5905
245
+ 2025-09-18 19:10:31,841 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4714 | Val mean-roc_auc_score: 0.5977
246
+ 2025-09-18 19:10:32,008 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
247
+ 2025-09-18 19:10:32,603 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5977
248
+ 2025-09-18 19:10:37,463 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4500 | Val mean-roc_auc_score: 0.5921
249
+ 2025-09-18 19:10:42,026 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4031 | Val mean-roc_auc_score: 0.6033
250
+ 2025-09-18 19:10:42,718 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 210
251
+ 2025-09-18 19:10:43,289 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.6033
252
+ 2025-09-18 19:10:48,241 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3821 | Val mean-roc_auc_score: 0.6206
253
+ 2025-09-18 19:10:48,414 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 245
254
+ 2025-09-18 19:10:49,005 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.6206
255
+ 2025-09-18 19:10:50,303 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3607 | Val mean-roc_auc_score: 0.6146
256
+ 2025-09-18 19:10:54,108 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3292 | Val mean-roc_auc_score: 0.6260
257
+ 2025-09-18 19:10:54,289 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 315
258
+ 2025-09-18 19:10:54,820 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.6260
259
+ 2025-09-18 19:10:58,540 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3054 | Val mean-roc_auc_score: 0.6256
260
+ 2025-09-18 19:11:02,870 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2893 | Val mean-roc_auc_score: 0.6146
261
+ 2025-09-18 19:11:07,551 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2687 | Val mean-roc_auc_score: 0.6255
262
+ 2025-09-18 19:11:11,868 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2446 | Val mean-roc_auc_score: 0.6282
263
+ 2025-09-18 19:11:12,070 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 455
264
+ 2025-09-18 19:11:12,291 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val mean-roc_auc_score: 0.6282
265
+ 2025-09-18 19:11:16,893 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2321 | Val mean-roc_auc_score: 0.6194
266
+ 2025-09-18 19:11:20,715 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2288 | Val mean-roc_auc_score: 0.6141
267
+ 2025-09-18 19:11:24,997 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2071 | Val mean-roc_auc_score: 0.6152
268
+ 2025-09-18 19:11:29,229 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1955 | Val mean-roc_auc_score: 0.6112
269
+ 2025-09-18 19:11:33,927 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1875 | Val mean-roc_auc_score: 0.6227
270
+ 2025-09-18 19:11:38,045 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1786 | Val mean-roc_auc_score: 0.6170
271
+ 2025-09-18 19:11:42,237 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1750 | Val mean-roc_auc_score: 0.6084
272
+ 2025-09-18 19:11:46,196 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1661 | Val mean-roc_auc_score: 0.6081
273
+ 2025-09-18 19:11:50,808 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1616 | Val mean-roc_auc_score: 0.6104
274
+ 2025-09-18 19:11:54,961 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1578 | Val mean-roc_auc_score: 0.6123
275
+ 2025-09-18 19:11:59,072 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1500 | Val mean-roc_auc_score: 0.6030
276
+ 2025-09-18 19:12:03,051 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1500 | Val mean-roc_auc_score: 0.6085
277
+ 2025-09-18 19:12:12,647 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1570 | Val mean-roc_auc_score: 0.6048
278
+ 2025-09-18 19:12:12,138 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1384 | Val mean-roc_auc_score: 0.6144
279
+ 2025-09-18 19:12:16,071 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1339 | Val mean-roc_auc_score: 0.6162
280
+ 2025-09-18 19:12:20,327 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1323 | Val mean-roc_auc_score: 0.6124
281
+ 2025-09-18 19:12:24,417 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1250 | Val mean-roc_auc_score: 0.6061
282
+ 2025-09-18 19:12:28,391 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1223 | Val mean-roc_auc_score: 0.6039
283
+ 2025-09-18 19:12:32,964 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1273 | Val mean-roc_auc_score: 0.6102
284
+ 2025-09-18 19:12:37,056 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6079
285
+ 2025-09-18 19:12:41,053 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1143 | Val mean-roc_auc_score: 0.6035
286
+ 2025-09-18 19:12:45,214 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1169 | Val mean-roc_auc_score: 0.6046
287
+ 2025-09-18 19:12:48,878 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1170 | Val mean-roc_auc_score: 0.5933
288
+ 2025-09-18 19:12:54,206 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.6087
289
+ 2025-09-18 19:12:58,664 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.5982
290
+ 2025-09-18 19:13:03,217 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1054 | Val mean-roc_auc_score: 0.6057
291
+ 2025-09-18 19:13:07,509 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.5982
292
+ 2025-09-18 19:13:12,071 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1040 | Val mean-roc_auc_score: 0.6001
293
+ 2025-09-18 19:13:17,207 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1027 | Val mean-roc_auc_score: 0.6056
294
+ 2025-09-18 19:13:21,394 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.6013
295
+ 2025-09-18 19:13:25,877 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.6069
296
+ 2025-09-18 19:13:29,879 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.1018 | Val mean-roc_auc_score: 0.6095
297
+ 2025-09-18 19:13:33,995 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1039 | Val mean-roc_auc_score: 0.5995
298
+ 2025-09-18 19:13:38,395 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0964 | Val mean-roc_auc_score: 0.6012
299
+ 2025-09-18 19:13:43,059 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6001
300
+ 2025-09-18 19:13:47,426 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0974 | Val mean-roc_auc_score: 0.6056
301
+ 2025-09-18 19:13:51,338 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0911 | Val mean-roc_auc_score: 0.6013
302
+ 2025-09-18 19:13:55,119 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0893 | Val mean-roc_auc_score: 0.6040
303
+ 2025-09-18 19:13:59,413 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0941 | Val mean-roc_auc_score: 0.5988
304
+ 2025-09-18 19:14:03,220 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0902 | Val mean-roc_auc_score: 0.5968
305
+ 2025-09-18 19:14:07,606 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0893 | Val mean-roc_auc_score: 0.6040
306
+ 2025-09-18 19:14:11,417 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0900 | Val mean-roc_auc_score: 0.5982
307
+ 2025-09-18 19:14:15,266 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0871 | Val mean-roc_auc_score: 0.5983
308
+ 2025-09-18 19:14:19,629 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0871 | Val mean-roc_auc_score: 0.6052
309
+ 2025-09-18 19:14:24,282 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0870 | Val mean-roc_auc_score: 0.6024
310
+ 2025-09-18 19:14:28,014 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0835 | Val mean-roc_auc_score: 0.5996
311
+ 2025-09-18 19:14:31,902 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.6081
312
+ 2025-09-18 19:14:35,667 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0835 | Val mean-roc_auc_score: 0.6115
313
+ 2025-09-18 19:14:39,974 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.6071
314
+ 2025-09-18 19:14:43,680 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0805 | Val mean-roc_auc_score: 0.6030
315
+ 2025-09-18 19:14:47,731 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.5947
316
+ 2025-09-18 19:14:51,533 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6045
317
+ 2025-09-18 19:14:55,414 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.5988
318
+ 2025-09-18 19:14:59,881 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6004
319
+ 2025-09-18 19:15:03,535 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0786 | Val mean-roc_auc_score: 0.6023
320
+ 2025-09-18 19:15:07,826 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0823 | Val mean-roc_auc_score: 0.5999
321
+ 2025-09-18 19:15:11,642 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6000
322
+ 2025-09-18 19:15:15,531 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.5992
323
+ 2025-09-18 19:15:19,704 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0836 | Val mean-roc_auc_score: 0.6028
324
+ 2025-09-18 19:15:23,464 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5979
325
+ 2025-09-18 19:15:27,654 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.5983
326
+ 2025-09-18 19:15:31,230 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0794 | Val mean-roc_auc_score: 0.5959
327
+ 2025-09-18 19:15:34,970 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.6057
328
+ 2025-09-18 19:15:39,152 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5969
329
+ 2025-09-18 19:15:43,010 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0771 | Val mean-roc_auc_score: 0.6143
330
+ 2025-09-18 19:15:47,026 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.6055
331
+ 2025-09-18 19:15:50,797 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6007
332
+ 2025-09-18 19:15:54,599 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.5933
333
+ 2025-09-18 19:15:58,993 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.5988
334
+ 2025-09-18 19:16:02,873 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0820 | Val mean-roc_auc_score: 0.5968
335
+ 2025-09-18 19:16:06,650 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.6044
336
+ 2025-09-18 19:16:10,615 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.5928
337
+ 2025-09-18 19:16:15,325 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5994
338
+ 2025-09-18 19:16:19,734 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.5988
339
+ 2025-09-18 19:16:24,178 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.6015
340
+ 2025-09-18 19:16:28,024 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0724 | Val mean-roc_auc_score: 0.6005
341
+ 2025-09-18 19:16:32,301 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.6038
342
+ 2025-09-18 19:16:35,958 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0714 | Val mean-roc_auc_score: 0.6026
343
+ 2025-09-18 19:16:40,360 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0758 | Val mean-roc_auc_score: 0.5923
344
+ 2025-09-18 19:16:44,513 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.6005
345
+ 2025-09-18 19:16:47,885 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.5967
346
+ 2025-09-18 19:16:52,001 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0753 | Val mean-roc_auc_score: 0.5993
347
+ 2025-09-18 19:16:56,020 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.5986
348
+ 2025-09-18 19:17:00,231 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.5975
349
+ 2025-09-18 19:17:03,940 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0688 | Val mean-roc_auc_score: 0.5968
350
+ 2025-09-18 19:17:07,809 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.5986
351
+ 2025-09-18 19:17:12,107 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0714 | Val mean-roc_auc_score: 0.6029
352
+ 2025-09-18 19:17:12,710 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6306
353
+ 2025-09-18 19:17:18,196 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.6415, Std Dev: 0.0094
logs_modchembert_classification_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_tox21_epochs100_batch_size32_20250918_173530.log ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 17:35:30,400 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Running benchmark for dataset: tox21
2
+ 2025-09-18 17:35:30,400 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - dataset: tox21, tasks: ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 17:35:30,404 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset tox21 at 2025-09-18_17-35-30
4
+ 2025-09-18 17:35:43,563 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1732 | Val mean-roc_auc_score: 0.7377
5
+ 2025-09-18 17:35:43,563 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
6
+ 2025-09-18 17:35:44,095 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7377
7
+ 2025-09-18 17:36:01,752 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1624 | Val mean-roc_auc_score: 0.7535
8
+ 2025-09-18 17:36:01,949 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
9
+ 2025-09-18 17:36:02,490 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7535
10
+ 2025-09-18 17:36:17,836 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1548 | Val mean-roc_auc_score: 0.7646
11
+ 2025-09-18 17:36:18,015 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
12
+ 2025-09-18 17:36:18,605 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7646
13
+ 2025-09-18 17:36:36,039 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1525 | Val mean-roc_auc_score: 0.7719
14
+ 2025-09-18 17:36:36,189 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 784
15
+ 2025-09-18 17:36:36,745 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7719
16
+ 2025-09-18 17:36:52,592 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1508 | Val mean-roc_auc_score: 0.7747
17
+ 2025-09-18 17:36:52,773 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 980
18
+ 2025-09-18 17:36:53,349 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7747
19
+ 2025-09-18 17:37:09,872 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1480 | Val mean-roc_auc_score: 0.7843
20
+ 2025-09-18 17:37:10,387 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1176
21
+ 2025-09-18 17:37:10,926 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7843
22
+ 2025-09-18 17:37:29,326 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1476 | Val mean-roc_auc_score: 0.7770
23
+ 2025-09-18 17:37:44,654 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1379 | Val mean-roc_auc_score: 0.7780
24
+ 2025-09-18 17:38:02,929 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1377 | Val mean-roc_auc_score: 0.7786
25
+ 2025-09-18 17:38:17,555 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1292 | Val mean-roc_auc_score: 0.7800
26
+ 2025-09-18 17:38:36,228 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1244 | Val mean-roc_auc_score: 0.7777
27
+ 2025-09-18 17:38:51,341 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1166 | Val mean-roc_auc_score: 0.7660
28
+ 2025-09-18 17:39:09,880 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1159 | Val mean-roc_auc_score: 0.7856
29
+ 2025-09-18 17:39:10,025 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 2548
30
+ 2025-09-18 17:39:10,592 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val mean-roc_auc_score: 0.7856
31
+ 2025-09-18 17:39:25,462 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1158 | Val mean-roc_auc_score: 0.7709
32
+ 2025-09-18 17:39:40,045 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1109 | Val mean-roc_auc_score: 0.7675
33
+ 2025-09-18 17:39:58,337 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1046 | Val mean-roc_auc_score: 0.7572
34
+ 2025-09-18 17:40:13,505 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1118 | Val mean-roc_auc_score: 0.7632
35
+ 2025-09-18 17:40:30,732 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1083 | Val mean-roc_auc_score: 0.7575
36
+ 2025-09-18 17:40:45,000 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0990 | Val mean-roc_auc_score: 0.7479
37
+ 2025-09-18 17:41:02,353 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1008 | Val mean-roc_auc_score: 0.7623
38
+ 2025-09-18 17:41:18,085 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1064 | Val mean-roc_auc_score: 0.7542
39
+ 2025-09-18 17:41:35,676 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0885 | Val mean-roc_auc_score: 0.7446
40
+ 2025-09-18 17:41:50,447 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1006 | Val mean-roc_auc_score: 0.7556
41
+ 2025-09-18 17:42:07,867 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1167 | Val mean-roc_auc_score: 0.7644
42
+ 2025-09-18 17:42:22,503 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0894 | Val mean-roc_auc_score: 0.7481
43
+ 2025-09-18 17:42:40,104 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0866 | Val mean-roc_auc_score: 0.7501
44
+ 2025-09-18 17:42:54,954 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.7473
45
+ 2025-09-18 17:43:11,570 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0863 | Val mean-roc_auc_score: 0.7472
46
+ 2025-09-18 17:43:27,720 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0856 | Val mean-roc_auc_score: 0.7495
47
+ 2025-09-18 17:43:42,785 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0832 | Val mean-roc_auc_score: 0.7506
48
+ 2025-09-18 17:44:01,864 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0789 | Val mean-roc_auc_score: 0.7387
49
+ 2025-09-18 17:44:16,956 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0829 | Val mean-roc_auc_score: 0.7426
50
+ 2025-09-18 17:44:35,235 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.7348
51
+ 2025-09-18 17:44:49,822 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0776 | Val mean-roc_auc_score: 0.7531
52
+ 2025-09-18 17:45:07,136 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0766 | Val mean-roc_auc_score: 0.7406
53
+ 2025-09-18 17:45:22,090 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0776 | Val mean-roc_auc_score: 0.7445
54
+ 2025-09-18 17:45:39,019 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7366
55
+ 2025-09-18 17:45:53,531 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0736 | Val mean-roc_auc_score: 0.7429
56
+ 2025-09-18 17:46:10,273 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0778 | Val mean-roc_auc_score: 0.7411
57
+ 2025-09-18 17:46:24,372 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.7510
58
+ 2025-09-18 17:46:45,088 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0760 | Val mean-roc_auc_score: 0.7391
59
+ 2025-09-18 17:46:56,767 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7349
60
+ 2025-09-18 17:47:11,721 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0815 | Val mean-roc_auc_score: 0.7423
61
+ 2025-09-18 17:47:28,609 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0745 | Val mean-roc_auc_score: 0.7264
62
+ 2025-09-18 17:47:42,661 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.7339
63
+ 2025-09-18 17:48:00,199 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0864 | Val mean-roc_auc_score: 0.7424
64
+ 2025-09-18 17:48:14,825 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0635 | Val mean-roc_auc_score: 0.7363
65
+ 2025-09-18 17:48:31,490 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0801 | Val mean-roc_auc_score: 0.7333
66
+ 2025-09-18 17:48:45,810 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.7327
67
+ 2025-09-18 17:49:02,795 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0669 | Val mean-roc_auc_score: 0.7360
68
+ 2025-09-18 17:49:17,209 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0687 | Val mean-roc_auc_score: 0.7313
69
+ 2025-09-18 17:49:36,129 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0659 | Val mean-roc_auc_score: 0.7358
70
+ 2025-09-18 17:49:50,302 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0650 | Val mean-roc_auc_score: 0.7355
71
+ 2025-09-18 17:50:07,539 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0655 | Val mean-roc_auc_score: 0.7397
72
+ 2025-09-18 17:50:22,441 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0672 | Val mean-roc_auc_score: 0.7281
73
+ 2025-09-18 17:50:39,624 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0650 | Val mean-roc_auc_score: 0.7265
74
+ 2025-09-18 17:50:55,362 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0634 | Val mean-roc_auc_score: 0.7362
75
+ 2025-09-18 17:51:10,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0643 | Val mean-roc_auc_score: 0.7354
76
+ 2025-09-18 17:51:27,251 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.7335
77
+ 2025-09-18 17:51:42,519 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0633 | Val mean-roc_auc_score: 0.7381
78
+ 2025-09-18 17:51:59,497 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7376
79
+ 2025-09-18 17:52:15,623 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0637 | Val mean-roc_auc_score: 0.7222
80
+ 2025-09-18 17:52:32,777 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7332
81
+ 2025-09-18 17:52:47,761 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0618 | Val mean-roc_auc_score: 0.7341
82
+ 2025-09-18 17:53:04,818 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0637 | Val mean-roc_auc_score: 0.7312
83
+ 2025-09-18 17:53:19,322 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0599 | Val mean-roc_auc_score: 0.7318
84
+ 2025-09-18 17:53:37,741 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0688 | Val mean-roc_auc_score: 0.7347
85
+ 2025-09-18 17:53:52,451 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0661 | Val mean-roc_auc_score: 0.7330
86
+ 2025-09-18 17:54:10,018 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0622 | Val mean-roc_auc_score: 0.7275
87
+ 2025-09-18 17:54:24,786 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0590 | Val mean-roc_auc_score: 0.7333
88
+ 2025-09-18 17:54:41,791 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0579 | Val mean-roc_auc_score: 0.7320
89
+ 2025-09-18 17:54:57,734 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7294
90
+ 2025-09-18 17:55:12,061 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0588 | Val mean-roc_auc_score: 0.7285
91
+ 2025-09-18 17:55:28,672 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0591 | Val mean-roc_auc_score: 0.7315
92
+ 2025-09-18 17:55:42,805 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0572 | Val mean-roc_auc_score: 0.7302
93
+ 2025-09-18 17:55:59,910 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0570 | Val mean-roc_auc_score: 0.7326
94
+ 2025-09-18 17:56:15,460 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0574 | Val mean-roc_auc_score: 0.7287
95
+ 2025-09-18 17:56:32,040 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0597 | Val mean-roc_auc_score: 0.7270
96
+ 2025-09-18 17:56:46,362 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0580 | Val mean-roc_auc_score: 0.7340
97
+ 2025-09-18 17:57:03,170 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7292
98
+ 2025-09-18 17:57:17,505 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0596 | Val mean-roc_auc_score: 0.7327
99
+ 2025-09-18 17:57:36,001 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0573 | Val mean-roc_auc_score: 0.7393
100
+ 2025-09-18 17:57:50,354 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0593 | Val mean-roc_auc_score: 0.7267
101
+ 2025-09-18 17:58:07,348 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0579 | Val mean-roc_auc_score: 0.7324
102
+ 2025-09-18 17:58:21,259 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7367
103
+ 2025-09-18 17:58:37,909 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0569 | Val mean-roc_auc_score: 0.7368
104
+ 2025-09-18 17:58:53,305 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0571 | Val mean-roc_auc_score: 0.7358
105
+ 2025-09-18 17:59:09,927 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0615 | Val mean-roc_auc_score: 0.7373
106
+ 2025-09-18 17:59:24,069 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7369
107
+ 2025-09-18 17:59:40,421 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0559 | Val mean-roc_auc_score: 0.7282
108
+ 2025-09-18 17:59:54,872 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0536 | Val mean-roc_auc_score: 0.7299
109
+ 2025-09-18 18:00:12,839 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0569 | Val mean-roc_auc_score: 0.7318
110
+ 2025-09-18 18:00:27,129 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0600 | Val mean-roc_auc_score: 0.7324
111
+ 2025-09-18 18:00:41,521 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.7350
112
+ 2025-09-18 18:00:58,391 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0531 | Val mean-roc_auc_score: 0.7237
113
+ 2025-09-18 18:01:12,574 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0530 | Val mean-roc_auc_score: 0.7319
114
+ 2025-09-18 18:01:30,885 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0573 | Val mean-roc_auc_score: 0.7284
115
+ 2025-09-18 18:01:45,265 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7265
116
+ 2025-09-18 18:02:02,083 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0464 | Val mean-roc_auc_score: 0.7354
117
+ 2025-09-18 18:02:16,419 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0522 | Val mean-roc_auc_score: 0.7350
118
+ 2025-09-18 18:02:17,401 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7224
119
+ 2025-09-18 18:02:17,829 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset tox21 at 2025-09-18_18-02-17
120
+ 2025-09-18 18:02:31,850 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1758 | Val mean-roc_auc_score: 0.7451
121
+ 2025-09-18 18:02:31,851 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
122
+ 2025-09-18 18:02:32,526 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7451
123
+ 2025-09-18 18:02:46,175 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1624 | Val mean-roc_auc_score: 0.7531
124
+ 2025-09-18 18:02:46,349 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
125
+ 2025-09-18 18:02:46,894 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7531
126
+ 2025-09-18 18:03:03,547 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1562 | Val mean-roc_auc_score: 0.7680
127
+ 2025-09-18 18:03:03,724 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
128
+ 2025-09-18 18:03:04,282 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7680
129
+ 2025-09-18 18:03:18,243 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1503 | Val mean-roc_auc_score: 0.7613
130
+ 2025-09-18 18:03:34,872 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1492 | Val mean-roc_auc_score: 0.7731
131
+ 2025-09-18 18:03:35,014 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 980
132
+ 2025-09-18 18:03:35,590 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7731
133
+ 2025-09-18 18:03:50,657 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1530 | Val mean-roc_auc_score: 0.7726
134
+ 2025-09-18 18:04:07,724 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1415 | Val mean-roc_auc_score: 0.7698
135
+ 2025-09-18 18:04:21,831 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1360 | Val mean-roc_auc_score: 0.7723
136
+ 2025-09-18 18:04:38,218 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1357 | Val mean-roc_auc_score: 0.7669
137
+ 2025-09-18 18:04:52,284 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1240 | Val mean-roc_auc_score: 0.7744
138
+ 2025-09-18 18:04:52,431 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1960
139
+ 2025-09-18 18:04:52,986 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.7744
140
+ 2025-09-18 18:05:10,687 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1300 | Val mean-roc_auc_score: 0.7692
141
+ 2025-09-18 18:05:25,285 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1124 | Val mean-roc_auc_score: 0.7644
142
+ 2025-09-18 18:05:41,786 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1113 | Val mean-roc_auc_score: 0.7650
143
+ 2025-09-18 18:05:55,539 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1179 | Val mean-roc_auc_score: 0.7604
144
+ 2025-09-18 18:06:12,109 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1102 | Val mean-roc_auc_score: 0.7631
145
+ 2025-09-18 18:06:26,313 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1016 | Val mean-roc_auc_score: 0.7480
146
+ 2025-09-18 18:06:42,821 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0947 | Val mean-roc_auc_score: 0.7538
147
+ 2025-09-18 18:06:56,487 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1122 | Val mean-roc_auc_score: 0.7532
148
+ 2025-09-18 18:07:13,483 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1009 | Val mean-roc_auc_score: 0.7515
149
+ 2025-09-18 18:07:27,541 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1047 | Val mean-roc_auc_score: 0.7446
150
+ 2025-09-18 18:07:42,600 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0850 | Val mean-roc_auc_score: 0.7385
151
+ 2025-09-18 18:07:59,008 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0866 | Val mean-roc_auc_score: 0.7449
152
+ 2025-09-18 18:08:15,306 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0903 | Val mean-roc_auc_score: 0.7436
153
+ 2025-09-18 18:08:28,740 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0771 | Val mean-roc_auc_score: 0.7410
154
+ 2025-09-18 18:08:45,010 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0894 | Val mean-roc_auc_score: 0.7414
155
+ 2025-09-18 18:08:59,610 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0866 | Val mean-roc_auc_score: 0.7449
156
+ 2025-09-18 18:09:16,281 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0842 | Val mean-roc_auc_score: 0.7418
157
+ 2025-09-18 18:09:30,085 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0877 | Val mean-roc_auc_score: 0.7484
158
+ 2025-09-18 18:09:46,445 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0800 | Val mean-roc_auc_score: 0.7340
159
+ 2025-09-18 18:10:00,114 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.7432
160
+ 2025-09-18 18:10:17,305 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0798 | Val mean-roc_auc_score: 0.7403
161
+ 2025-09-18 18:10:31,604 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0764 | Val mean-roc_auc_score: 0.7456
162
+ 2025-09-18 18:10:47,076 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0827 | Val mean-roc_auc_score: 0.7381
163
+ 2025-09-18 18:11:03,127 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7413
164
+ 2025-09-18 18:11:17,381 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.7354
165
+ 2025-09-18 18:11:34,553 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.7406
166
+ 2025-09-18 18:11:49,158 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0769 | Val mean-roc_auc_score: 0.7304
167
+ 2025-09-18 18:12:05,850 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0798 | Val mean-roc_auc_score: 0.7304
168
+ 2025-09-18 18:12:19,003 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0803 | Val mean-roc_auc_score: 0.7337
169
+ 2025-09-18 18:12:34,282 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.7374
170
+ 2025-09-18 18:12:50,521 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7297
171
+ 2025-09-18 18:13:06,632 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0742 | Val mean-roc_auc_score: 0.7393
172
+ 2025-09-18 18:13:23,286 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0695 | Val mean-roc_auc_score: 0.7273
173
+ 2025-09-18 18:13:37,168 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0755 | Val mean-roc_auc_score: 0.7303
174
+ 2025-09-18 18:13:52,412 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0770 | Val mean-roc_auc_score: 0.7366
175
+ 2025-09-18 18:14:08,068 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.7367
176
+ 2025-09-18 18:14:23,671 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0739 | Val mean-roc_auc_score: 0.7313
177
+ 2025-09-18 18:14:38,913 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0649 | Val mean-roc_auc_score: 0.7301
178
+ 2025-09-18 18:14:53,711 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0820 | Val mean-roc_auc_score: 0.7328
179
+ 2025-09-18 18:15:09,118 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0678 | Val mean-roc_auc_score: 0.7246
180
+ 2025-09-18 18:15:24,679 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0641 | Val mean-roc_auc_score: 0.7260
181
+ 2025-09-18 18:15:41,683 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0673 | Val mean-roc_auc_score: 0.7232
182
+ 2025-09-18 18:15:57,262 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0621 | Val mean-roc_auc_score: 0.7235
183
+ 2025-09-18 18:16:12,581 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0696 | Val mean-roc_auc_score: 0.7289
184
+ 2025-09-18 18:16:27,522 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0621 | Val mean-roc_auc_score: 0.7307
185
+ 2025-09-18 18:16:44,467 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0646 | Val mean-roc_auc_score: 0.7293
186
+ 2025-09-18 18:17:00,231 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.7318
187
+ 2025-09-18 18:17:16,605 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0639 | Val mean-roc_auc_score: 0.7318
188
+ 2025-09-18 18:17:30,635 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.7302
189
+ 2025-09-18 18:17:47,561 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0628 | Val mean-roc_auc_score: 0.7271
190
+ 2025-09-18 18:18:01,829 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0589 | Val mean-roc_auc_score: 0.7292
191
+ 2025-09-18 18:18:19,996 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0616 | Val mean-roc_auc_score: 0.7335
192
+ 2025-09-18 18:18:34,709 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0602 | Val mean-roc_auc_score: 0.7301
193
+ 2025-09-18 18:18:51,312 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0593 | Val mean-roc_auc_score: 0.7241
194
+ 2025-09-18 18:19:05,831 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7287
195
+ 2025-09-18 18:19:22,240 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0690 | Val mean-roc_auc_score: 0.7257
196
+ 2025-09-18 18:19:38,495 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0623 | Val mean-roc_auc_score: 0.7283
197
+ 2025-09-18 18:19:56,054 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0603 | Val mean-roc_auc_score: 0.7261
198
+ 2025-09-18 18:20:10,165 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0667 | Val mean-roc_auc_score: 0.7333
199
+ 2025-09-18 18:20:26,944 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0652 | Val mean-roc_auc_score: 0.7163
200
+ 2025-09-18 18:20:41,132 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0703 | Val mean-roc_auc_score: 0.7279
201
+ 2025-09-18 18:20:59,012 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0641 | Val mean-roc_auc_score: 0.7297
202
+ 2025-09-18 18:21:12,900 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0649 | Val mean-roc_auc_score: 0.7302
203
+ 2025-09-18 18:21:29,517 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7354
204
+ 2025-09-18 18:21:43,668 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0575 | Val mean-roc_auc_score: 0.7299
205
+ 2025-09-18 18:22:01,266 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0576 | Val mean-roc_auc_score: 0.7333
206
+ 2025-09-18 18:22:17,460 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7254
207
+ 2025-09-18 18:22:33,508 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0561 | Val mean-roc_auc_score: 0.7256
208
+ 2025-09-18 18:22:49,710 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7298
209
+ 2025-09-18 18:23:06,798 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0574 | Val mean-roc_auc_score: 0.7240
210
+ 2025-09-18 18:23:23,487 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7278
211
+ 2025-09-18 18:23:38,290 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0621 | Val mean-roc_auc_score: 0.7245
212
+ 2025-09-18 18:23:53,932 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0572 | Val mean-roc_auc_score: 0.7288
213
+ 2025-09-18 18:24:09,367 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0571 | Val mean-roc_auc_score: 0.7181
214
+ 2025-09-18 18:24:24,451 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.7256
215
+ 2025-09-18 18:24:40,662 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0583 | Val mean-roc_auc_score: 0.7299
216
+ 2025-09-18 18:24:57,203 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0568 | Val mean-roc_auc_score: 0.7209
217
+ 2025-09-18 18:25:13,339 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0592 | Val mean-roc_auc_score: 0.7250
218
+ 2025-09-18 18:25:28,985 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0522 | Val mean-roc_auc_score: 0.7235
219
+ 2025-09-18 18:25:45,357 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7309
220
+ 2025-09-18 18:26:01,097 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0577 | Val mean-roc_auc_score: 0.7277
221
+ 2025-09-18 18:26:23,583 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0583 | Val mean-roc_auc_score: 0.7301
222
+ 2025-09-18 18:26:34,195 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0530 | Val mean-roc_auc_score: 0.7308
223
+ 2025-09-18 18:26:50,499 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.7258
224
+ 2025-09-18 18:27:07,946 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0516 | Val mean-roc_auc_score: 0.7265
225
+ 2025-09-18 18:27:22,991 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0608 | Val mean-roc_auc_score: 0.7217
226
+ 2025-09-18 18:27:41,994 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0612 | Val mean-roc_auc_score: 0.7216
227
+ 2025-09-18 18:27:55,802 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0537 | Val mean-roc_auc_score: 0.7232
228
+ 2025-09-18 18:28:13,450 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0762 | Val mean-roc_auc_score: 0.7245
229
+ 2025-09-18 18:28:28,306 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0559 | Val mean-roc_auc_score: 0.7256
230
+ 2025-09-18 18:28:29,454 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7255
231
+ 2025-09-18 18:28:29,915 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset tox21 at 2025-09-18_18-28-29
232
+ 2025-09-18 18:28:45,436 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1771 | Val mean-roc_auc_score: 0.7343
233
+ 2025-09-18 18:28:45,436 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
234
+ 2025-09-18 18:28:46,053 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7343
235
+ 2025-09-18 18:29:00,658 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1617 | Val mean-roc_auc_score: 0.7502
236
+ 2025-09-18 18:29:00,831 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
237
+ 2025-09-18 18:29:01,381 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7502
238
+ 2025-09-18 18:29:18,588 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1605 | Val mean-roc_auc_score: 0.7649
239
+ 2025-09-18 18:29:18,769 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
240
+ 2025-09-18 18:29:19,304 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7649
241
+ 2025-09-18 18:29:35,894 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1451 | Val mean-roc_auc_score: 0.7578
242
+ 2025-09-18 18:29:58,900 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1484 | Val mean-roc_auc_score: 0.7710
243
+ 2025-09-18 18:29:59,051 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 980
244
+ 2025-09-18 18:29:54,185 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7710
245
+ 2025-09-18 18:30:12,708 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1390 | Val mean-roc_auc_score: 0.7724
246
+ 2025-09-18 18:30:13,330 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1176
247
+ 2025-09-18 18:30:13,923 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7724
248
+ 2025-09-18 18:30:28,976 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1372 | Val mean-roc_auc_score: 0.7757
249
+ 2025-09-18 18:30:29,150 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1372
250
+ 2025-09-18 18:30:29,676 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.7757
251
+ 2025-09-18 18:30:47,355 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1406 | Val mean-roc_auc_score: 0.7708
252
+ 2025-09-18 18:31:01,761 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1279 | Val mean-roc_auc_score: 0.7698
253
+ 2025-09-18 18:31:18,913 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1276 | Val mean-roc_auc_score: 0.7849
254
+ 2025-09-18 18:31:19,066 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1960
255
+ 2025-09-18 18:31:19,616 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.7849
256
+ 2025-09-18 18:31:35,714 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1244 | Val mean-roc_auc_score: 0.7643
257
+ 2025-09-18 18:31:53,717 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1232 | Val mean-roc_auc_score: 0.7641
258
+ 2025-09-18 18:32:08,658 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1100 | Val mean-roc_auc_score: 0.7738
259
+ 2025-09-18 18:32:25,710 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1108 | Val mean-roc_auc_score: 0.7645
260
+ 2025-09-18 18:32:40,595 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.7621
261
+ 2025-09-18 18:32:55,526 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1068 | Val mean-roc_auc_score: 0.7646
262
+ 2025-09-18 18:33:13,563 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1074 | Val mean-roc_auc_score: 0.7620
263
+ 2025-09-18 18:33:27,375 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1032 | Val mean-roc_auc_score: 0.7574
264
+ 2025-09-18 18:33:44,424 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1120 | Val mean-roc_auc_score: 0.7526
265
+ 2025-09-18 18:33:58,834 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0996 | Val mean-roc_auc_score: 0.7587
266
+ 2025-09-18 18:34:17,040 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0986 | Val mean-roc_auc_score: 0.7595
267
+ 2025-09-18 18:34:31,571 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0977 | Val mean-roc_auc_score: 0.7579
268
+ 2025-09-18 18:34:48,632 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0845 | Val mean-roc_auc_score: 0.7542
269
+ 2025-09-18 18:35:03,280 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.7482
270
+ 2025-09-18 18:35:21,229 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0869 | Val mean-roc_auc_score: 0.7562
271
+ 2025-09-18 18:35:36,290 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.7479
272
+ 2025-09-18 18:35:54,559 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0815 | Val mean-roc_auc_score: 0.7425
273
+ 2025-09-18 18:36:08,133 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0852 | Val mean-roc_auc_score: 0.7382
274
+ 2025-09-18 18:36:25,916 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0818 | Val mean-roc_auc_score: 0.7456
275
+ 2025-09-18 18:36:40,406 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0828 | Val mean-roc_auc_score: 0.7456
276
+ 2025-09-18 18:36:56,131 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0814 | Val mean-roc_auc_score: 0.7468
277
+ 2025-09-18 18:37:13,689 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0816 | Val mean-roc_auc_score: 0.7437
278
+ 2025-09-18 18:37:27,514 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0767 | Val mean-roc_auc_score: 0.7420
279
+ 2025-09-18 18:37:45,189 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7452
280
+ 2025-09-18 18:37:59,337 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0760 | Val mean-roc_auc_score: 0.7518
281
+ 2025-09-18 18:38:18,219 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0748 | Val mean-roc_auc_score: 0.7451
282
+ 2025-09-18 18:38:33,439 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.7498
283
+ 2025-09-18 18:38:50,783 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7429
284
+ 2025-09-18 18:39:05,445 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0767 | Val mean-roc_auc_score: 0.7418
285
+ 2025-09-18 18:39:22,697 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0711 | Val mean-roc_auc_score: 0.7338
286
+ 2025-09-18 18:39:37,654 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0812 | Val mean-roc_auc_score: 0.7391
287
+ 2025-09-18 18:39:56,193 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7307
288
+ 2025-09-18 18:40:10,870 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0770 | Val mean-roc_auc_score: 0.7277
289
+ 2025-09-18 18:40:26,124 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0706 | Val mean-roc_auc_score: 0.7322
290
+ 2025-09-18 18:40:44,486 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0730 | Val mean-roc_auc_score: 0.7341
291
+ 2025-09-18 18:41:00,346 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.7328
292
+ 2025-09-18 18:41:18,793 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.7399
293
+ 2025-09-18 18:41:33,489 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0845 | Val mean-roc_auc_score: 0.7393
294
+ 2025-09-18 18:41:50,895 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7376
295
+ 2025-09-18 18:42:04,836 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0656 | Val mean-roc_auc_score: 0.7426
296
+ 2025-09-18 18:42:22,425 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.7467
297
+ 2025-09-18 18:42:38,908 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0676 | Val mean-roc_auc_score: 0.7448
298
+ 2025-09-18 18:42:56,283 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.7343
299
+ 2025-09-18 18:43:10,606 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0632 | Val mean-roc_auc_score: 0.7364
300
+ 2025-09-18 18:43:25,873 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0660 | Val mean-roc_auc_score: 0.7318
301
+ 2025-09-18 18:43:43,592 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0633 | Val mean-roc_auc_score: 0.7361
302
+ 2025-09-18 18:43:59,458 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0621 | Val mean-roc_auc_score: 0.7365
303
+ 2025-09-18 18:44:16,432 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0607 | Val mean-roc_auc_score: 0.7397
304
+ 2025-09-18 18:44:30,318 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.7384
305
+ 2025-09-18 18:44:47,613 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0602 | Val mean-roc_auc_score: 0.7278
306
+ 2025-09-18 18:45:01,941 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.7401
307
+ 2025-09-18 18:45:21,004 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0610 | Val mean-roc_auc_score: 0.7353
308
+ 2025-09-18 18:45:35,863 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0628 | Val mean-roc_auc_score: 0.7291
309
+ 2025-09-18 18:45:53,296 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0646 | Val mean-roc_auc_score: 0.7247
310
+ 2025-09-18 18:46:08,173 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0633 | Val mean-roc_auc_score: 0.7322
311
+ 2025-09-18 18:46:26,496 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0616 | Val mean-roc_auc_score: 0.7281
312
+ 2025-09-18 18:46:42,659 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0615 | Val mean-roc_auc_score: 0.7350
313
+ 2025-09-18 18:46:57,206 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7364
314
+ 2025-09-18 18:47:14,583 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0596 | Val mean-roc_auc_score: 0.7267
315
+ 2025-09-18 18:47:29,451 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0648 | Val mean-roc_auc_score: 0.7353
316
+ 2025-09-18 18:47:46,831 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0562 | Val mean-roc_auc_score: 0.7283
317
+ 2025-09-18 18:48:02,574 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0570 | Val mean-roc_auc_score: 0.7350
318
+ 2025-09-18 18:48:19,850 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7241
319
+ 2025-09-18 18:48:33,719 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.7286
320
+ 2025-09-18 18:48:51,825 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7317
321
+ 2025-09-18 18:49:06,605 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0583 | Val mean-roc_auc_score: 0.7252
322
+ 2025-09-18 18:49:25,265 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7316
323
+ 2025-09-18 18:49:39,572 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0554 | Val mean-roc_auc_score: 0.7286
324
+ 2025-09-18 18:49:56,335 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0599 | Val mean-roc_auc_score: 0.7361
325
+ 2025-09-18 18:50:11,054 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0555 | Val mean-roc_auc_score: 0.7263
326
+ 2025-09-18 18:50:25,963 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0551 | Val mean-roc_auc_score: 0.7302
327
+ 2025-09-18 18:50:44,844 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0532 | Val mean-roc_auc_score: 0.7230
328
+ 2025-09-18 18:50:59,275 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0549 | Val mean-roc_auc_score: 0.7259
329
+ 2025-09-18 18:51:17,428 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7265
330
+ 2025-09-18 18:51:31,982 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0526 | Val mean-roc_auc_score: 0.7342
331
+ 2025-09-18 18:51:49,976 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0594 | Val mean-roc_auc_score: 0.7278
332
+ 2025-09-18 18:52:05,798 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0553 | Val mean-roc_auc_score: 0.7327
333
+ 2025-09-18 18:52:23,029 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0553 | Val mean-roc_auc_score: 0.7298
334
+ 2025-09-18 18:52:37,997 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0579 | Val mean-roc_auc_score: 0.7300
335
+ 2025-09-18 18:52:56,470 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0520 | Val mean-roc_auc_score: 0.7282
336
+ 2025-09-18 18:53:12,102 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0599 | Val mean-roc_auc_score: 0.7356
337
+ 2025-09-18 18:53:29,045 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0535 | Val mean-roc_auc_score: 0.7356
338
+ 2025-09-18 18:53:46,131 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0525 | Val mean-roc_auc_score: 0.7293
339
+ 2025-09-18 18:54:01,404 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0570 | Val mean-roc_auc_score: 0.7313
340
+ 2025-09-18 18:54:19,227 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0531 | Val mean-roc_auc_score: 0.7330
341
+ 2025-09-18 18:54:33,717 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0581 | Val mean-roc_auc_score: 0.7274
342
+ 2025-09-18 18:54:52,638 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0505 | Val mean-roc_auc_score: 0.7304
343
+ 2025-09-18 18:55:08,074 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0486 | Val mean-roc_auc_score: 0.7303
344
+ 2025-09-18 18:55:31,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7177
345
+ 2025-09-18 18:55:45,332 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0531 | Val mean-roc_auc_score: 0.7335
346
+ 2025-09-18 18:55:46,519 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7311
347
+ 2025-09-18 18:55:47,012 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7263, Std Dev: 0.0036
logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_bace_regression_epochs100_batch_size32_20250918_171320.log ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 17:13:20,108 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Running benchmark for dataset: bace_regression
2
+ 2025-09-18 17:13:20,108 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - dataset: bace_regression, tasks: ['pIC50'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 17:13:20,112 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bace_regression at 2025-09-18_17-13-20
4
+ 2025-09-18 17:13:28,917 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5362 | Val rms_score: 0.6470
5
+ 2025-09-18 17:13:28,918 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
6
+ 2025-09-18 17:13:29,552 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.6470
7
+ 2025-09-18 17:13:37,201 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2862 | Val rms_score: 0.6693
8
+ 2025-09-18 17:13:46,631 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2589 | Val rms_score: 0.7100
9
+ 2025-09-18 17:13:56,858 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2015 | Val rms_score: 0.6879
10
+ 2025-09-18 17:14:06,263 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1743 | Val rms_score: 0.6315
11
+ 2025-09-18 17:14:06,401 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 190
12
+ 2025-09-18 17:14:06,943 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.6315
13
+ 2025-09-18 17:14:13,941 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1574 | Val rms_score: 0.8083
14
+ 2025-09-18 17:14:24,294 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1554 | Val rms_score: 0.8296
15
+ 2025-09-18 17:14:33,435 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1602 | Val rms_score: 0.6962
16
+ 2025-09-18 17:14:41,210 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1225 | Val rms_score: 0.7183
17
+ 2025-09-18 17:14:49,847 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1225 | Val rms_score: 0.7980
18
+ 2025-09-18 17:14:59,132 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1146 | Val rms_score: 0.6597
19
+ 2025-09-18 17:15:09,209 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1094 | Val rms_score: 0.8659
20
+ 2025-09-18 17:15:16,098 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0962 | Val rms_score: 0.8673
21
+ 2025-09-18 17:15:24,775 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0962 | Val rms_score: 0.7499
22
+ 2025-09-18 17:15:34,199 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0855 | Val rms_score: 0.7262
23
+ 2025-09-18 17:15:41,954 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0747 | Val rms_score: 0.7745
24
+ 2025-09-18 17:15:52,116 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0810 | Val rms_score: 0.7439
25
+ 2025-09-18 17:16:01,076 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0822 | Val rms_score: 0.7672
26
+ 2025-09-18 17:16:08,287 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0792 | Val rms_score: 0.7078
27
+ 2025-09-18 17:16:18,094 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0761 | Val rms_score: 0.7006
28
+ 2025-09-18 17:16:27,298 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0744 | Val rms_score: 0.7138
29
+ 2025-09-18 17:16:37,422 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0768 | Val rms_score: 0.7473
30
+ 2025-09-18 17:16:44,533 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0691 | Val rms_score: 0.7790
31
+ 2025-09-18 17:16:54,488 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0674 | Val rms_score: 0.7416
32
+ 2025-09-18 17:17:04,721 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0637 | Val rms_score: 0.7695
33
+ 2025-09-18 17:17:11,576 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0683 | Val rms_score: 0.7254
34
+ 2025-09-18 17:17:24,075 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0748 | Val rms_score: 0.7247
35
+ 2025-09-18 17:17:33,862 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0822 | Val rms_score: 0.8309
36
+ 2025-09-18 17:17:40,556 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0791 | Val rms_score: 0.7266
37
+ 2025-09-18 17:17:50,650 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0567 | Val rms_score: 0.7348
38
+ 2025-09-18 17:18:00,160 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0543 | Val rms_score: 0.6666
39
+ 2025-09-18 17:18:08,176 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0581 | Val rms_score: 0.7241
40
+ 2025-09-18 17:18:17,542 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0539 | Val rms_score: 0.8005
41
+ 2025-09-18 17:18:27,491 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0520 | Val rms_score: 0.7254
42
+ 2025-09-18 17:18:37,788 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0513 | Val rms_score: 0.6936
43
+ 2025-09-18 17:18:44,571 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0498 | Val rms_score: 0.7455
44
+ 2025-09-18 17:18:54,989 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0472 | Val rms_score: 0.7480
45
+ 2025-09-18 17:19:04,837 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0493 | Val rms_score: 0.7675
46
+ 2025-09-18 17:19:11,967 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0489 | Val rms_score: 0.8136
47
+ 2025-09-18 17:19:22,123 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0566 | Val rms_score: 0.7603
48
+ 2025-09-18 17:19:31,270 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0458 | Val rms_score: 0.7868
49
+ 2025-09-18 17:19:38,812 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0477 | Val rms_score: 0.7805
50
+ 2025-09-18 17:19:48,305 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0457 | Val rms_score: 0.8085
51
+ 2025-09-18 17:19:58,429 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0463 | Val rms_score: 0.7693
52
+ 2025-09-18 17:20:07,760 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0523 | Val rms_score: 0.7259
53
+ 2025-09-18 17:20:15,162 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0516 | Val rms_score: 0.7604
54
+ 2025-09-18 17:20:25,564 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0510 | Val rms_score: 0.7903
55
+ 2025-09-18 17:20:34,921 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0465 | Val rms_score: 0.7994
56
+ 2025-09-18 17:20:42,492 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0440 | Val rms_score: 0.7235
57
+ 2025-09-18 17:20:51,897 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0434 | Val rms_score: 0.8259
58
+ 2025-09-18 17:21:00,559 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0467 | Val rms_score: 0.8070
59
+ 2025-09-18 17:21:07,724 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0452 | Val rms_score: 0.7628
60
+ 2025-09-18 17:21:18,913 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0480 | Val rms_score: 0.7772
61
+ 2025-09-18 17:21:27,588 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0448 | Val rms_score: 0.8267
62
+ 2025-09-18 17:21:33,723 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0500 | Val rms_score: 0.7640
63
+ 2025-09-18 17:21:39,523 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0407 | Val rms_score: 0.7557
64
+ 2025-09-18 17:21:43,065 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0370 | Val rms_score: 0.8080
65
+ 2025-09-18 17:21:48,804 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0295 | Val rms_score: 0.7998
66
+ 2025-09-18 17:21:54,567 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0389 | Val rms_score: 0.7257
67
+ 2025-09-18 17:22:00,323 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0372 | Val rms_score: 0.7712
68
+ 2025-09-18 17:22:06,117 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0408 | Val rms_score: 0.7932
69
+ 2025-09-18 17:22:09,730 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0393 | Val rms_score: 0.8175
70
+ 2025-09-18 17:22:15,723 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0415 | Val rms_score: 0.7319
71
+ 2025-09-18 17:22:21,748 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0374 | Val rms_score: 0.7618
72
+ 2025-09-18 17:22:27,683 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0341 | Val rms_score: 0.7584
73
+ 2025-09-18 17:22:33,603 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0527 | Val rms_score: 0.7648
74
+ 2025-09-18 17:22:39,821 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0372 | Val rms_score: 0.7474
75
+ 2025-09-18 17:22:43,258 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0391 | Val rms_score: 0.7086
76
+ 2025-09-18 17:22:49,170 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0357 | Val rms_score: 0.7336
77
+ 2025-09-18 17:22:55,202 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0347 | Val rms_score: 0.7818
78
+ 2025-09-18 17:23:01,099 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0331 | Val rms_score: 0.7238
79
+ 2025-09-18 17:23:07,317 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0369 | Val rms_score: 0.7170
80
+ 2025-09-18 17:23:10,836 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0356 | Val rms_score: 0.8005
81
+ 2025-09-18 17:23:16,779 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0329 | Val rms_score: 0.7420
82
+ 2025-09-18 17:23:22,652 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0347 | Val rms_score: 0.7254
83
+ 2025-09-18 17:23:28,493 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0393 | Val rms_score: 0.8046
84
+ 2025-09-18 17:23:34,667 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0344 | Val rms_score: 0.8078
85
+ 2025-09-18 17:23:37,969 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0378 | Val rms_score: 0.7521
86
+ 2025-09-18 17:23:44,926 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0347 | Val rms_score: 0.7122
87
+ 2025-09-18 17:23:50,817 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0327 | Val rms_score: 0.7579
88
+ 2025-09-18 17:23:56,606 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0327 | Val rms_score: 0.7117
89
+ 2025-09-18 17:24:02,718 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0330 | Val rms_score: 0.7321
90
+ 2025-09-18 17:24:08,583 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0323 | Val rms_score: 0.7639
91
+ 2025-09-18 17:24:11,982 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0335 | Val rms_score: 0.7820
92
+ 2025-09-18 17:24:17,673 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0346 | Val rms_score: 0.7775
93
+ 2025-09-18 17:24:23,542 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0337 | Val rms_score: 0.7578
94
+ 2025-09-18 17:24:29,686 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0423 | Val rms_score: 0.7657
95
+ 2025-09-18 17:24:35,423 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0337 | Val rms_score: 0.7833
96
+ 2025-09-18 17:24:38,670 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0337 | Val rms_score: 0.7793
97
+ 2025-09-18 17:24:44,339 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0406 | Val rms_score: 0.7949
98
+ 2025-09-18 17:24:50,190 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0315 | Val rms_score: 0.7415
99
+ 2025-09-18 17:24:56,272 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0364 | Val rms_score: 0.7496
100
+ 2025-09-18 17:25:02,135 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0365 | Val rms_score: 0.7280
101
+ 2025-09-18 17:25:08,035 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0319 | Val rms_score: 0.7850
102
+ 2025-09-18 17:25:11,536 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0297 | Val rms_score: 0.7224
103
+ 2025-09-18 17:25:17,315 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0296 | Val rms_score: 0.7771
104
+ 2025-09-18 17:25:23,306 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0302 | Val rms_score: 0.7814
105
+ 2025-09-18 17:25:29,345 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0334 | Val rms_score: 0.7295
106
+ 2025-09-18 17:25:34,981 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0304 | Val rms_score: 0.8043
107
+ 2025-09-18 17:25:38,231 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0321 | Val rms_score: 0.7310
108
+ 2025-09-18 17:25:38,843 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 1.0019
109
+ 2025-09-18 17:25:39,126 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bace_regression at 2025-09-18_17-25-39
110
+ 2025-09-18 17:25:44,217 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5526 | Val rms_score: 0.8398
111
+ 2025-09-18 17:25:44,217 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
112
+ 2025-09-18 17:25:44,835 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.8398
113
+ 2025-09-18 17:25:50,634 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2961 | Val rms_score: 0.7086
114
+ 2025-09-18 17:25:50,800 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 76
115
+ 2025-09-18 17:25:51,374 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7086
116
+ 2025-09-18 17:25:57,155 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2143 | Val rms_score: 0.6783
117
+ 2025-09-18 17:25:57,345 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 114
118
+ 2025-09-18 17:25:57,919 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.6783
119
+ 2025-09-18 17:26:03,720 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2007 | Val rms_score: 0.9306
120
+ 2025-09-18 17:26:09,385 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1727 | Val rms_score: 0.7808
121
+ 2025-09-18 17:26:12,504 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1529 | Val rms_score: 0.8175
122
+ 2025-09-18 17:26:18,339 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1414 | Val rms_score: 0.6851
123
+ 2025-09-18 17:26:23,917 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1309 | Val rms_score: 0.7593
124
+ 2025-09-18 17:26:29,509 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1266 | Val rms_score: 0.8449
125
+ 2025-09-18 17:26:35,041 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1168 | Val rms_score: 0.7541
126
+ 2025-09-18 17:26:38,179 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1207 | Val rms_score: 0.7304
127
+ 2025-09-18 17:26:44,121 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1110 | Val rms_score: 0.6842
128
+ 2025-09-18 17:26:49,798 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0979 | Val rms_score: 0.8025
129
+ 2025-09-18 17:26:55,366 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1094 | Val rms_score: 0.7921
130
+ 2025-09-18 17:27:01,121 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0917 | Val rms_score: 0.7627
131
+ 2025-09-18 17:27:06,995 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1230 | Val rms_score: 0.7440
132
+ 2025-09-18 17:27:10,712 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0831 | Val rms_score: 0.7277
133
+ 2025-09-18 17:27:16,609 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0855 | Val rms_score: 0.8170
134
+ 2025-09-18 17:27:22,426 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0820 | Val rms_score: 0.7156
135
+ 2025-09-18 17:27:28,209 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0773 | Val rms_score: 0.6998
136
+ 2025-09-18 17:27:34,004 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0757 | Val rms_score: 0.6867
137
+ 2025-09-18 17:27:40,111 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0690 | Val rms_score: 0.6851
138
+ 2025-09-18 17:27:43,396 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0658 | Val rms_score: 0.7534
139
+ 2025-09-18 17:27:49,102 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0729 | Val rms_score: 0.7614
140
+ 2025-09-18 17:27:55,067 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0576 | Val rms_score: 0.7642
141
+ 2025-09-18 17:28:00,833 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0625 | Val rms_score: 0.7157
142
+ 2025-09-18 17:28:07,893 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0700 | Val rms_score: 0.7190
143
+ 2025-09-18 17:28:11,380 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0633 | Val rms_score: 0.7146
144
+ 2025-09-18 17:28:17,093 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0859 | Val rms_score: 0.7422
145
+ 2025-09-18 17:28:22,744 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0530 | Val rms_score: 0.7901
146
+ 2025-09-18 17:28:28,389 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0588 | Val rms_score: 0.7610
147
+ 2025-09-18 17:28:34,316 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0500 | Val rms_score: 0.7198
148
+ 2025-09-18 17:28:40,203 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0504 | Val rms_score: 0.7495
149
+ 2025-09-18 17:28:43,587 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0504 | Val rms_score: 0.7336
150
+ 2025-09-18 17:28:49,271 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0497 | Val rms_score: 0.7265
151
+ 2025-09-18 17:28:54,885 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0555 | Val rms_score: 0.7945
152
+ 2025-09-18 17:29:00,839 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0563 | Val rms_score: 0.7175
153
+ 2025-09-18 17:29:06,621 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0596 | Val rms_score: 0.7716
154
+ 2025-09-18 17:29:09,932 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0495 | Val rms_score: 0.7618
155
+ 2025-09-18 17:29:15,830 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0566 | Val rms_score: 0.7650
156
+ 2025-09-18 17:29:21,581 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0604 | Val rms_score: 0.7970
157
+ 2025-09-18 17:29:27,661 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0475 | Val rms_score: 0.7164
158
+ 2025-09-18 17:29:33,419 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0464 | Val rms_score: 0.7996
159
+ 2025-09-18 17:29:39,084 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0479 | Val rms_score: 0.7284
160
+ 2025-09-18 17:29:42,392 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0393 | Val rms_score: 0.7403
161
+ 2025-09-18 17:29:48,179 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0419 | Val rms_score: 0.7432
162
+ 2025-09-18 17:29:54,238 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0407 | Val rms_score: 0.7680
163
+ 2025-09-18 17:30:00,038 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0462 | Val rms_score: 0.7050
164
+ 2025-09-18 17:30:05,917 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0405 | Val rms_score: 0.6837
165
+ 2025-09-18 17:30:09,163 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0403 | Val rms_score: 0.7485
166
+ 2025-09-18 17:30:14,913 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0405 | Val rms_score: 0.7130
167
+ 2025-09-18 17:30:20,977 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0413 | Val rms_score: 0.7312
168
+ 2025-09-18 17:30:27,654 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0469 | Val rms_score: 0.7794
169
+ 2025-09-18 17:30:33,354 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0426 | Val rms_score: 0.7363
170
+ 2025-09-18 17:30:39,029 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0440 | Val rms_score: 0.7404
171
+ 2025-09-18 17:30:42,274 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0505 | Val rms_score: 0.8027
172
+ 2025-09-18 17:30:48,177 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0382 | Val rms_score: 0.7456
173
+ 2025-09-18 17:30:53,882 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0591 | Val rms_score: 0.7836
174
+ 2025-09-18 17:30:59,476 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0393 | Val rms_score: 0.7192
175
+ 2025-09-18 17:31:05,049 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0393 | Val rms_score: 0.7587
176
+ 2025-09-18 17:31:10,755 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0378 | Val rms_score: 0.7577
177
+ 2025-09-18 17:31:14,186 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0380 | Val rms_score: 0.7629
178
+ 2025-09-18 17:31:19,766 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0411 | Val rms_score: 0.7470
179
+ 2025-09-18 17:31:25,382 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0398 | Val rms_score: 0.7482
180
+ 2025-09-18 17:31:31,135 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0461 | Val rms_score: 0.6949
181
+ 2025-09-18 17:31:36,835 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0481 | Val rms_score: 0.7238
182
+ 2025-09-18 17:31:40,165 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0345 | Val rms_score: 0.7389
183
+ 2025-09-18 17:31:45,829 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0345 | Val rms_score: 0.7475
184
+ 2025-09-18 17:31:51,459 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0426 | Val rms_score: 0.7735
185
+ 2025-09-18 17:31:57,169 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0399 | Val rms_score: 0.7447
186
+ 2025-09-18 17:32:02,729 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0360 | Val rms_score: 0.7227
187
+ 2025-09-18 17:32:08,640 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0391 | Val rms_score: 0.8290
188
+ 2025-09-18 17:32:11,787 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0380 | Val rms_score: 0.7388
189
+ 2025-09-18 17:32:17,419 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0449 | Val rms_score: 0.7839
190
+ 2025-09-18 17:32:23,165 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0380 | Val rms_score: 0.7115
191
+ 2025-09-18 17:32:28,815 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0354 | Val rms_score: 0.7523
192
+ 2025-09-18 17:32:34,829 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0338 | Val rms_score: 0.7480
193
+ 2025-09-18 17:32:40,495 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0317 | Val rms_score: 0.7523
194
+ 2025-09-18 17:32:44,774 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0200 | Val rms_score: 0.7918
195
+ 2025-09-18 17:32:50,504 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0298 | Val rms_score: 0.7265
196
+ 2025-09-18 17:32:56,329 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0341 | Val rms_score: 0.7133
197
+ 2025-09-18 17:33:02,256 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0386 | Val rms_score: 0.7636
198
+ 2025-09-18 17:33:08,011 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0366 | Val rms_score: 0.7846
199
+ 2025-09-18 17:33:11,210 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0350 | Val rms_score: 0.7452
200
+ 2025-09-18 17:33:16,934 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0344 | Val rms_score: 0.7052
201
+ 2025-09-18 17:33:22,547 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0345 | Val rms_score: 0.7514
202
+ 2025-09-18 17:33:28,447 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0239 | Val rms_score: 0.7510
203
+ 2025-09-18 17:33:34,142 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0304 | Val rms_score: 0.7760
204
+ 2025-09-18 17:33:39,929 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0317 | Val rms_score: 0.7596
205
+ 2025-09-18 17:33:43,174 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0359 | Val rms_score: 0.7448
206
+ 2025-09-18 17:33:48,885 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0304 | Val rms_score: 0.7259
207
+ 2025-09-18 17:33:54,769 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0288 | Val rms_score: 0.7292
208
+ 2025-09-18 17:34:00,467 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0279 | Val rms_score: 0.7751
209
+ 2025-09-18 17:34:06,226 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0345 | Val rms_score: 0.7164
210
+ 2025-09-18 17:34:09,471 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0326 | Val rms_score: 0.7245
211
+ 2025-09-18 17:34:15,224 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0323 | Val rms_score: 0.7350
212
+ 2025-09-18 17:34:21,429 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0263 | Val rms_score: 0.7420
213
+ 2025-09-18 17:34:27,972 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0326 | Val rms_score: 0.7160
214
+ 2025-09-18 17:34:36,726 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0345 | Val rms_score: 0.7388
215
+ 2025-09-18 17:34:42,981 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0308 | Val rms_score: 0.7508
216
+ 2025-09-18 17:34:43,947 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 0.9634
217
+ 2025-09-18 17:34:44,275 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bace_regression at 2025-09-18_17-34-44
218
+ 2025-09-18 17:34:51,312 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5362 | Val rms_score: 0.6726
219
+ 2025-09-18 17:34:51,312 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
220
+ 2025-09-18 17:34:51,913 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.6726
221
+ 2025-09-18 17:35:00,590 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2812 | Val rms_score: 0.6692
222
+ 2025-09-18 17:35:00,769 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 76
223
+ 2025-09-18 17:35:01,369 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.6692
224
+ 2025-09-18 17:35:08,578 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2589 | Val rms_score: 0.6894
225
+ 2025-09-18 17:35:17,417 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2188 | Val rms_score: 0.7468
226
+ 2025-09-18 17:35:26,998 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1842 | Val rms_score: 0.7241
227
+ 2025-09-18 17:35:35,865 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1696 | Val rms_score: 0.7738
228
+ 2025-09-18 17:35:43,506 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1406 | Val rms_score: 0.7253
229
+ 2025-09-18 17:35:52,317 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1562 | Val rms_score: 0.8999
230
+ 2025-09-18 17:36:00,650 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1340 | Val rms_score: 0.6999
231
+ 2025-09-18 17:36:09,614 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1160 | Val rms_score: 0.7100
232
+ 2025-09-18 17:36:16,841 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1181 | Val rms_score: 0.7389
233
+ 2025-09-18 17:36:25,753 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1110 | Val rms_score: 0.7168
234
+ 2025-09-18 17:36:35,090 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1160 | Val rms_score: 0.7724
235
+ 2025-09-18 17:36:42,312 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0933 | Val rms_score: 0.8625
236
+ 2025-09-18 17:36:51,276 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0929 | Val rms_score: 0.7844
237
+ 2025-09-18 17:37:00,712 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1099 | Val rms_score: 0.7561
238
+ 2025-09-18 17:37:10,595 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0851 | Val rms_score: 0.8714
239
+ 2025-09-18 17:37:17,018 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0896 | Val rms_score: 0.7511
240
+ 2025-09-18 17:37:26,633 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1001 | Val rms_score: 0.6777
241
+ 2025-09-18 17:37:35,957 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0863 | Val rms_score: 0.7014
242
+ 2025-09-18 17:37:43,001 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0831 | Val rms_score: 0.7105
243
+ 2025-09-18 17:37:52,562 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0720 | Val rms_score: 0.7820
244
+ 2025-09-18 17:38:01,583 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0666 | Val rms_score: 0.7513
245
+ 2025-09-18 17:38:11,090 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0749 | Val rms_score: 0.7455
246
+ 2025-09-18 17:38:17,675 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0641 | Val rms_score: 0.7514
247
+ 2025-09-18 17:38:26,953 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0785 | Val rms_score: 0.6929
248
+ 2025-09-18 17:38:38,341 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0610 | Val rms_score: 0.7316
249
+ 2025-09-18 17:38:45,389 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0621 | Val rms_score: 0.6829
250
+ 2025-09-18 17:38:54,193 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0942 | Val rms_score: 0.7743
251
+ 2025-09-18 17:39:03,786 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0625 | Val rms_score: 0.7341
252
+ 2025-09-18 17:39:10,145 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0584 | Val rms_score: 0.7617
253
+ 2025-09-18 17:39:19,556 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0732 | Val rms_score: 0.7607
254
+ 2025-09-18 17:39:29,359 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0621 | Val rms_score: 0.7556
255
+ 2025-09-18 17:39:38,398 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0535 | Val rms_score: 0.7971
256
+ 2025-09-18 17:39:45,070 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0555 | Val rms_score: 0.7284
257
+ 2025-09-18 17:39:54,678 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0516 | Val rms_score: 0.7741
258
+ 2025-09-18 17:40:04,071 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0449 | Val rms_score: 0.7690
259
+ 2025-09-18 17:40:10,884 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0489 | Val rms_score: 0.7680
260
+ 2025-09-18 17:40:20,295 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0518 | Val rms_score: 0.7279
261
+ 2025-09-18 17:40:28,889 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0566 | Val rms_score: 0.7879
262
+ 2025-09-18 17:40:38,394 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0520 | Val rms_score: 0.7451
263
+ 2025-09-18 17:40:44,998 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0530 | Val rms_score: 0.7172
264
+ 2025-09-18 17:40:54,183 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0494 | Val rms_score: 0.8229
265
+ 2025-09-18 17:41:03,187 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0458 | Val rms_score: 0.7364
266
+ 2025-09-18 17:41:09,795 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0459 | Val rms_score: 0.8454
267
+ 2025-09-18 17:41:19,074 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0428 | Val rms_score: 0.7496
268
+ 2025-09-18 17:41:28,584 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0419 | Val rms_score: 0.7702
269
+ 2025-09-18 17:41:37,637 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0492 | Val rms_score: 0.7471
270
+ 2025-09-18 17:41:44,324 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0444 | Val rms_score: 0.7247
271
+ 2025-09-18 17:41:53,310 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0456 | Val rms_score: 0.7591
272
+ 2025-09-18 17:42:02,910 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0409 | Val rms_score: 0.8076
273
+ 2025-09-18 17:42:09,862 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0471 | Val rms_score: 0.7143
274
+ 2025-09-18 17:42:18,938 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0460 | Val rms_score: 0.7580
275
+ 2025-09-18 17:42:28,294 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0442 | Val rms_score: 0.7363
276
+ 2025-09-18 17:42:37,816 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0405 | Val rms_score: 0.7239
277
+ 2025-09-18 17:42:44,438 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0360 | Val rms_score: 0.7877
278
+ 2025-09-18 17:42:54,116 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0354 | Val rms_score: 0.7521
279
+ 2025-09-18 17:43:03,352 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0349 | Val rms_score: 0.7545
280
+ 2025-09-18 17:43:09,983 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0411 | Val rms_score: 0.8029
281
+ 2025-09-18 17:43:19,083 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0389 | Val rms_score: 0.7645
282
+ 2025-09-18 17:43:28,249 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0425 | Val rms_score: 0.7614
283
+ 2025-09-18 17:43:37,361 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0421 | Val rms_score: 0.8378
284
+ 2025-09-18 17:43:44,056 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0438 | Val rms_score: 0.8079
285
+ 2025-09-18 17:43:52,639 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0398 | Val rms_score: 0.7977
286
+ 2025-09-18 17:44:01,363 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0352 | Val rms_score: 0.8123
287
+ 2025-09-18 17:44:11,478 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0415 | Val rms_score: 0.7758
288
+ 2025-09-18 17:44:18,853 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0339 | Val rms_score: 0.8027
289
+ 2025-09-18 17:44:27,768 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0481 | Val rms_score: 0.8100
290
+ 2025-09-18 17:44:37,391 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0554 | Val rms_score: 0.7531
291
+ 2025-09-18 17:44:44,538 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0487 | Val rms_score: 0.7285
292
+ 2025-09-18 17:44:53,364 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0399 | Val rms_score: 0.7486
293
+ 2025-09-18 17:45:03,141 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0378 | Val rms_score: 0.7326
294
+ 2025-09-18 17:45:10,248 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0358 | Val rms_score: 0.7471
295
+ 2025-09-18 17:45:19,056 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0356 | Val rms_score: 0.7428
296
+ 2025-09-18 17:45:28,692 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0329 | Val rms_score: 0.7263
297
+ 2025-09-18 17:45:38,365 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0310 | Val rms_score: 0.7243
298
+ 2025-09-18 17:45:45,027 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0344 | Val rms_score: 0.7213
299
+ 2025-09-18 17:45:54,442 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0308 | Val rms_score: 0.7670
300
+ 2025-09-18 17:46:04,809 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0234 | Val rms_score: 0.7892
301
+ 2025-09-18 17:46:11,906 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0358 | Val rms_score: 0.7594
302
+ 2025-09-18 17:46:20,684 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0329 | Val rms_score: 0.7644
303
+ 2025-09-18 17:46:30,213 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0342 | Val rms_score: 0.7880
304
+ 2025-09-18 17:46:39,261 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0302 | Val rms_score: 0.7231
305
+ 2025-09-18 17:46:45,856 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0323 | Val rms_score: 0.7068
306
+ 2025-09-18 17:46:55,383 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0375 | Val rms_score: 0.7560
307
+ 2025-09-18 17:47:04,452 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0345 | Val rms_score: 0.7375
308
+ 2025-09-18 17:47:11,369 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0443 | Val rms_score: 0.7193
309
+ 2025-09-18 17:47:20,026 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0323 | Val rms_score: 0.7146
310
+ 2025-09-18 17:47:28,908 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0335 | Val rms_score: 0.7046
311
+ 2025-09-18 17:47:38,363 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0328 | Val rms_score: 0.7149
312
+ 2025-09-18 17:47:45,108 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0310 | Val rms_score: 0.7615
313
+ 2025-09-18 17:47:54,217 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0347 | Val rms_score: 0.7374
314
+ 2025-09-18 17:48:03,737 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0368 | Val rms_score: 0.7492
315
+ 2025-09-18 17:48:10,118 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0335 | Val rms_score: 0.7445
316
+ 2025-09-18 17:48:19,532 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0256 | Val rms_score: 0.7802
317
+ 2025-09-18 17:48:28,777 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0275 | Val rms_score: 0.7754
318
+ 2025-09-18 17:48:37,909 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0292 | Val rms_score: 0.7555
319
+ 2025-09-18 17:48:44,636 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0282 | Val rms_score: 0.7532
320
+ 2025-09-18 17:48:53,839 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0271 | Val rms_score: 0.7289
321
+ 2025-09-18 17:49:02,800 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0304 | Val rms_score: 0.7822
322
+ 2025-09-18 17:49:03,745 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 0.9486
323
+ 2025-09-18 17:49:04,093 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.9713, Std Dev: 0.0224
logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_clearance_epochs100_batch_size32_20250918_194635.log ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 19:46:35,137 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Running benchmark for dataset: clearance
2
+ 2025-09-18 19:46:35,137 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - dataset: clearance, tasks: ['target'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 19:46:35,141 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset clearance at 2025-09-18_19-46-35
4
+ 2025-09-18 19:46:39,356 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 3.6905 | Val rms_score: 62.6914
5
+ 2025-09-18 19:46:39,356 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
6
+ 2025-09-18 19:46:40,298 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 62.6914
7
+ 2025-09-18 19:46:47,823 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.3214 | Val rms_score: 58.2984
8
+ 2025-09-18 19:46:47,991 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
9
+ 2025-09-18 19:46:48,744 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 58.2984
10
+ 2025-09-18 19:46:53,225 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 1.0119 | Val rms_score: 55.5790
11
+ 2025-09-18 19:46:53,411 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 63
12
+ 2025-09-18 19:46:54,219 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 55.5790
13
+ 2025-09-18 19:47:00,450 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.7917 | Val rms_score: 54.1590
14
+ 2025-09-18 19:47:00,622 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 84
15
+ 2025-09-18 19:47:01,156 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 54.1590
16
+ 2025-09-18 19:47:07,613 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4969 | Val rms_score: 55.5332
17
+ 2025-09-18 19:47:12,006 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4524 | Val rms_score: 58.7564
18
+ 2025-09-18 19:47:18,622 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3229 | Val rms_score: 57.3789
19
+ 2025-09-18 19:47:24,883 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2708 | Val rms_score: 57.7811
20
+ 2025-09-18 19:47:29,729 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.2009 | Val rms_score: 57.6949
21
+ 2025-09-18 19:47:37,359 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1711 | Val rms_score: 56.3387
22
+ 2025-09-18 19:47:43,713 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1265 | Val rms_score: 56.8482
23
+ 2025-09-18 19:47:48,897 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1138 | Val rms_score: 57.3929
24
+ 2025-09-18 19:47:55,252 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1064 | Val rms_score: 56.5976
25
+ 2025-09-18 19:48:00,990 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1138 | Val rms_score: 56.9020
26
+ 2025-09-18 19:48:05,462 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0896 | Val rms_score: 57.0659
27
+ 2025-09-18 19:48:11,721 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0882 | Val rms_score: 57.0207
28
+ 2025-09-18 19:48:18,308 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0882 | Val rms_score: 56.0088
29
+ 2025-09-18 19:48:23,179 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0733 | Val rms_score: 56.9183
30
+ 2025-09-18 19:48:29,418 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0677 | Val rms_score: 56.1092
31
+ 2025-09-18 19:48:35,443 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0648 | Val rms_score: 56.3149
32
+ 2025-09-18 19:48:40,804 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0629 | Val rms_score: 56.4417
33
+ 2025-09-18 19:48:47,543 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0580 | Val rms_score: 56.8949
34
+ 2025-09-18 19:48:53,914 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0692 | Val rms_score: 55.9307
35
+ 2025-09-18 19:48:59,611 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0918 | Val rms_score: 56.5021
36
+ 2025-09-18 19:49:04,947 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0766 | Val rms_score: 57.5608
37
+ 2025-09-18 19:49:10,801 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0766 | Val rms_score: 55.7029
38
+ 2025-09-18 19:49:17,400 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0707 | Val rms_score: 56.1967
39
+ 2025-09-18 19:49:21,926 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0640 | Val rms_score: 56.1851
40
+ 2025-09-18 19:49:28,199 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0621 | Val rms_score: 56.2926
41
+ 2025-09-18 19:49:34,459 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0543 | Val rms_score: 55.9700
42
+ 2025-09-18 19:49:39,128 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0487 | Val rms_score: 55.6302
43
+ 2025-09-18 19:49:45,729 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0551 | Val rms_score: 55.5203
44
+ 2025-09-18 19:49:51,485 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0491 | Val rms_score: 55.6116
45
+ 2025-09-18 19:49:56,834 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0497 | Val rms_score: 56.1143
46
+ 2025-09-18 19:50:01,821 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0469 | Val rms_score: 55.2848
47
+ 2025-09-18 19:50:08,197 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0495 | Val rms_score: 55.9708
48
+ 2025-09-18 19:50:14,741 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0491 | Val rms_score: 55.6862
49
+ 2025-09-18 19:50:19,056 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0510 | Val rms_score: 55.9657
50
+ 2025-09-18 19:50:25,260 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0467 | Val rms_score: 56.3163
51
+ 2025-09-18 19:50:30,987 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0506 | Val rms_score: 56.3644
52
+ 2025-09-18 19:50:35,899 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0539 | Val rms_score: 55.2901
53
+ 2025-09-18 19:50:42,493 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0480 | Val rms_score: 55.5683
54
+ 2025-09-18 19:50:48,840 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0420 | Val rms_score: 55.8212
55
+ 2025-09-18 19:50:54,253 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0472 | Val rms_score: 55.4570
56
+ 2025-09-18 19:51:00,460 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0394 | Val rms_score: 56.7579
57
+ 2025-09-18 19:51:06,270 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0495 | Val rms_score: 56.3934
58
+ 2025-09-18 19:51:12,951 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0454 | Val rms_score: 55.4423
59
+ 2025-09-18 19:51:19,916 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0364 | Val rms_score: 55.7788
60
+ 2025-09-18 19:51:25,712 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0368 | Val rms_score: 56.2301
61
+ 2025-09-18 19:51:31,863 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0387 | Val rms_score: 55.8655
62
+ 2025-09-18 19:51:38,015 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0372 | Val rms_score: 55.5034
63
+ 2025-09-18 19:51:44,589 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0374 | Val rms_score: 55.1712
64
+ 2025-09-18 19:51:50,710 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0377 | Val rms_score: 56.2826
65
+ 2025-09-18 19:51:56,004 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0406 | Val rms_score: 56.3352
66
+ 2025-09-18 19:52:02,400 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0437 | Val rms_score: 56.4326
67
+ 2025-09-18 19:52:08,681 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0446 | Val rms_score: 55.2594
68
+ 2025-09-18 19:52:13,889 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0420 | Val rms_score: 54.9568
69
+ 2025-09-18 19:52:20,235 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0443 | Val rms_score: 56.9924
70
+ 2025-09-18 19:52:26,021 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0528 | Val rms_score: 55.6995
71
+ 2025-09-18 19:52:30,944 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0383 | Val rms_score: 55.3558
72
+ 2025-09-18 19:52:36,780 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0344 | Val rms_score: 55.3613
73
+ 2025-09-18 19:52:43,374 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0272 | Val rms_score: 55.8361
74
+ 2025-09-18 19:52:48,140 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0320 | Val rms_score: 56.4965
75
+ 2025-09-18 19:52:56,266 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0407 | Val rms_score: 55.7122
76
+ 2025-09-18 19:53:02,289 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0510 | Val rms_score: 55.9896
77
+ 2025-09-18 19:53:07,237 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0340 | Val rms_score: 55.4430
78
+ 2025-09-18 19:53:13,927 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0435 | Val rms_score: 54.9932
79
+ 2025-09-18 19:53:20,206 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0392 | Val rms_score: 54.5117
80
+ 2025-09-18 19:53:25,309 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0350 | Val rms_score: 55.3269
81
+ 2025-09-18 19:53:31,165 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0359 | Val rms_score: 55.0601
82
+ 2025-09-18 19:53:37,583 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0327 | Val rms_score: 54.9859
83
+ 2025-09-18 19:53:43,270 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0335 | Val rms_score: 56.2761
84
+ 2025-09-18 19:53:49,467 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0353 | Val rms_score: 55.8232
85
+ 2025-09-18 19:53:55,755 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0270 | Val rms_score: 55.1594
86
+ 2025-09-18 19:54:01,593 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0307 | Val rms_score: 55.4578
87
+ 2025-09-18 19:54:07,290 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0273 | Val rms_score: 55.7238
88
+ 2025-09-18 19:54:13,970 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0267 | Val rms_score: 55.2494
89
+ 2025-09-18 19:54:20,135 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0333 | Val rms_score: 55.0838
90
+ 2025-09-18 19:54:25,106 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0277 | Val rms_score: 55.5646
91
+ 2025-09-18 19:54:36,479 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0294 | Val rms_score: 55.4062
92
+ 2025-09-18 19:54:37,170 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0306 | Val rms_score: 55.3238
93
+ 2025-09-18 19:54:41,853 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0326 | Val rms_score: 55.8975
94
+ 2025-09-18 19:54:48,177 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0281 | Val rms_score: 55.0471
95
+ 2025-09-18 19:54:54,448 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0286 | Val rms_score: 55.7573
96
+ 2025-09-18 19:54:59,137 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0253 | Val rms_score: 55.1406
97
+ 2025-09-18 19:55:05,469 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0296 | Val rms_score: 55.9900
98
+ 2025-09-18 19:55:11,569 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0275 | Val rms_score: 55.0376
99
+ 2025-09-18 19:55:16,965 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0272 | Val rms_score: 55.5419
100
+ 2025-09-18 19:55:23,275 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0249 | Val rms_score: 55.9060
101
+ 2025-09-18 19:55:29,655 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0281 | Val rms_score: 55.4338
102
+ 2025-09-18 19:55:36,002 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0304 | Val rms_score: 55.4105
103
+ 2025-09-18 19:55:41,993 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0285 | Val rms_score: 54.8041
104
+ 2025-09-18 19:55:48,209 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0260 | Val rms_score: 54.8831
105
+ 2025-09-18 19:55:54,417 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0279 | Val rms_score: 55.2829
106
+ 2025-09-18 19:55:59,612 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0238 | Val rms_score: 55.2855
107
+ 2025-09-18 19:56:06,345 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0220 | Val rms_score: 55.3680
108
+ 2025-09-18 19:56:13,126 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0229 | Val rms_score: 54.5116
109
+ 2025-09-18 19:56:17,934 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0281 | Val rms_score: 55.7827
110
+ 2025-09-18 19:56:24,147 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0253 | Val rms_score: 54.5794
111
+ 2025-09-18 19:56:30,525 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0383 | Val rms_score: 54.1793
112
+ 2025-09-18 19:56:31,202 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 45.1371
113
+ 2025-09-18 19:56:36,645 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset clearance at 2025-09-18_19-56-36
114
+ 2025-09-18 19:56:35,525 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 3.5952 | Val rms_score: 62.9769
115
+ 2025-09-18 19:56:35,525 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
116
+ 2025-09-18 19:56:36,238 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 62.9769
117
+ 2025-09-18 19:56:42,447 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.3274 | Val rms_score: 58.4332
118
+ 2025-09-18 19:56:42,617 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
119
+ 2025-09-18 19:56:43,436 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 58.4332
120
+ 2025-09-18 19:56:50,870 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.9940 | Val rms_score: 50.3284
121
+ 2025-09-18 19:56:51,045 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 63
122
+ 2025-09-18 19:56:56,718 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 50.3284
123
+ 2025-09-18 19:56:57,228 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.7798 | Val rms_score: 52.2096
124
+ 2025-09-18 19:57:03,529 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.6188 | Val rms_score: 53.8042
125
+ 2025-09-18 19:57:09,695 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4256 | Val rms_score: 55.9739
126
+ 2025-09-18 19:57:15,738 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3214 | Val rms_score: 54.1876
127
+ 2025-09-18 19:57:21,430 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2545 | Val rms_score: 55.8690
128
+ 2025-09-18 19:57:27,718 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1756 | Val rms_score: 57.1332
129
+ 2025-09-18 19:57:32,518 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1766 | Val rms_score: 58.1191
130
+ 2025-09-18 19:57:38,902 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1496 | Val rms_score: 56.4547
131
+ 2025-09-18 19:57:45,835 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1257 | Val rms_score: 57.3525
132
+ 2025-09-18 19:57:50,277 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1034 | Val rms_score: 56.6083
133
+ 2025-09-18 19:58:01,697 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1034 | Val rms_score: 56.9140
134
+ 2025-09-18 19:58:02,474 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1010 | Val rms_score: 58.0199
135
+ 2025-09-18 19:58:07,650 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1086 | Val rms_score: 56.6389
136
+ 2025-09-18 19:58:16,003 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1019 | Val rms_score: 56.4618
137
+ 2025-09-18 19:58:22,047 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0815 | Val rms_score: 57.2611
138
+ 2025-09-18 19:58:27,416 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0763 | Val rms_score: 56.9762
139
+ 2025-09-18 19:58:33,659 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0645 | Val rms_score: 56.1339
140
+ 2025-09-18 19:58:39,917 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0662 | Val rms_score: 56.6876
141
+ 2025-09-18 19:58:45,216 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0685 | Val rms_score: 57.7324
142
+ 2025-09-18 19:58:51,479 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0610 | Val rms_score: 56.8543
143
+ 2025-09-18 19:58:56,248 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0908 | Val rms_score: 56.8804
144
+ 2025-09-18 19:59:01,633 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0603 | Val rms_score: 57.3657
145
+ 2025-09-18 19:59:07,520 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0588 | Val rms_score: 56.7870
146
+ 2025-09-18 19:59:14,098 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0618 | Val rms_score: 56.5896
147
+ 2025-09-18 19:59:20,565 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0573 | Val rms_score: 56.0687
148
+ 2025-09-18 19:59:31,835 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0506 | Val rms_score: 56.7242
149
+ 2025-09-18 19:59:32,550 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0528 | Val rms_score: 56.8227
150
+ 2025-09-18 19:59:38,757 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0595 | Val rms_score: 56.5098
151
+ 2025-09-18 19:59:44,880 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0543 | Val rms_score: 56.9100
152
+ 2025-09-18 19:59:51,165 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0569 | Val rms_score: 56.2173
153
+ 2025-09-18 19:59:56,840 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0508 | Val rms_score: 57.3485
154
+ 2025-09-18 20:00:06,853 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0670 | Val rms_score: 56.9618
155
+ 2025-09-18 20:00:07,548 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0878 | Val rms_score: 56.8092
156
+ 2025-09-18 20:00:14,442 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0722 | Val rms_score: 56.2364
157
+ 2025-09-18 20:00:18,832 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0636 | Val rms_score: 57.1362
158
+ 2025-09-18 20:00:25,172 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0633 | Val rms_score: 56.1924
159
+ 2025-09-18 20:00:31,505 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0495 | Val rms_score: 56.1080
160
+ 2025-09-18 20:00:36,360 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0539 | Val rms_score: 56.6285
161
+ 2025-09-18 20:00:42,538 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0640 | Val rms_score: 56.4176
162
+ 2025-09-18 20:00:48,866 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0521 | Val rms_score: 55.9877
163
+ 2025-09-18 20:00:54,193 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0461 | Val rms_score: 56.3941
164
+ 2025-09-18 20:01:00,728 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0476 | Val rms_score: 56.0940
165
+ 2025-09-18 20:01:12,085 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0476 | Val rms_score: 55.9978
166
+ 2025-09-18 20:01:12,684 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0420 | Val rms_score: 55.3136
167
+ 2025-09-18 20:01:20,011 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0605 | Val rms_score: 55.8865
168
+ 2025-09-18 20:01:26,245 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0551 | Val rms_score: 56.4225
169
+ 2025-09-18 20:01:31,821 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0415 | Val rms_score: 56.5296
170
+ 2025-09-18 20:01:37,373 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0374 | Val rms_score: 56.6031
171
+ 2025-09-18 20:01:43,917 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0357 | Val rms_score: 55.8345
172
+ 2025-09-18 20:01:50,268 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0421 | Val rms_score: 56.0313
173
+ 2025-09-18 20:01:56,312 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0411 | Val rms_score: 55.4016
174
+ 2025-09-18 20:02:02,239 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0448 | Val rms_score: 55.3798
175
+ 2025-09-18 20:02:08,496 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0404 | Val rms_score: 56.4387
176
+ 2025-09-18 20:02:13,196 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0430 | Val rms_score: 55.4282
177
+ 2025-09-18 20:02:19,359 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0386 | Val rms_score: 55.7733
178
+ 2025-09-18 20:02:25,718 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0350 | Val rms_score: 55.9944
179
+ 2025-09-18 20:02:30,655 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0391 | Val rms_score: 55.7844
180
+ 2025-09-18 20:02:36,950 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0363 | Val rms_score: 56.2515
181
+ 2025-09-18 20:02:43,106 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0276 | Val rms_score: 55.3067
182
+ 2025-09-18 20:02:48,538 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0398 | Val rms_score: 56.0417
183
+ 2025-09-18 20:02:54,888 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0322 | Val rms_score: 55.4926
184
+ 2025-09-18 20:03:01,162 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0344 | Val rms_score: 56.3395
185
+ 2025-09-18 20:03:06,823 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0359 | Val rms_score: 55.6457
186
+ 2025-09-18 20:03:12,794 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0338 | Val rms_score: 56.3094
187
+ 2025-09-18 20:03:18,981 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0309 | Val rms_score: 56.6489
188
+ 2025-09-18 20:03:25,230 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0378 | Val rms_score: 56.3869
189
+ 2025-09-18 20:03:30,065 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0454 | Val rms_score: 54.8558
190
+ 2025-09-18 20:03:36,310 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0487 | Val rms_score: 53.7985
191
+ 2025-09-18 20:03:43,407 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0482 | Val rms_score: 55.3471
192
+ 2025-09-18 20:03:48,548 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0350 | Val rms_score: 55.7825
193
+ 2025-09-18 20:03:54,779 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0372 | Val rms_score: 55.4171
194
+ 2025-09-18 20:04:01,788 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0316 | Val rms_score: 55.8024
195
+ 2025-09-18 20:04:06,684 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0342 | Val rms_score: 54.9626
196
+ 2025-09-18 20:04:12,846 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0308 | Val rms_score: 55.9100
197
+ 2025-09-18 20:04:19,153 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0327 | Val rms_score: 55.2292
198
+ 2025-09-18 20:04:23,517 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0307 | Val rms_score: 55.2351
199
+ 2025-09-18 20:04:29,861 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0273 | Val rms_score: 55.8314
200
+ 2025-09-18 20:04:36,126 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0381 | Val rms_score: 55.3137
201
+ 2025-09-18 20:04:41,054 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0326 | Val rms_score: 55.6243
202
+ 2025-09-18 20:04:47,183 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0340 | Val rms_score: 55.7637
203
+ 2025-09-18 20:04:52,908 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0316 | Val rms_score: 55.3387
204
+ 2025-09-18 20:04:59,159 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0318 | Val rms_score: 54.8292
205
+ 2025-09-18 20:05:05,003 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0241 | Val rms_score: 55.1223
206
+ 2025-09-18 20:05:11,799 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0333 | Val rms_score: 55.2637
207
+ 2025-09-18 20:05:17,527 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0279 | Val rms_score: 55.4613
208
+ 2025-09-18 20:05:23,408 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0335 | Val rms_score: 55.3967
209
+ 2025-09-18 20:05:29,674 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0294 | Val rms_score: 54.9614
210
+ 2025-09-18 20:05:35,951 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0259 | Val rms_score: 55.5328
211
+ 2025-09-18 20:05:41,466 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0312 | Val rms_score: 55.1459
212
+ 2025-09-18 20:05:47,377 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0262 | Val rms_score: 55.5046
213
+ 2025-09-18 20:05:53,674 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0309 | Val rms_score: 55.5252
214
+ 2025-09-18 20:05:57,877 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0322 | Val rms_score: 55.3762
215
+ 2025-09-18 20:06:05,169 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0312 | Val rms_score: 54.8547
216
+ 2025-09-18 20:06:11,794 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0301 | Val rms_score: 55.4046
217
+ 2025-09-18 20:06:16,277 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0339 | Val rms_score: 54.4464
218
+ 2025-09-18 20:06:22,028 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0326 | Val rms_score: 54.7559
219
+ 2025-09-18 20:06:28,306 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0298 | Val rms_score: 55.1189
220
+ 2025-09-18 20:06:29,028 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 45.1989
221
+ 2025-09-18 20:06:29,369 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset clearance at 2025-09-18_20-06-29
222
+ 2025-09-18 20:06:32,896 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 3.5714 | Val rms_score: 60.9332
223
+ 2025-09-18 20:06:32,896 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
224
+ 2025-09-18 20:06:34,234 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 60.9332
225
+ 2025-09-18 20:06:40,707 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.3274 | Val rms_score: 53.6148
226
+ 2025-09-18 20:06:40,881 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
227
+ 2025-09-18 20:06:41,406 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 53.6148
228
+ 2025-09-18 20:06:47,911 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 1.0238 | Val rms_score: 55.9425
229
+ 2025-09-18 20:06:53,745 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.8155 | Val rms_score: 53.5317
230
+ 2025-09-18 20:06:53,934 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 84
231
+ 2025-09-18 20:06:54,908 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 53.5317
232
+ 2025-09-18 20:07:01,312 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.7750 | Val rms_score: 58.1521
233
+ 2025-09-18 20:07:07,326 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4643 | Val rms_score: 52.0647
234
+ 2025-09-18 20:07:07,331 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 126
235
+ 2025-09-18 20:07:07,913 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 52.0647
236
+ 2025-09-18 20:07:14,259 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4018 | Val rms_score: 53.7220
237
+ 2025-09-18 20:07:20,597 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2976 | Val rms_score: 56.1251
238
+ 2025-09-18 20:07:26,267 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.2262 | Val rms_score: 56.4600
239
+ 2025-09-18 20:07:37,738 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1938 | Val rms_score: 56.7262
240
+ 2025-09-18 20:07:38,387 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1533 | Val rms_score: 55.9643
241
+ 2025-09-18 20:07:44,968 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1324 | Val rms_score: 55.2026
242
+ 2025-09-18 20:07:50,416 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1176 | Val rms_score: 56.2619
243
+ 2025-09-18 20:07:56,694 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1161 | Val rms_score: 55.3389
244
+ 2025-09-18 20:08:02,418 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1135 | Val rms_score: 56.3215
245
+ 2025-09-18 20:08:07,360 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1027 | Val rms_score: 55.1998
246
+ 2025-09-18 20:08:14,052 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0893 | Val rms_score: 56.5436
247
+ 2025-09-18 20:08:20,252 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0766 | Val rms_score: 56.1397
248
+ 2025-09-18 20:08:25,314 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0778 | Val rms_score: 56.8111
249
+ 2025-09-18 20:08:31,608 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0766 | Val rms_score: 56.7268
250
+ 2025-09-18 20:08:37,369 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0774 | Val rms_score: 56.0142
251
+ 2025-09-18 20:08:43,369 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0692 | Val rms_score: 55.9543
252
+ 2025-09-18 20:08:49,595 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0614 | Val rms_score: 54.5612
253
+ 2025-09-18 20:08:55,857 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0977 | Val rms_score: 55.0210
254
+ 2025-09-18 20:09:02,066 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0588 | Val rms_score: 56.5370
255
+ 2025-09-18 20:09:12,822 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0618 | Val rms_score: 55.7142
256
+ 2025-09-18 20:09:13,872 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0502 | Val rms_score: 55.4641
257
+ 2025-09-18 20:09:20,429 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0536 | Val rms_score: 55.6020
258
+ 2025-09-18 20:09:25,408 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0577 | Val rms_score: 54.7361
259
+ 2025-09-18 20:09:31,704 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0554 | Val rms_score: 55.6590
260
+ 2025-09-18 20:09:38,197 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0618 | Val rms_score: 55.4720
261
+ 2025-09-18 20:09:43,720 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0441 | Val rms_score: 55.8740
262
+ 2025-09-18 20:09:49,964 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0517 | Val rms_score: 55.0026
263
+ 2025-09-18 20:09:56,295 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0536 | Val rms_score: 55.7240
264
+ 2025-09-18 20:10:00,492 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0543 | Val rms_score: 55.4706
265
+ 2025-09-18 20:10:06,810 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0528 | Val rms_score: 54.8751
266
+ 2025-09-18 20:10:12,935 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0532 | Val rms_score: 54.9900
267
+ 2025-09-18 20:10:17,729 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0536 | Val rms_score: 55.7495
268
+ 2025-09-18 20:10:24,282 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0477 | Val rms_score: 55.5288
269
+ 2025-09-18 20:10:30,547 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0430 | Val rms_score: 56.2561
270
+ 2025-09-18 20:10:35,884 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0456 | Val rms_score: 54.9387
271
+ 2025-09-18 20:10:42,437 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0445 | Val rms_score: 55.0050
272
+ 2025-09-18 20:10:48,512 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0326 | Val rms_score: 55.5279
273
+ 2025-09-18 20:10:54,342 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0402 | Val rms_score: 57.2854
274
+ 2025-09-18 20:10:58,703 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0480 | Val rms_score: 55.7844
275
+ 2025-09-18 20:11:04,894 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0513 | Val rms_score: 56.3211
276
+ 2025-09-18 20:11:11,429 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0435 | Val rms_score: 54.4806
277
+ 2025-09-18 20:11:17,717 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0525 | Val rms_score: 54.9603
278
+ 2025-09-18 20:11:23,537 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0469 | Val rms_score: 55.6763
279
+ 2025-09-18 20:11:29,824 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0487 | Val rms_score: 54.9869
280
+ 2025-09-18 20:11:34,310 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0480 | Val rms_score: 56.2527
281
+ 2025-09-18 20:11:40,959 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0499 | Val rms_score: 55.4054
282
+ 2025-09-18 20:11:47,111 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0469 | Val rms_score: 56.7236
283
+ 2025-09-18 20:11:51,966 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0428 | Val rms_score: 56.5658
284
+ 2025-09-18 20:11:57,771 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0476 | Val rms_score: 55.6646
285
+ 2025-09-18 20:12:04,019 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0372 | Val rms_score: 54.2266
286
+ 2025-09-18 20:12:09,884 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0378 | Val rms_score: 55.1521
287
+ 2025-09-18 20:12:16,194 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0382 | Val rms_score: 54.7433
288
+ 2025-09-18 20:12:22,515 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0353 | Val rms_score: 54.6575
289
+ 2025-09-18 20:12:28,333 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0391 | Val rms_score: 54.7971
290
+ 2025-09-18 20:12:33,636 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0389 | Val rms_score: 55.9420
291
+ 2025-09-18 20:12:40,313 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0464 | Val rms_score: 54.6538
292
+ 2025-09-18 20:12:46,661 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0426 | Val rms_score: 54.9007
293
+ 2025-09-18 20:12:51,999 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0350 | Val rms_score: 55.0297
294
+ 2025-09-18 20:12:57,752 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0296 | Val rms_score: 54.5114
295
+ 2025-09-18 20:13:04,000 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0316 | Val rms_score: 54.7350
296
+ 2025-09-18 20:13:08,686 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0374 | Val rms_score: 53.4719
297
+ 2025-09-18 20:13:14,933 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0391 | Val rms_score: 55.9387
298
+ 2025-09-18 20:13:21,129 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0285 | Val rms_score: 54.7058
299
+ 2025-09-18 20:13:26,033 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0329 | Val rms_score: 54.6346
300
+ 2025-09-18 20:13:32,432 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0326 | Val rms_score: 54.8063
301
+ 2025-09-18 20:13:38,510 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0387 | Val rms_score: 54.9931
302
+ 2025-09-18 20:13:43,808 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0299 | Val rms_score: 55.2770
303
+ 2025-09-18 20:13:49,599 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0257 | Val rms_score: 54.2790
304
+ 2025-09-18 20:13:55,885 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0255 | Val rms_score: 54.0790
305
+ 2025-09-18 20:14:02,095 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0329 | Val rms_score: 56.0468
306
+ 2025-09-18 20:14:07,431 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0319 | Val rms_score: 54.4033
307
+ 2025-09-18 20:14:13,422 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0409 | Val rms_score: 54.8108
308
+ 2025-09-18 20:14:19,735 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0322 | Val rms_score: 53.8515
309
+ 2025-09-18 20:14:24,060 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0301 | Val rms_score: 55.2587
310
+ 2025-09-18 20:14:30,369 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0209 | Val rms_score: 54.3769
311
+ 2025-09-18 20:14:36,937 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0307 | Val rms_score: 54.2423
312
+ 2025-09-18 20:14:41,778 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0316 | Val rms_score: 54.3311
313
+ 2025-09-18 20:14:48,098 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0316 | Val rms_score: 53.9750
314
+ 2025-09-18 20:14:54,288 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0283 | Val rms_score: 55.2343
315
+ 2025-09-18 20:14:59,188 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0285 | Val rms_score: 54.1745
316
+ 2025-09-18 20:15:05,857 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0259 | Val rms_score: 54.1710
317
+ 2025-09-18 20:15:12,313 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0251 | Val rms_score: 53.9590
318
+ 2025-09-18 20:15:17,457 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0327 | Val rms_score: 55.1298
319
+ 2025-09-18 20:15:23,129 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0320 | Val rms_score: 54.7620
320
+ 2025-09-18 20:15:29,276 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0455 | Val rms_score: 55.1292
321
+ 2025-09-18 20:15:36,149 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0311 | Val rms_score: 54.4848
322
+ 2025-09-18 20:15:42,359 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0270 | Val rms_score: 54.4079
323
+ 2025-09-18 20:15:48,251 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0262 | Val rms_score: 54.7768
324
+ 2025-09-18 20:15:54,896 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0233 | Val rms_score: 54.1240
325
+ 2025-09-18 20:16:01,663 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0255 | Val rms_score: 54.1520
326
+ 2025-09-18 20:16:08,242 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0318 | Val rms_score: 52.9838
327
+ 2025-09-18 20:16:13,923 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0237 | Val rms_score: 54.4060
328
+ 2025-09-18 20:16:18,383 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0242 | Val rms_score: 54.2633
329
+ 2025-09-18 20:16:24,725 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0292 | Val rms_score: 55.4440
330
+ 2025-09-18 20:16:25,407 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 38.0671
331
+ 2025-09-18 20:16:25,738 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 42.8010, Std Dev: 3.3475
logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_delaney_epochs100_batch_size32_20250918_164756.log ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 16:47:56,575 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Running benchmark for dataset: delaney
2
+ 2025-09-18 16:47:56,575 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - dataset: delaney, tasks: ['measured_log_solubility_in_mols_per_litre'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 16:47:56,581 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset delaney at 2025-09-18_16-47-56
4
+ 2025-09-18 16:48:06,058 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2188 | Val rms_score: 1.0296
5
+ 2025-09-18 16:48:06,058 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
6
+ 2025-09-18 16:48:04,436 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.0296
7
+ 2025-09-18 16:48:10,592 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0851 | Val rms_score: 0.9133
8
+ 2025-09-18 16:48:10,774 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
9
+ 2025-09-18 16:48:11,294 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.9133
10
+ 2025-09-18 16:48:17,763 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0630 | Val rms_score: 0.9766
11
+ 2025-09-18 16:48:24,154 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0552 | Val rms_score: 0.8472
12
+ 2025-09-18 16:48:24,330 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 116
13
+ 2025-09-18 16:48:24,879 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.8472
14
+ 2025-09-18 16:48:30,505 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0469 | Val rms_score: 0.8811
15
+ 2025-09-18 16:48:36,618 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0409 | Val rms_score: 0.8568
16
+ 2025-09-18 16:48:40,528 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0368 | Val rms_score: 0.8606
17
+ 2025-09-18 16:48:46,583 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0358 | Val rms_score: 0.8700
18
+ 2025-09-18 16:48:52,459 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0298 | Val rms_score: 0.8298
19
+ 2025-09-18 16:48:52,632 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 261
20
+ 2025-09-18 16:48:53,141 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.8298
21
+ 2025-09-18 16:48:59,216 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0244 | Val rms_score: 0.8866
22
+ 2025-09-18 16:49:05,168 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0228 | Val rms_score: 0.8626
23
+ 2025-09-18 16:49:08,742 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0259 | Val rms_score: 0.8065
24
+ 2025-09-18 16:49:08,923 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 348
25
+ 2025-09-18 16:49:09,459 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val rms_score: 0.8065
26
+ 2025-09-18 16:49:15,601 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0238 | Val rms_score: 0.8471
27
+ 2025-09-18 16:49:21,508 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0216 | Val rms_score: 0.8312
28
+ 2025-09-18 16:49:26,164 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0194 | Val rms_score: 0.8254
29
+ 2025-09-18 16:49:32,090 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0195 | Val rms_score: 0.8201
30
+ 2025-09-18 16:49:36,085 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0198 | Val rms_score: 0.8043
31
+ 2025-09-18 16:49:36,272 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 493
32
+ 2025-09-18 16:49:36,820 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val rms_score: 0.8043
33
+ 2025-09-18 16:49:41,690 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0211 | Val rms_score: 0.8842
34
+ 2025-09-18 16:49:47,736 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0210 | Val rms_score: 0.8781
35
+ 2025-09-18 16:49:53,804 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0211 | Val rms_score: 0.8684
36
+ 2025-09-18 16:49:59,676 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0263 | Val rms_score: 0.7890
37
+ 2025-09-18 16:50:00,209 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 609
38
+ 2025-09-18 16:50:00,781 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val rms_score: 0.7890
39
+ 2025-09-18 16:50:06,929 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0224 | Val rms_score: 0.8353
40
+ 2025-09-18 16:50:10,354 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0213 | Val rms_score: 0.8493
41
+ 2025-09-18 16:50:16,377 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0216 | Val rms_score: 0.8222
42
+ 2025-09-18 16:50:22,223 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0233 | Val rms_score: 0.9237
43
+ 2025-09-18 16:50:28,332 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0288 | Val rms_score: 0.8699
44
+ 2025-09-18 16:50:34,691 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0190 | Val rms_score: 0.8711
45
+ 2025-09-18 16:50:37,542 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0172 | Val rms_score: 0.8269
46
+ 2025-09-18 16:50:43,497 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0197 | Val rms_score: 0.8779
47
+ 2025-09-18 16:50:48,746 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0201 | Val rms_score: 0.8582
48
+ 2025-09-18 16:50:54,601 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0166 | Val rms_score: 0.8698
49
+ 2025-09-18 16:51:00,706 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0199 | Val rms_score: 0.8793
50
+ 2025-09-18 16:51:06,645 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0178 | Val rms_score: 0.8244
51
+ 2025-09-18 16:51:10,099 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0159 | Val rms_score: 0.8070
52
+ 2025-09-18 16:51:16,458 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0151 | Val rms_score: 0.8425
53
+ 2025-09-18 16:51:22,481 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0221 | Val rms_score: 0.8321
54
+ 2025-09-18 16:51:28,807 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0214 | Val rms_score: 0.8506
55
+ 2025-09-18 16:51:33,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0153 | Val rms_score: 0.8058
56
+ 2025-09-18 16:51:37,104 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0186 | Val rms_score: 0.8380
57
+ 2025-09-18 16:51:43,121 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0167 | Val rms_score: 0.8283
58
+ 2025-09-18 16:51:48,455 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0151 | Val rms_score: 0.8199
59
+ 2025-09-18 16:51:54,827 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0142 | Val rms_score: 0.8151
60
+ 2025-09-18 16:52:00,871 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0140 | Val rms_score: 0.8302
61
+ 2025-09-18 16:52:06,748 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0154 | Val rms_score: 0.8298
62
+ 2025-09-18 16:52:10,096 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0146 | Val rms_score: 0.8097
63
+ 2025-09-18 16:52:16,028 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0161 | Val rms_score: 0.8386
64
+ 2025-09-18 16:52:22,546 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0186 | Val rms_score: 0.8579
65
+ 2025-09-18 16:52:26,900 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0174 | Val rms_score: 0.8359
66
+ 2025-09-18 16:52:32,978 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0134 | Val rms_score: 0.8554
67
+ 2025-09-18 16:52:36,463 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0156 | Val rms_score: 0.8387
68
+ 2025-09-18 16:52:41,942 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0216 | Val rms_score: 0.8061
69
+ 2025-09-18 16:52:48,186 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0206 | Val rms_score: 0.7953
70
+ 2025-09-18 16:52:54,197 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0139 | Val rms_score: 0.8134
71
+ 2025-09-18 16:53:00,236 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0124 | Val rms_score: 0.7955
72
+ 2025-09-18 16:53:05,764 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0182 | Val rms_score: 0.8309
73
+ 2025-09-18 16:53:09,155 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0220 | Val rms_score: 0.7925
74
+ 2025-09-18 16:53:15,471 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0187 | Val rms_score: 0.8166
75
+ 2025-09-18 16:53:19,961 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0267 | Val rms_score: 0.7948
76
+ 2025-09-18 16:53:25,904 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0193 | Val rms_score: 0.8348
77
+ 2025-09-18 16:53:32,005 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0138 | Val rms_score: 0.8374
78
+ 2025-09-18 16:53:35,017 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0121 | Val rms_score: 0.8198
79
+ 2025-09-18 16:53:41,190 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0131 | Val rms_score: 0.8426
80
+ 2025-09-18 16:53:47,093 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0127 | Val rms_score: 0.8220
81
+ 2025-09-18 16:53:53,164 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0137 | Val rms_score: 0.8360
82
+ 2025-09-18 16:53:57,829 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0183 | Val rms_score: 0.8382
83
+ 2025-09-18 16:54:03,854 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0168 | Val rms_score: 0.8289
84
+ 2025-09-18 16:54:07,584 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0120 | Val rms_score: 0.8421
85
+ 2025-09-18 16:54:12,911 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0113 | Val rms_score: 0.8210
86
+ 2025-09-18 16:54:19,977 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0117 | Val rms_score: 0.8400
87
+ 2025-09-18 16:54:25,895 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0110 | Val rms_score: 0.8386
88
+ 2025-09-18 16:54:31,802 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0128 | Val rms_score: 0.8222
89
+ 2025-09-18 16:54:35,856 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0131 | Val rms_score: 0.8433
90
+ 2025-09-18 16:54:41,830 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0118 | Val rms_score: 0.8324
91
+ 2025-09-18 16:54:47,777 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0131 | Val rms_score: 0.8440
92
+ 2025-09-18 16:54:53,311 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0174 | Val rms_score: 0.8528
93
+ 2025-09-18 16:54:59,311 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0135 | Val rms_score: 0.8247
94
+ 2025-09-18 16:55:05,778 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0129 | Val rms_score: 0.8301
95
+ 2025-09-18 16:55:07,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0119 | Val rms_score: 0.8033
96
+ 2025-09-18 16:55:13,573 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0103 | Val rms_score: 0.8331
97
+ 2025-09-18 16:55:19,045 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0116 | Val rms_score: 0.8125
98
+ 2025-09-18 16:55:23,656 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0110 | Val rms_score: 0.8093
99
+ 2025-09-18 16:55:28,101 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0106 | Val rms_score: 0.8266
100
+ 2025-09-18 16:55:32,792 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0111 | Val rms_score: 0.8268
101
+ 2025-09-18 16:55:37,410 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0121 | Val rms_score: 0.8486
102
+ 2025-09-18 16:55:39,748 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0127 | Val rms_score: 0.8641
103
+ 2025-09-18 16:55:45,329 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0176 | Val rms_score: 0.7935
104
+ 2025-09-18 16:55:51,057 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0142 | Val rms_score: 0.8356
105
+ 2025-09-18 16:55:57,231 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0129 | Val rms_score: 0.8232
106
+ 2025-09-18 16:56:03,068 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0106 | Val rms_score: 0.8128
107
+ 2025-09-18 16:56:05,533 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0142 | Val rms_score: 0.8614
108
+ 2025-09-18 16:56:09,573 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0145 | Val rms_score: 0.8405
109
+ 2025-09-18 16:56:14,797 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0128 | Val rms_score: 0.8019
110
+ 2025-09-18 16:56:20,461 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0112 | Val rms_score: 0.8259
111
+ 2025-09-18 16:56:26,249 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0127 | Val rms_score: 0.8086
112
+ 2025-09-18 16:56:31,359 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0144 | Val rms_score: 0.8173
113
+ 2025-09-18 16:56:36,512 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0113 | Val rms_score: 0.8341
114
+ 2025-09-18 16:56:39,338 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0117 | Val rms_score: 0.8254
115
+ 2025-09-18 16:56:43,549 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0139 | Val rms_score: 0.8368
116
+ 2025-09-18 16:56:48,209 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0147 | Val rms_score: 0.8586
117
+ 2025-09-18 16:56:53,167 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0134 | Val rms_score: 0.8245
118
+ 2025-09-18 16:56:53,891 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.8012
119
+ 2025-09-18 16:56:54,204 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset delaney at 2025-09-18_16-56-54
120
+ 2025-09-18 16:56:58,562 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2780 | Val rms_score: 1.0240
121
+ 2025-09-18 16:56:58,562 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
122
+ 2025-09-18 16:56:59,214 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.0240
123
+ 2025-09-18 16:57:04,697 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0835 | Val rms_score: 0.9073
124
+ 2025-09-18 16:57:04,864 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
125
+ 2025-09-18 16:57:05,413 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.9073
126
+ 2025-09-18 16:57:08,231 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0644 | Val rms_score: 0.8987
127
+ 2025-09-18 16:57:08,418 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
128
+ 2025-09-18 16:57:09,029 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.8987
129
+ 2025-09-18 16:57:14,651 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0552 | Val rms_score: 0.9579
130
+ 2025-09-18 16:57:19,923 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0531 | Val rms_score: 0.8697
131
+ 2025-09-18 16:57:20,112 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 145
132
+ 2025-09-18 16:57:20,703 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.8697
133
+ 2025-09-18 16:57:25,605 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0439 | Val rms_score: 0.8269
134
+ 2025-09-18 16:57:26,073 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 174
135
+ 2025-09-18 16:57:26,634 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.8269
136
+ 2025-09-18 16:57:31,874 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0260 | Val rms_score: 0.8571
137
+ 2025-09-18 16:57:36,940 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0326 | Val rms_score: 0.8384
138
+ 2025-09-18 16:57:38,626 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0345 | Val rms_score: 0.8691
139
+ 2025-09-18 16:57:43,390 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0290 | Val rms_score: 0.8557
140
+ 2025-09-18 16:57:48,055 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0265 | Val rms_score: 0.8337
141
+ 2025-09-18 16:57:53,046 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0280 | Val rms_score: 0.8910
142
+ 2025-09-18 16:57:58,178 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0335 | Val rms_score: 0.9015
143
+ 2025-09-18 16:58:03,425 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0234 | Val rms_score: 0.8871
144
+ 2025-09-18 16:58:05,685 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0222 | Val rms_score: 0.8467
145
+ 2025-09-18 16:58:10,759 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0237 | Val rms_score: 0.8430
146
+ 2025-09-18 16:58:15,906 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0229 | Val rms_score: 0.8623
147
+ 2025-09-18 16:58:21,177 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0238 | Val rms_score: 0.8463
148
+ 2025-09-18 16:58:26,445 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0202 | Val rms_score: 0.8261
149
+ 2025-09-18 16:58:26,592 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 551
150
+ 2025-09-18 16:58:27,142 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 19 with val rms_score: 0.8261
151
+ 2025-09-18 16:58:31,957 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0205 | Val rms_score: 0.8346
152
+ 2025-09-18 16:58:36,860 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0184 | Val rms_score: 0.8460
153
+ 2025-09-18 16:58:39,289 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0161 | Val rms_score: 0.8721
154
+ 2025-09-18 16:58:44,181 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0190 | Val rms_score: 0.8276
155
+ 2025-09-18 16:58:49,146 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0228 | Val rms_score: 0.8538
156
+ 2025-09-18 16:58:54,101 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0206 | Val rms_score: 0.8898
157
+ 2025-09-18 16:58:59,074 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0284 | Val rms_score: 0.8971
158
+ 2025-09-18 16:59:04,161 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0206 | Val rms_score: 0.8844
159
+ 2025-09-18 16:59:06,479 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0269 | Val rms_score: 0.8396
160
+ 2025-09-18 16:59:11,529 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0292 | Val rms_score: 0.8515
161
+ 2025-09-18 16:59:16,725 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0302 | Val rms_score: 0.8389
162
+ 2025-09-18 16:59:21,872 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0232 | Val rms_score: 0.8692
163
+ 2025-09-18 16:59:27,242 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0188 | Val rms_score: 0.8293
164
+ 2025-09-18 16:59:32,663 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0166 | Val rms_score: 0.8704
165
+ 2025-09-18 16:59:35,749 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0185 | Val rms_score: 0.8624
166
+ 2025-09-18 16:59:42,232 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0165 | Val rms_score: 0.8331
167
+ 2025-09-18 16:59:46,728 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0145 | Val rms_score: 0.8500
168
+ 2025-09-18 16:59:51,526 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0152 | Val rms_score: 0.8235
169
+ 2025-09-18 16:59:51,673 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1073
170
+ 2025-09-18 16:59:52,254 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 37 with val rms_score: 0.8235
171
+ 2025-09-18 16:59:57,415 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0215 | Val rms_score: 0.8274
172
+ 2025-09-18 17:00:02,327 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0162 | Val rms_score: 0.8653
173
+ 2025-09-18 17:00:07,137 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0182 | Val rms_score: 0.8890
174
+ 2025-09-18 17:00:09,446 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0238 | Val rms_score: 0.8710
175
+ 2025-09-18 17:00:14,505 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0235 | Val rms_score: 0.8336
176
+ 2025-09-18 17:00:19,356 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0182 | Val rms_score: 0.8536
177
+ 2025-09-18 17:00:24,111 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0206 | Val rms_score: 0.8309
178
+ 2025-09-18 17:00:29,006 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0104 | Val rms_score: 0.8178
179
+ 2025-09-18 17:00:29,169 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1305
180
+ 2025-09-18 17:00:29,815 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 45 with val rms_score: 0.8178
181
+ 2025-09-18 17:00:35,120 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0145 | Val rms_score: 0.8361
182
+ 2025-09-18 17:00:38,160 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0131 | Val rms_score: 0.8325
183
+ 2025-09-18 17:00:43,355 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0156 | Val rms_score: 0.8504
184
+ 2025-09-18 17:00:48,588 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0152 | Val rms_score: 0.8344
185
+ 2025-09-18 17:00:53,778 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0139 | Val rms_score: 0.8202
186
+ 2025-09-18 17:00:59,049 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0137 | Val rms_score: 0.8544
187
+ 2025-09-18 17:01:04,555 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0134 | Val rms_score: 0.8334
188
+ 2025-09-18 17:01:07,056 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0137 | Val rms_score: 0.8107
189
+ 2025-09-18 17:01:07,219 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1537
190
+ 2025-09-18 17:01:07,808 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 53 with val rms_score: 0.8107
191
+ 2025-09-18 17:01:13,518 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0152 | Val rms_score: 0.8088
192
+ 2025-09-18 17:01:13,737 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1566
193
+ 2025-09-18 17:01:14,327 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 54 with val rms_score: 0.8088
194
+ 2025-09-18 17:01:20,542 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0210 | Val rms_score: 0.7807
195
+ 2025-09-18 17:01:20,734 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1595
196
+ 2025-09-18 17:01:21,303 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 55 with val rms_score: 0.7807
197
+ 2025-09-18 17:01:26,662 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0193 | Val rms_score: 0.8180
198
+ 2025-09-18 17:01:32,679 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0175 | Val rms_score: 0.8164
199
+ 2025-09-18 17:01:41,147 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0156 | Val rms_score: 0.8246
200
+ 2025-09-18 17:01:41,479 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0144 | Val rms_score: 0.8194
201
+ 2025-09-18 17:01:47,519 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0134 | Val rms_score: 0.8371
202
+ 2025-09-18 17:01:53,503 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0128 | Val rms_score: 0.8303
203
+ 2025-09-18 17:01:59,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0134 | Val rms_score: 0.8643
204
+ 2025-09-18 17:02:05,096 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0139 | Val rms_score: 0.8545
205
+ 2025-09-18 17:02:08,207 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0127 | Val rms_score: 0.8623
206
+ 2025-09-18 17:02:13,513 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0131 | Val rms_score: 0.8145
207
+ 2025-09-18 17:02:18,701 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0170 | Val rms_score: 0.8419
208
+ 2025-09-18 17:02:24,277 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0158 | Val rms_score: 0.8650
209
+ 2025-09-18 17:02:29,312 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0210 | Val rms_score: 0.8558
210
+ 2025-09-18 17:02:35,471 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0133 | Val rms_score: 0.8328
211
+ 2025-09-18 17:02:38,977 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0158 | Val rms_score: 0.8416
212
+ 2025-09-18 17:02:44,767 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0160 | Val rms_score: 0.8024
213
+ 2025-09-18 17:02:50,739 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0125 | Val rms_score: 0.8281
214
+ 2025-09-18 17:02:56,445 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0135 | Val rms_score: 0.8408
215
+ 2025-09-18 17:03:02,121 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0124 | Val rms_score: 0.8229
216
+ 2025-09-18 17:03:07,576 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0108 | Val rms_score: 0.8355
217
+ 2025-09-18 17:03:09,605 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0106 | Val rms_score: 0.8374
218
+ 2025-09-18 17:03:14,323 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0122 | Val rms_score: 0.8092
219
+ 2025-09-18 17:03:19,493 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0130 | Val rms_score: 0.8191
220
+ 2025-09-18 17:03:24,449 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0120 | Val rms_score: 0.8150
221
+ 2025-09-18 17:03:29,681 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0154 | Val rms_score: 0.8413
222
+ 2025-09-18 17:03:34,867 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0147 | Val rms_score: 0.8108
223
+ 2025-09-18 17:03:38,447 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0125 | Val rms_score: 0.8271
224
+ 2025-09-18 17:03:43,551 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0121 | Val rms_score: 0.8039
225
+ 2025-09-18 17:03:49,653 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0123 | Val rms_score: 0.8027
226
+ 2025-09-18 17:03:54,942 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0125 | Val rms_score: 0.8042
227
+ 2025-09-18 17:04:00,344 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0128 | Val rms_score: 0.8462
228
+ 2025-09-18 17:04:05,726 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0124 | Val rms_score: 0.8381
229
+ 2025-09-18 17:04:07,495 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0143 | Val rms_score: 0.8359
230
+ 2025-09-18 17:04:12,503 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0115 | Val rms_score: 0.8368
231
+ 2025-09-18 17:04:17,021 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0122 | Val rms_score: 0.8418
232
+ 2025-09-18 17:04:22,501 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0118 | Val rms_score: 0.8190
233
+ 2025-09-18 17:04:28,165 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0139 | Val rms_score: 0.8012
234
+ 2025-09-18 17:04:33,264 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0110 | Val rms_score: 0.8124
235
+ 2025-09-18 17:04:36,484 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0130 | Val rms_score: 0.8477
236
+ 2025-09-18 17:04:41,929 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0121 | Val rms_score: 0.8128
237
+ 2025-09-18 17:04:46,904 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0106 | Val rms_score: 0.8305
238
+ 2025-09-18 17:04:52,413 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0121 | Val rms_score: 0.8304
239
+ 2025-09-18 17:04:57,583 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0118 | Val rms_score: 0.8625
240
+ 2025-09-18 17:05:02,300 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0125 | Val rms_score: 0.8159
241
+ 2025-09-18 17:05:07,004 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0125 | Val rms_score: 0.8438
242
+ 2025-09-18 17:05:07,741 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.8546
243
+ 2025-09-18 17:05:08,124 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset delaney at 2025-09-18_17-05-08
244
+ 2025-09-18 17:05:10,514 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2522 | Val rms_score: 1.0093
245
+ 2025-09-18 17:05:10,515 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
246
+ 2025-09-18 17:05:11,234 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.0093
247
+ 2025-09-18 17:05:16,175 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0835 | Val rms_score: 0.9147
248
+ 2025-09-18 17:05:16,355 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
249
+ 2025-09-18 17:05:16,975 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.9147
250
+ 2025-09-18 17:05:22,319 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0628 | Val rms_score: 0.8887
251
+ 2025-09-18 17:05:22,531 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
252
+ 2025-09-18 17:05:23,130 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.8887
253
+ 2025-09-18 17:05:28,759 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0493 | Val rms_score: 0.8874
254
+ 2025-09-18 17:05:28,929 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 116
255
+ 2025-09-18 17:05:29,493 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.8874
256
+ 2025-09-18 17:05:35,591 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0453 | Val rms_score: 0.8694
257
+ 2025-09-18 17:05:35,774 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 145
258
+ 2025-09-18 17:05:36,320 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.8694
259
+ 2025-09-18 17:05:39,713 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0342 | Val rms_score: 0.8521
260
+ 2025-09-18 17:05:40,224 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 174
261
+ 2025-09-18 17:05:40,780 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.8521
262
+ 2025-09-18 17:05:46,505 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0423 | Val rms_score: 0.8537
263
+ 2025-09-18 17:05:52,149 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0388 | Val rms_score: 0.8437
264
+ 2025-09-18 17:05:52,310 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 232
265
+ 2025-09-18 17:05:52,926 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.8437
266
+ 2025-09-18 17:05:57,953 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0273 | Val rms_score: 0.8418
267
+ 2025-09-18 17:05:58,134 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 261
268
+ 2025-09-18 17:05:58,674 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.8418
269
+ 2025-09-18 17:06:03,537 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0275 | Val rms_score: 0.8288
270
+ 2025-09-18 17:06:03,713 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 290
271
+ 2025-09-18 17:06:04,284 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 0.8288
272
+ 2025-09-18 17:06:07,058 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0282 | Val rms_score: 0.8382
273
+ 2025-09-18 17:06:12,818 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0287 | Val rms_score: 0.8297
274
+ 2025-09-18 17:06:17,650 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0257 | Val rms_score: 0.8490
275
+ 2025-09-18 17:06:22,645 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0319 | Val rms_score: 0.8125
276
+ 2025-09-18 17:06:22,829 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 406
277
+ 2025-09-18 17:06:23,405 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.8125
278
+ 2025-09-18 17:06:28,929 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0279 | Val rms_score: 0.8136
279
+ 2025-09-18 17:06:34,229 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0218 | Val rms_score: 0.8459
280
+ 2025-09-18 17:06:37,644 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0213 | Val rms_score: 0.8245
281
+ 2025-09-18 17:06:42,690 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0238 | Val rms_score: 0.8659
282
+ 2025-09-18 17:06:47,708 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0187 | Val rms_score: 0.8202
283
+ 2025-09-18 17:06:52,498 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0236 | Val rms_score: 0.8144
284
+ 2025-09-18 17:06:57,478 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0199 | Val rms_score: 0.8409
285
+ 2025-09-18 17:07:02,927 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0259 | Val rms_score: 0.8305
286
+ 2025-09-18 17:07:07,470 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0283 | Val rms_score: 0.8066
287
+ 2025-09-18 17:07:07,618 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 667
288
+ 2025-09-18 17:07:08,194 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 23 with val rms_score: 0.8066
289
+ 2025-09-18 17:07:10,676 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0264 | Val rms_score: 0.8815
290
+ 2025-09-18 17:07:15,381 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0317 | Val rms_score: 0.8304
291
+ 2025-09-18 17:07:20,241 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0214 | Val rms_score: 0.8456
292
+ 2025-09-18 17:07:25,761 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0217 | Val rms_score: 0.7842
293
+ 2025-09-18 17:07:25,946 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 783
294
+ 2025-09-18 17:07:26,532 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val rms_score: 0.7842
295
+ 2025-09-18 17:07:31,832 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0159 | Val rms_score: 0.7730
296
+ 2025-09-18 17:07:32,021 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 812
297
+ 2025-09-18 17:07:32,572 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val rms_score: 0.7730
298
+ 2025-09-18 17:07:38,057 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0166 | Val rms_score: 0.8036
299
+ 2025-09-18 17:07:40,606 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0162 | Val rms_score: 0.8039
300
+ 2025-09-18 17:07:45,650 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0169 | Val rms_score: 0.8040
301
+ 2025-09-18 17:07:51,242 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0265 | Val rms_score: 0.8164
302
+ 2025-09-18 17:07:56,030 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0205 | Val rms_score: 0.8641
303
+ 2025-09-18 17:08:00,773 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0202 | Val rms_score: 0.8053
304
+ 2025-09-18 17:08:07,153 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0199 | Val rms_score: 0.7878
305
+ 2025-09-18 17:08:09,813 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0169 | Val rms_score: 0.8236
306
+ 2025-09-18 17:08:15,556 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0161 | Val rms_score: 0.8206
307
+ 2025-09-18 17:08:20,709 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0151 | Val rms_score: 0.8368
308
+ 2025-09-18 17:08:25,760 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0164 | Val rms_score: 0.7968
309
+ 2025-09-18 17:08:30,690 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0137 | Val rms_score: 0.7932
310
+ 2025-09-18 17:08:35,493 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0154 | Val rms_score: 0.8041
311
+ 2025-09-18 17:08:38,342 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0208 | Val rms_score: 0.8134
312
+ 2025-09-18 17:08:43,030 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0165 | Val rms_score: 0.8085
313
+ 2025-09-18 17:08:47,880 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0159 | Val rms_score: 0.8090
314
+ 2025-09-18 17:08:52,815 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0156 | Val rms_score: 0.8162
315
+ 2025-09-18 17:08:57,524 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0137 | Val rms_score: 0.8593
316
+ 2025-09-18 17:09:02,807 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0128 | Val rms_score: 0.8265
317
+ 2025-09-18 17:09:07,331 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0166 | Val rms_score: 0.8379
318
+ 2025-09-18 17:09:09,508 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0175 | Val rms_score: 0.8362
319
+ 2025-09-18 17:09:14,241 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0205 | Val rms_score: 0.8588
320
+ 2025-09-18 17:09:19,028 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0167 | Val rms_score: 0.8278
321
+ 2025-09-18 17:09:23,953 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0181 | Val rms_score: 0.8160
322
+ 2025-09-18 17:09:28,422 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0175 | Val rms_score: 0.8464
323
+ 2025-09-18 17:09:33,103 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0199 | Val rms_score: 0.8256
324
+ 2025-09-18 17:09:37,975 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0156 | Val rms_score: 0.8236
325
+ 2025-09-18 17:09:40,499 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0135 | Val rms_score: 0.8316
326
+ 2025-09-18 17:09:45,727 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0135 | Val rms_score: 0.8598
327
+ 2025-09-18 17:09:50,390 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0186 | Val rms_score: 0.8137
328
+ 2025-09-18 17:09:55,438 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0175 | Val rms_score: 0.8140
329
+ 2025-09-18 17:10:00,798 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0178 | Val rms_score: 0.8018
330
+ 2025-09-18 17:10:06,310 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0168 | Val rms_score: 0.7953
331
+ 2025-09-18 17:10:09,419 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0134 | Val rms_score: 0.7966
332
+ 2025-09-18 17:10:14,593 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0158 | Val rms_score: 0.8172
333
+ 2025-09-18 17:10:20,546 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0141 | Val rms_score: 0.7985
334
+ 2025-09-18 17:10:26,400 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0114 | Val rms_score: 0.7992
335
+ 2025-09-18 17:10:32,364 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0137 | Val rms_score: 0.8003
336
+ 2025-09-18 17:10:38,826 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0142 | Val rms_score: 0.8194
337
+ 2025-09-18 17:10:42,238 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0145 | Val rms_score: 0.8294
338
+ 2025-09-18 17:10:48,474 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0093 | Val rms_score: 0.7903
339
+ 2025-09-18 17:10:54,590 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0125 | Val rms_score: 0.8286
340
+ 2025-09-18 17:11:00,584 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0129 | Val rms_score: 0.8127
341
+ 2025-09-18 17:11:07,037 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0133 | Val rms_score: 0.8127
342
+ 2025-09-18 17:11:10,642 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0165 | Val rms_score: 0.8196
343
+ 2025-09-18 17:11:16,028 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0145 | Val rms_score: 0.8269
344
+ 2025-09-18 17:11:21,516 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0131 | Val rms_score: 0.8212
345
+ 2025-09-18 17:11:26,752 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0127 | Val rms_score: 0.8119
346
+ 2025-09-18 17:11:32,648 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0133 | Val rms_score: 0.8456
347
+ 2025-09-18 17:11:37,244 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0195 | Val rms_score: 0.8018
348
+ 2025-09-18 17:11:39,113 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0149 | Val rms_score: 0.8181
349
+ 2025-09-18 17:11:44,137 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0135 | Val rms_score: 0.8259
350
+ 2025-09-18 17:11:49,252 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0106 | Val rms_score: 0.8070
351
+ 2025-09-18 17:11:54,633 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0111 | Val rms_score: 0.8136
352
+ 2025-09-18 17:11:59,756 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0097 | Val rms_score: 0.8089
353
+ 2025-09-18 17:12:05,720 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0100 | Val rms_score: 0.7968
354
+ 2025-09-18 17:12:09,051 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0121 | Val rms_score: 0.8640
355
+ 2025-09-18 17:12:14,805 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0143 | Val rms_score: 0.8568
356
+ 2025-09-18 17:12:21,106 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0119 | Val rms_score: 0.8333
357
+ 2025-09-18 17:12:26,157 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0122 | Val rms_score: 0.8328
358
+ 2025-09-18 17:12:31,440 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0105 | Val rms_score: 0.8227
359
+ 2025-09-18 17:12:35,540 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0115 | Val rms_score: 0.8197
360
+ 2025-09-18 17:12:38,039 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0124 | Val rms_score: 0.8258
361
+ 2025-09-18 17:12:43,110 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0134 | Val rms_score: 0.8260
362
+ 2025-09-18 17:12:48,008 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0108 | Val rms_score: 0.8340
363
+ 2025-09-18 17:12:52,802 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0103 | Val rms_score: 0.8281
364
+ 2025-09-18 17:12:57,257 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0114 | Val rms_score: 0.8162
365
+ 2025-09-18 17:13:02,208 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0131 | Val rms_score: 0.8471
366
+ 2025-09-18 17:13:07,423 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0144 | Val rms_score: 0.8275
367
+ 2025-09-18 17:13:09,753 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0133 | Val rms_score: 0.8353
368
+ 2025-09-18 17:13:14,499 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0108 | Val rms_score: 0.8211
369
+ 2025-09-18 17:13:19,094 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0115 | Val rms_score: 0.8236
370
+ 2025-09-18 17:13:19,787 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.7949
371
+ 2025-09-18 17:13:20,099 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.8169, Std Dev: 0.0268
logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_freesolv_epochs100_batch_size32_20250918_174904.log ADDED
@@ -0,0 +1,383 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 17:49:04,095 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Running benchmark for dataset: freesolv
2
+ 2025-09-18 17:49:04,095 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - dataset: freesolv, tasks: ['y'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 17:49:04,099 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset freesolv at 2025-09-18_17-49-04
4
+ 2025-09-18 17:49:06,746 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.4430 | Val rms_score: 1.1799
5
+ 2025-09-18 17:49:06,746 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
6
+ 2025-09-18 17:49:07,287 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.1799
7
+ 2025-09-18 17:49:11,122 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1654 | Val rms_score: 1.0810
8
+ 2025-09-18 17:49:11,303 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
9
+ 2025-09-18 17:49:11,880 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.0810
10
+ 2025-09-18 17:49:13,164 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0896 | Val rms_score: 0.9292
11
+ 2025-09-18 17:49:13,341 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
12
+ 2025-09-18 17:49:13,872 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.9292
13
+ 2025-09-18 17:49:17,654 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0859 | Val rms_score: 0.9205
14
+ 2025-09-18 17:49:17,829 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
15
+ 2025-09-18 17:49:18,377 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.9205
16
+ 2025-09-18 17:49:21,451 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0790 | Val rms_score: 0.9047
17
+ 2025-09-18 17:49:21,624 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
18
+ 2025-09-18 17:49:22,161 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.9047
19
+ 2025-09-18 17:49:25,119 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0214 | Val rms_score: 0.8547
20
+ 2025-09-18 17:49:25,670 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 102
21
+ 2025-09-18 17:49:26,230 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.8547
22
+ 2025-09-18 17:49:29,318 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0370 | Val rms_score: 0.8581
23
+ 2025-09-18 17:49:33,145 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0264 | Val rms_score: 0.8288
24
+ 2025-09-18 17:49:33,319 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 136
25
+ 2025-09-18 17:49:33,858 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.8288
26
+ 2025-09-18 17:49:37,906 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0248 | Val rms_score: 0.8592
27
+ 2025-09-18 17:49:41,581 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0272 | Val rms_score: 0.8867
28
+ 2025-09-18 17:49:42,158 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0312 | Val rms_score: 0.8122
29
+ 2025-09-18 17:49:42,657 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 187
30
+ 2025-09-18 17:49:43,188 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 0.8122
31
+ 2025-09-18 17:49:46,492 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0304 | Val rms_score: 0.8797
32
+ 2025-09-18 17:49:49,870 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0377 | Val rms_score: 0.8382
33
+ 2025-09-18 17:49:53,723 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0261 | Val rms_score: 0.8418
34
+ 2025-09-18 17:49:57,156 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0241 | Val rms_score: 0.8358
35
+ 2025-09-18 17:50:00,315 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0196 | Val rms_score: 0.8567
36
+ 2025-09-18 17:50:04,134 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0161 | Val rms_score: 0.8214
37
+ 2025-09-18 17:50:07,764 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0143 | Val rms_score: 0.8263
38
+ 2025-09-18 17:50:11,063 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0155 | Val rms_score: 0.8586
39
+ 2025-09-18 17:50:11,837 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0176 | Val rms_score: 0.8417
40
+ 2025-09-18 17:50:15,559 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0144 | Val rms_score: 0.8087
41
+ 2025-09-18 17:50:16,033 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 357
42
+ 2025-09-18 17:50:16,571 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val rms_score: 0.8087
43
+ 2025-09-18 17:50:20,683 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0211 | Val rms_score: 0.8816
44
+ 2025-09-18 17:50:24,352 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0356 | Val rms_score: 0.7937
45
+ 2025-09-18 17:50:24,531 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 391
46
+ 2025-09-18 17:50:25,156 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 23 with val rms_score: 0.7937
47
+ 2025-09-18 17:50:28,680 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0194 | Val rms_score: 0.8771
48
+ 2025-09-18 17:50:31,711 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0179 | Val rms_score: 0.8857
49
+ 2025-09-18 17:50:35,314 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0163 | Val rms_score: 0.8358
50
+ 2025-09-18 17:50:39,401 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0168 | Val rms_score: 0.8448
51
+ 2025-09-18 17:50:40,242 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0129 | Val rms_score: 0.8443
52
+ 2025-09-18 17:50:43,485 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0165 | Val rms_score: 0.8401
53
+ 2025-09-18 17:50:47,172 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0114 | Val rms_score: 0.8769
54
+ 2025-09-18 17:50:50,856 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0276 | Val rms_score: 0.9459
55
+ 2025-09-18 17:50:54,573 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1608 | Val rms_score: 0.9910
56
+ 2025-09-18 17:50:57,706 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0740 | Val rms_score: 0.9596
57
+ 2025-09-18 17:51:01,457 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0381 | Val rms_score: 0.9212
58
+ 2025-09-18 17:51:05,155 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0342 | Val rms_score: 0.9601
59
+ 2025-09-18 17:51:08,233 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0511 | Val rms_score: 0.8726
60
+ 2025-09-18 17:51:11,782 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0200 | Val rms_score: 0.8223
61
+ 2025-09-18 17:51:12,990 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0154 | Val rms_score: 0.8309
62
+ 2025-09-18 17:51:16,637 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0124 | Val rms_score: 0.8328
63
+ 2025-09-18 17:51:19,595 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0118 | Val rms_score: 0.8155
64
+ 2025-09-18 17:51:23,290 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0106 | Val rms_score: 0.8335
65
+ 2025-09-18 17:51:27,382 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0102 | Val rms_score: 0.8171
66
+ 2025-09-18 17:51:30,657 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0140 | Val rms_score: 0.8682
67
+ 2025-09-18 17:51:33,862 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0287 | Val rms_score: 0.8227
68
+ 2025-09-18 17:51:37,546 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0172 | Val rms_score: 0.8410
69
+ 2025-09-18 17:51:40,951 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0132 | Val rms_score: 0.8358
70
+ 2025-09-18 17:51:41,944 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0117 | Val rms_score: 0.8089
71
+ 2025-09-18 17:51:45,358 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0182 | Val rms_score: 0.8237
72
+ 2025-09-18 17:51:48,829 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0147 | Val rms_score: 0.8204
73
+ 2025-09-18 17:51:51,785 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0097 | Val rms_score: 0.8075
74
+ 2025-09-18 17:51:55,505 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0094 | Val rms_score: 0.8020
75
+ 2025-09-18 17:51:59,558 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0194 | Val rms_score: 0.8200
76
+ 2025-09-18 17:52:02,571 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0081 | Val rms_score: 0.8112
77
+ 2025-09-18 17:52:05,819 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0322 | Val rms_score: 0.8982
78
+ 2025-09-18 17:52:09,276 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0276 | Val rms_score: 0.8276
79
+ 2025-09-18 17:52:12,300 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0215 | Val rms_score: 0.8332
80
+ 2025-09-18 17:52:13,990 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0226 | Val rms_score: 0.8550
81
+ 2025-09-18 17:52:17,511 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0446 | Val rms_score: 0.8323
82
+ 2025-09-18 17:52:21,710 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0129 | Val rms_score: 0.8422
83
+ 2025-09-18 17:52:24,583 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0105 | Val rms_score: 0.8521
84
+ 2025-09-18 17:52:28,298 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0136 | Val rms_score: 0.8344
85
+ 2025-09-18 17:52:32,163 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0090 | Val rms_score: 0.8337
86
+ 2025-09-18 17:52:35,141 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0076 | Val rms_score: 0.8454
87
+ 2025-09-18 17:52:38,905 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0078 | Val rms_score: 0.8402
88
+ 2025-09-18 17:52:42,549 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0066 | Val rms_score: 0.8421
89
+ 2025-09-18 17:52:42,974 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0065 | Val rms_score: 0.8310
90
+ 2025-09-18 17:52:47,196 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0064 | Val rms_score: 0.8313
91
+ 2025-09-18 17:52:50,876 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0068 | Val rms_score: 0.8282
92
+ 2025-09-18 17:52:53,752 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0068 | Val rms_score: 0.8279
93
+ 2025-09-18 17:52:57,385 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0086 | Val rms_score: 0.8380
94
+ 2025-09-18 17:53:00,408 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0075 | Val rms_score: 0.8234
95
+ 2025-09-18 17:53:03,897 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0071 | Val rms_score: 0.8368
96
+ 2025-09-18 17:53:07,401 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0105 | Val rms_score: 0.8260
97
+ 2025-09-18 17:53:11,155 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0242 | Val rms_score: 0.7826
98
+ 2025-09-18 17:53:11,319 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1258
99
+ 2025-09-18 17:53:11,895 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 74 with val rms_score: 0.7826
100
+ 2025-09-18 17:53:13,260 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0227 | Val rms_score: 0.9412
101
+ 2025-09-18 17:53:16,122 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.1052 | Val rms_score: 1.0087
102
+ 2025-09-18 17:53:20,070 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0380 | Val rms_score: 0.9859
103
+ 2025-09-18 17:53:23,807 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0294 | Val rms_score: 0.9937
104
+ 2025-09-18 17:53:26,821 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0210 | Val rms_score: 0.9653
105
+ 2025-09-18 17:53:30,266 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0164 | Val rms_score: 0.9661
106
+ 2025-09-18 17:53:34,013 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0153 | Val rms_score: 0.9553
107
+ 2025-09-18 17:53:38,214 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0156 | Val rms_score: 0.9458
108
+ 2025-09-18 17:53:41,225 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0098 | Val rms_score: 0.9321
109
+ 2025-09-18 17:53:42,246 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0099 | Val rms_score: 0.9248
110
+ 2025-09-18 17:53:45,658 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0110 | Val rms_score: 0.9280
111
+ 2025-09-18 17:53:48,732 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0096 | Val rms_score: 0.9216
112
+ 2025-09-18 17:53:52,909 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0094 | Val rms_score: 0.9308
113
+ 2025-09-18 17:53:56,622 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0083 | Val rms_score: 0.9083
114
+ 2025-09-18 17:53:59,718 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0079 | Val rms_score: 0.9083
115
+ 2025-09-18 17:54:03,240 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0093 | Val rms_score: 0.9166
116
+ 2025-09-18 17:54:06,948 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0080 | Val rms_score: 0.9195
117
+ 2025-09-18 17:54:10,586 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0066 | Val rms_score: 0.9101
118
+ 2025-09-18 17:54:11,304 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0069 | Val rms_score: 0.9125
119
+ 2025-09-18 17:54:15,018 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0069 | Val rms_score: 0.9129
120
+ 2025-09-18 17:54:18,472 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0083 | Val rms_score: 0.9139
121
+ 2025-09-18 17:54:22,090 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0088 | Val rms_score: 0.9058
122
+ 2025-09-18 17:54:26,135 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0132 | Val rms_score: 0.9058
123
+ 2025-09-18 17:54:29,181 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0105 | Val rms_score: 0.9044
124
+ 2025-09-18 17:54:32,441 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0076 | Val rms_score: 0.9269
125
+ 2025-09-18 17:54:36,215 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0061 | Val rms_score: 0.9132
126
+ 2025-09-18 17:54:36,775 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5618
127
+ 2025-09-18 17:54:37,141 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset freesolv at 2025-09-18_17-54-37
128
+ 2025-09-18 17:54:40,313 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.4375 | Val rms_score: 1.1759
129
+ 2025-09-18 17:54:40,314 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
130
+ 2025-09-18 17:54:41,015 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.1759
131
+ 2025-09-18 17:54:42,137 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1517 | Val rms_score: 1.0164
132
+ 2025-09-18 17:54:42,316 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
133
+ 2025-09-18 17:54:42,868 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.0164
134
+ 2025-09-18 17:54:46,368 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0836 | Val rms_score: 0.9482
135
+ 2025-09-18 17:54:46,542 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
136
+ 2025-09-18 17:54:47,081 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.9482
137
+ 2025-09-18 17:54:50,148 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0577 | Val rms_score: 0.9280
138
+ 2025-09-18 17:54:50,336 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
139
+ 2025-09-18 17:54:50,897 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.9280
140
+ 2025-09-18 17:54:53,857 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0549 | Val rms_score: 0.9223
141
+ 2025-09-18 17:54:54,043 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
142
+ 2025-09-18 17:54:54,580 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.9223
143
+ 2025-09-18 17:54:57,642 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0266 | Val rms_score: 0.9227
144
+ 2025-09-18 17:55:01,431 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0554 | Val rms_score: 0.9562
145
+ 2025-09-18 17:55:05,229 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0611 | Val rms_score: 0.9327
146
+ 2025-09-18 17:55:08,752 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0292 | Val rms_score: 0.8849
147
+ 2025-09-18 17:55:08,929 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 153
148
+ 2025-09-18 17:55:09,475 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.8849
149
+ 2025-09-18 17:55:12,627 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0263 | Val rms_score: 0.8933
150
+ 2025-09-18 17:55:13,361 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0237 | Val rms_score: 0.8640
151
+ 2025-09-18 17:55:13,853 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 187
152
+ 2025-09-18 17:55:14,403 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 0.8640
153
+ 2025-09-18 17:55:17,548 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0162 | Val rms_score: 0.8806
154
+ 2025-09-18 17:55:21,440 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0207 | Val rms_score: 0.8488
155
+ 2025-09-18 17:55:21,620 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 221
156
+ 2025-09-18 17:55:22,178 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val rms_score: 0.8488
157
+ 2025-09-18 17:55:25,874 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0286 | Val rms_score: 0.7976
158
+ 2025-09-18 17:55:26,053 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 238
159
+ 2025-09-18 17:55:26,642 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.7976
160
+ 2025-09-18 17:55:30,310 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0487 | Val rms_score: 0.8609
161
+ 2025-09-18 17:55:33,337 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0365 | Val rms_score: 0.8583
162
+ 2025-09-18 17:55:36,651 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0293 | Val rms_score: 0.8428
163
+ 2025-09-18 17:55:40,263 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0177 | Val rms_score: 0.8411
164
+ 2025-09-18 17:55:41,479 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0157 | Val rms_score: 0.8677
165
+ 2025-09-18 17:55:44,419 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0165 | Val rms_score: 0.8466
166
+ 2025-09-18 17:55:47,765 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0106 | Val rms_score: 0.8530
167
+ 2025-09-18 17:55:51,807 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0125 | Val rms_score: 0.8631
168
+ 2025-09-18 17:55:55,099 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0214 | Val rms_score: 0.8215
169
+ 2025-09-18 17:55:58,067 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0128 | Val rms_score: 0.8219
170
+ 2025-09-18 17:56:01,728 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0148 | Val rms_score: 0.9225
171
+ 2025-09-18 17:56:05,549 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0680 | Val rms_score: 0.8238
172
+ 2025-09-18 17:56:09,450 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1048 | Val rms_score: 0.9146
173
+ 2025-09-18 17:56:12,374 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0671 | Val rms_score: 1.0110
174
+ 2025-09-18 17:56:13,404 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0443 | Val rms_score: 0.8937
175
+ 2025-09-18 17:56:17,010 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0238 | Val rms_score: 0.8479
176
+ 2025-09-18 17:56:20,012 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0156 | Val rms_score: 0.8484
177
+ 2025-09-18 17:56:23,605 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0161 | Val rms_score: 0.8306
178
+ 2025-09-18 17:56:27,355 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0136 | Val rms_score: 0.8151
179
+ 2025-09-18 17:56:31,024 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0135 | Val rms_score: 0.8188
180
+ 2025-09-18 17:56:33,979 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0128 | Val rms_score: 0.8352
181
+ 2025-09-18 17:56:37,332 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0169 | Val rms_score: 0.7613
182
+ 2025-09-18 17:56:37,814 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 612
183
+ 2025-09-18 17:56:38,429 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 36 with val rms_score: 0.7613
184
+ 2025-09-18 17:56:41,949 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0763 | Val rms_score: 0.8981
185
+ 2025-09-18 17:56:43,194 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0703 | Val rms_score: 0.9450
186
+ 2025-09-18 17:56:46,598 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1351 | Val rms_score: 1.0522
187
+ 2025-09-18 17:56:49,573 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0526 | Val rms_score: 1.0316
188
+ 2025-09-18 17:56:53,326 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0288 | Val rms_score: 1.0100
189
+ 2025-09-18 17:56:57,167 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0198 | Val rms_score: 1.0149
190
+ 2025-09-18 17:57:00,042 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0198 | Val rms_score: 1.0117
191
+ 2025-09-18 17:57:03,829 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0207 | Val rms_score: 0.9938
192
+ 2025-09-18 17:57:07,431 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0225 | Val rms_score: 0.9904
193
+ 2025-09-18 17:57:10,375 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0247 | Val rms_score: 1.0013
194
+ 2025-09-18 17:57:11,611 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0129 | Val rms_score: 0.9952
195
+ 2025-09-18 17:57:15,334 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0115 | Val rms_score: 0.9948
196
+ 2025-09-18 17:57:18,719 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0109 | Val rms_score: 0.9985
197
+ 2025-09-18 17:57:21,634 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0095 | Val rms_score: 0.9882
198
+ 2025-09-18 17:57:25,486 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0095 | Val rms_score: 1.0102
199
+ 2025-09-18 17:57:29,582 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0105 | Val rms_score: 0.9790
200
+ 2025-09-18 17:57:32,682 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0040 | Val rms_score: 0.9924
201
+ 2025-09-18 17:57:36,065 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0129 | Val rms_score: 0.9887
202
+ 2025-09-18 17:57:39,645 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0073 | Val rms_score: 0.9793
203
+ 2025-09-18 17:57:40,628 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0081 | Val rms_score: 0.9635
204
+ 2025-09-18 17:57:43,940 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0088 | Val rms_score: 0.9713
205
+ 2025-09-18 17:57:47,583 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0123 | Val rms_score: 0.9989
206
+ 2025-09-18 17:57:52,314 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0327 | Val rms_score: 0.9764
207
+ 2025-09-18 17:57:55,990 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0186 | Val rms_score: 0.9875
208
+ 2025-09-18 17:57:58,824 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0127 | Val rms_score: 0.9859
209
+ 2025-09-18 17:58:02,393 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0110 | Val rms_score: 0.9799
210
+ 2025-09-18 17:58:06,137 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0103 | Val rms_score: 0.9696
211
+ 2025-09-18 17:58:09,421 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0092 | Val rms_score: 0.9684
212
+ 2025-09-18 17:58:12,424 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0050 | Val rms_score: 0.9654
213
+ 2025-09-18 17:58:13,514 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0101 | Val rms_score: 0.9610
214
+ 2025-09-18 17:58:17,512 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0222 | Val rms_score: 0.9311
215
+ 2025-09-18 17:58:20,920 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0118 | Val rms_score: 0.9595
216
+ 2025-09-18 17:58:23,859 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0090 | Val rms_score: 0.9531
217
+ 2025-09-18 17:58:27,496 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0116 | Val rms_score: 0.9701
218
+ 2025-09-18 17:58:30,931 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0161 | Val rms_score: 1.0181
219
+ 2025-09-18 17:58:34,579 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0496 | Val rms_score: 1.0518
220
+ 2025-09-18 17:58:37,719 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0370 | Val rms_score: 1.0445
221
+ 2025-09-18 17:58:41,615 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0206 | Val rms_score: 1.0079
222
+ 2025-09-18 17:58:42,877 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0132 | Val rms_score: 0.9903
223
+ 2025-09-18 17:58:45,907 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0111 | Val rms_score: 0.9906
224
+ 2025-09-18 17:58:49,197 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0166 | Val rms_score: 0.9983
225
+ 2025-09-18 17:58:52,097 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0510 | Val rms_score: 0.9764
226
+ 2025-09-18 17:58:55,714 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0124 | Val rms_score: 0.9800
227
+ 2025-09-18 17:58:59,379 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0092 | Val rms_score: 0.9683
228
+ 2025-09-18 17:59:02,289 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0089 | Val rms_score: 0.9605
229
+ 2025-09-18 17:59:06,054 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0083 | Val rms_score: 0.9539
230
+ 2025-09-18 17:59:09,800 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0073 | Val rms_score: 0.9538
231
+ 2025-09-18 17:59:12,922 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0069 | Val rms_score: 0.9570
232
+ 2025-09-18 17:59:14,422 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0071 | Val rms_score: 0.9508
233
+ 2025-09-18 17:59:17,468 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0074 | Val rms_score: 0.9627
234
+ 2025-09-18 17:59:21,791 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0162 | Val rms_score: 0.9612
235
+ 2025-09-18 17:59:25,262 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0111 | Val rms_score: 0.9662
236
+ 2025-09-18 17:59:29,128 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0096 | Val rms_score: 0.9629
237
+ 2025-09-18 17:59:33,184 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0087 | Val rms_score: 0.9489
238
+ 2025-09-18 17:59:36,855 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0105 | Val rms_score: 0.9468
239
+ 2025-09-18 17:59:40,981 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0141 | Val rms_score: 0.9527
240
+ 2025-09-18 17:59:41,672 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0096 | Val rms_score: 0.9499
241
+ 2025-09-18 17:59:45,477 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0082 | Val rms_score: 0.9547
242
+ 2025-09-18 17:59:48,526 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0070 | Val rms_score: 0.9501
243
+ 2025-09-18 17:59:52,365 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0061 | Val rms_score: 0.9329
244
+ 2025-09-18 17:59:56,039 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0066 | Val rms_score: 0.9487
245
+ 2025-09-18 17:59:59,837 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0073 | Val rms_score: 0.9423
246
+ 2025-09-18 18:00:03,642 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0070 | Val rms_score: 0.9374
247
+ 2025-09-18 18:00:06,982 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0067 | Val rms_score: 0.9373
248
+ 2025-09-18 18:00:07,603 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5082
249
+ 2025-09-18 18:00:07,945 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset freesolv at 2025-09-18_18-00-07
250
+ 2025-09-18 18:00:11,288 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.7500 | Val rms_score: 1.1766
251
+ 2025-09-18 18:00:11,288 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
252
+ 2025-09-18 18:00:12,046 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.1766
253
+ 2025-09-18 18:00:12,615 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2426 | Val rms_score: 1.1083
254
+ 2025-09-18 18:00:12,799 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
255
+ 2025-09-18 18:00:13,332 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.1083
256
+ 2025-09-18 18:00:17,262 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1498 | Val rms_score: 1.0239
257
+ 2025-09-18 18:00:17,433 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
258
+ 2025-09-18 18:00:17,980 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.0239
259
+ 2025-09-18 18:00:21,570 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0974 | Val rms_score: 0.9397
260
+ 2025-09-18 18:00:21,741 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
261
+ 2025-09-18 18:00:22,264 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.9397
262
+ 2025-09-18 18:00:26,159 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0749 | Val rms_score: 0.9224
263
+ 2025-09-18 18:00:26,337 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
264
+ 2025-09-18 18:00:26,890 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.9224
265
+ 2025-09-18 18:00:30,830 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0635 | Val rms_score: 0.8997
266
+ 2025-09-18 18:00:31,311 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 102
267
+ 2025-09-18 18:00:31,857 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.8997
268
+ 2025-09-18 18:00:35,123 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0924 | Val rms_score: 0.9772
269
+ 2025-09-18 18:00:38,830 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1526 | Val rms_score: 0.9750
270
+ 2025-09-18 18:00:42,697 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0915 | Val rms_score: 0.9650
271
+ 2025-09-18 18:00:43,602 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0850 | Val rms_score: 0.9180
272
+ 2025-09-18 18:00:47,397 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0643 | Val rms_score: 0.9123
273
+ 2025-09-18 18:00:50,802 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0242 | Val rms_score: 0.9031
274
+ 2025-09-18 18:00:54,573 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0512 | Val rms_score: 0.9180
275
+ 2025-09-18 18:00:57,929 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0685 | Val rms_score: 0.8810
276
+ 2025-09-18 18:00:58,121 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 238
277
+ 2025-09-18 18:00:58,712 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.8810
278
+ 2025-09-18 18:01:02,644 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0471 | Val rms_score: 0.9014
279
+ 2025-09-18 18:01:05,863 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0466 | Val rms_score: 0.8930
280
+ 2025-09-18 18:01:10,147 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0335 | Val rms_score: 0.9000
281
+ 2025-09-18 18:01:11,201 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0505 | Val rms_score: 0.9167
282
+ 2025-09-18 18:01:14,949 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0625 | Val rms_score: 0.8945
283
+ 2025-09-18 18:01:18,694 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0423 | Val rms_score: 0.9052
284
+ 2025-09-18 18:01:21,867 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0335 | Val rms_score: 0.8589
285
+ 2025-09-18 18:01:22,354 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 357
286
+ 2025-09-18 18:01:22,947 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val rms_score: 0.8589
287
+ 2025-09-18 18:01:27,006 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0303 | Val rms_score: 0.8876
288
+ 2025-09-18 18:01:30,652 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0322 | Val rms_score: 0.8616
289
+ 2025-09-18 18:01:34,478 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0264 | Val rms_score: 0.8766
290
+ 2025-09-18 18:01:37,457 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0236 | Val rms_score: 0.8436
291
+ 2025-09-18 18:01:37,638 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 425
292
+ 2025-09-18 18:01:38,301 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 25 with val rms_score: 0.8436
293
+ 2025-09-18 18:01:42,146 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0195 | Val rms_score: 0.8599
294
+ 2025-09-18 18:01:43,627 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0183 | Val rms_score: 0.8455
295
+ 2025-09-18 18:01:47,460 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0177 | Val rms_score: 0.8534
296
+ 2025-09-18 18:01:50,432 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0172 | Val rms_score: 0.8521
297
+ 2025-09-18 18:01:54,190 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0136 | Val rms_score: 0.8538
298
+ 2025-09-18 18:01:58,173 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0176 | Val rms_score: 0.8518
299
+ 2025-09-18 18:02:02,573 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0170 | Val rms_score: 0.8533
300
+ 2025-09-18 18:02:06,445 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0184 | Val rms_score: 0.8476
301
+ 2025-09-18 18:02:09,987 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0163 | Val rms_score: 0.8592
302
+ 2025-09-18 18:02:11,378 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0137 | Val rms_score: 0.8516
303
+ 2025-09-18 18:02:14,493 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0160 | Val rms_score: 0.8561
304
+ 2025-09-18 18:02:18,759 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0146 | Val rms_score: 0.8487
305
+ 2025-09-18 18:02:21,714 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0165 | Val rms_score: 0.8487
306
+ 2025-09-18 18:02:25,578 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0148 | Val rms_score: 0.8350
307
+ 2025-09-18 18:02:25,724 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 663
308
+ 2025-09-18 18:02:26,293 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 39 with val rms_score: 0.8350
309
+ 2025-09-18 18:02:29,438 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0144 | Val rms_score: 0.8481
310
+ 2025-09-18 18:02:32,744 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0165 | Val rms_score: 0.8317
311
+ 2025-09-18 18:02:33,257 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 697
312
+ 2025-09-18 18:02:33,910 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 41 with val rms_score: 0.8317
313
+ 2025-09-18 18:02:37,336 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0140 | Val rms_score: 0.8464
314
+ 2025-09-18 18:02:41,192 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0124 | Val rms_score: 0.8367
315
+ 2025-09-18 18:02:42,507 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0132 | Val rms_score: 0.8394
316
+ 2025-09-18 18:02:46,273 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0144 | Val rms_score: 0.8275
317
+ 2025-09-18 18:02:46,451 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 765
318
+ 2025-09-18 18:02:46,998 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 45 with val rms_score: 0.8275
319
+ 2025-09-18 18:02:50,968 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0136 | Val rms_score: 0.8257
320
+ 2025-09-18 18:02:51,466 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 782
321
+ 2025-09-18 18:02:52,015 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 46 with val rms_score: 0.8257
322
+ 2025-09-18 18:02:55,942 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0184 | Val rms_score: 0.8468
323
+ 2025-09-18 18:02:59,808 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0161 | Val rms_score: 0.8146
324
+ 2025-09-18 18:02:59,991 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 816
325
+ 2025-09-18 18:03:00,524 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 48 with val rms_score: 0.8146
326
+ 2025-09-18 18:03:04,414 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0170 | Val rms_score: 0.8294
327
+ 2025-09-18 18:03:08,338 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0178 | Val rms_score: 0.8732
328
+ 2025-09-18 18:03:11,875 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0404 | Val rms_score: 0.8316
329
+ 2025-09-18 18:03:13,467 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0395 | Val rms_score: 0.8579
330
+ 2025-09-18 18:03:16,566 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0264 | Val rms_score: 0.8329
331
+ 2025-09-18 18:03:20,287 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0203 | Val rms_score: 0.8709
332
+ 2025-09-18 18:03:23,390 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0136 | Val rms_score: 0.8445
333
+ 2025-09-18 18:03:27,221 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0113 | Val rms_score: 0.8552
334
+ 2025-09-18 18:03:30,985 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0112 | Val rms_score: 0.8445
335
+ 2025-09-18 18:03:34,899 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0098 | Val rms_score: 0.8477
336
+ 2025-09-18 18:03:39,592 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0116 | Val rms_score: 0.8378
337
+ 2025-09-18 18:03:43,312 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0099 | Val rms_score: 0.8443
338
+ 2025-09-18 18:03:44,720 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0093 | Val rms_score: 0.8329
339
+ 2025-09-18 18:03:48,418 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0101 | Val rms_score: 0.8371
340
+ 2025-09-18 18:03:52,216 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0092 | Val rms_score: 0.8340
341
+ 2025-09-18 18:03:55,216 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0089 | Val rms_score: 0.8397
342
+ 2025-09-18 18:03:59,023 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0134 | Val rms_score: 0.8255
343
+ 2025-09-18 18:04:02,673 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0096 | Val rms_score: 0.8349
344
+ 2025-09-18 18:04:06,997 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0115 | Val rms_score: 0.8054
345
+ 2025-09-18 18:04:07,145 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1139
346
+ 2025-09-18 18:04:07,703 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 67 with val rms_score: 0.8054
347
+ 2025-09-18 18:04:11,092 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0138 | Val rms_score: 0.8749
348
+ 2025-09-18 18:04:12,463 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0333 | Val rms_score: 0.8793
349
+ 2025-09-18 18:04:16,136 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0326 | Val rms_score: 0.7795
350
+ 2025-09-18 18:04:16,316 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1190
351
+ 2025-09-18 18:04:16,896 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 70 with val rms_score: 0.7795
352
+ 2025-09-18 18:04:20,865 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0213 | Val rms_score: 0.9359
353
+ 2025-09-18 18:04:25,024 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0288 | Val rms_score: 0.9103
354
+ 2025-09-18 18:04:28,679 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0310 | Val rms_score: 0.8522
355
+ 2025-09-18 18:04:32,472 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0159 | Val rms_score: 0.8508
356
+ 2025-09-18 18:04:35,519 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0105 | Val rms_score: 0.8652
357
+ 2025-09-18 18:04:39,384 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0105 | Val rms_score: 0.8434
358
+ 2025-09-18 18:04:42,761 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0086 | Val rms_score: 0.8455
359
+ 2025-09-18 18:04:44,102 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0084 | Val rms_score: 0.8558
360
+ 2025-09-18 18:04:47,542 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0105 | Val rms_score: 0.8279
361
+ 2025-09-18 18:04:51,411 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0099 | Val rms_score: 0.8401
362
+ 2025-09-18 18:04:55,198 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0082 | Val rms_score: 0.8212
363
+ 2025-09-18 18:04:59,306 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0084 | Val rms_score: 0.8490
364
+ 2025-09-18 18:05:03,013 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0080 | Val rms_score: 0.8232
365
+ 2025-09-18 18:05:06,059 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0089 | Val rms_score: 0.8268
366
+ 2025-09-18 18:05:10,051 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0093 | Val rms_score: 0.8210
367
+ 2025-09-18 18:05:13,378 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0155 | Val rms_score: 0.8517
368
+ 2025-09-18 18:05:15,164 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0163 | Val rms_score: 0.8640
369
+ 2025-09-18 18:05:18,781 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0136 | Val rms_score: 0.8400
370
+ 2025-09-18 18:05:22,548 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0140 | Val rms_score: 0.8239
371
+ 2025-09-18 18:05:26,306 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0352 | Val rms_score: 0.8754
372
+ 2025-09-18 18:05:29,683 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.1029 | Val rms_score: 0.8693
373
+ 2025-09-18 18:05:33,871 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0471 | Val rms_score: 0.8254
374
+ 2025-09-18 18:05:36,860 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0296 | Val rms_score: 0.9149
375
+ 2025-09-18 18:05:40,758 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0568 | Val rms_score: 0.9592
376
+ 2025-09-18 18:05:41,540 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0349 | Val rms_score: 0.8875
377
+ 2025-09-18 18:05:45,389 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0213 | Val rms_score: 0.8692
378
+ 2025-09-18 18:05:49,920 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0163 | Val rms_score: 0.8712
379
+ 2025-09-18 18:05:53,550 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0128 | Val rms_score: 0.8544
380
+ 2025-09-18 18:05:56,724 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0116 | Val rms_score: 0.8598
381
+ 2025-09-18 18:06:00,293 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0123 | Val rms_score: 0.8542
382
+ 2025-09-18 18:06:00,941 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5635
383
+ 2025-09-18 18:06:01,321 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.5445, Std Dev: 0.0257
logs_modchembert_regression_ModChemBERT-MLM-DAPT-TAFT/modchembert_deepchem_splits_run_lipo_epochs100_batch_size32_20250918_180601.log ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 18:06:01,322 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Running benchmark for dataset: lipo
2
+ 2025-09-18 18:06:01,322 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - dataset: lipo, tasks: ['exp'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 18:06:01,326 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset lipo at 2025-09-18_18-06-01
4
+ 2025-09-18 18:06:20,334 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2906 | Val rms_score: 0.7930
5
+ 2025-09-18 18:06:20,335 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
6
+ 2025-09-18 18:06:20,925 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.7930
7
+ 2025-09-18 18:06:41,230 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2297 | Val rms_score: 0.6927
8
+ 2025-09-18 18:06:41,409 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
9
+ 2025-09-18 18:06:41,954 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.6927
10
+ 2025-09-18 18:07:05,358 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1906 | Val rms_score: 0.6719
11
+ 2025-09-18 18:07:05,494 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 315
12
+ 2025-09-18 18:07:06,053 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.6719
13
+ 2025-09-18 18:07:26,376 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1344 | Val rms_score: 0.6601
14
+ 2025-09-18 18:07:26,552 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
15
+ 2025-09-18 18:07:27,097 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.6601
16
+ 2025-09-18 18:07:46,599 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1325 | Val rms_score: 0.6669
17
+ 2025-09-18 18:08:08,477 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1052 | Val rms_score: 0.6639
18
+ 2025-09-18 18:08:27,657 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0848 | Val rms_score: 0.6591
19
+ 2025-09-18 18:08:27,798 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 735
20
+ 2025-09-18 18:08:28,345 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.6591
21
+ 2025-09-18 18:08:49,457 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0832 | Val rms_score: 0.6597
22
+ 2025-09-18 18:09:08,218 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0785 | Val rms_score: 0.6735
23
+ 2025-09-18 18:09:29,112 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0731 | Val rms_score: 0.6664
24
+ 2025-09-18 18:09:51,457 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0659 | Val rms_score: 0.6647
25
+ 2025-09-18 18:10:11,355 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0641 | Val rms_score: 0.6848
26
+ 2025-09-18 18:10:29,800 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0615 | Val rms_score: 0.6858
27
+ 2025-09-18 18:10:50,817 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0621 | Val rms_score: 0.6688
28
+ 2025-09-18 18:11:10,704 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0571 | Val rms_score: 0.6619
29
+ 2025-09-18 18:11:32,498 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0602 | Val rms_score: 0.6755
30
+ 2025-09-18 18:11:51,851 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0540 | Val rms_score: 0.6863
31
+ 2025-09-18 18:12:11,324 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0486 | Val rms_score: 0.6585
32
+ 2025-09-18 18:12:11,466 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1890
33
+ 2025-09-18 18:12:12,041 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 18 with val rms_score: 0.6585
34
+ 2025-09-18 18:12:37,448 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0503 | Val rms_score: 0.6754
35
+ 2025-09-18 18:12:54,220 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0525 | Val rms_score: 0.6588
36
+ 2025-09-18 18:13:16,059 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0461 | Val rms_score: 0.6610
37
+ 2025-09-18 18:13:36,078 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0656 | Val rms_score: 0.6731
38
+ 2025-09-18 18:13:56,871 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0474 | Val rms_score: 0.6672
39
+ 2025-09-18 18:14:16,938 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0465 | Val rms_score: 0.6608
40
+ 2025-09-18 18:14:35,017 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0462 | Val rms_score: 0.6840
41
+ 2025-09-18 18:14:55,021 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0544 | Val rms_score: 0.6650
42
+ 2025-09-18 18:15:15,354 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0460 | Val rms_score: 0.6658
43
+ 2025-09-18 18:15:36,450 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0477 | Val rms_score: 0.6774
44
+ 2025-09-18 18:15:57,881 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0455 | Val rms_score: 0.6845
45
+ 2025-09-18 18:16:18,780 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0456 | Val rms_score: 0.6812
46
+ 2025-09-18 18:16:40,554 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0477 | Val rms_score: 0.6707
47
+ 2025-09-18 18:17:00,185 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0427 | Val rms_score: 0.6626
48
+ 2025-09-18 18:17:22,778 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0411 | Val rms_score: 0.6595
49
+ 2025-09-18 18:17:42,303 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0406 | Val rms_score: 0.6696
50
+ 2025-09-18 18:18:02,315 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0423 | Val rms_score: 0.6647
51
+ 2025-09-18 18:18:24,790 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0475 | Val rms_score: 0.6650
52
+ 2025-09-18 18:18:44,130 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0417 | Val rms_score: 0.6731
53
+ 2025-09-18 18:19:04,922 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0370 | Val rms_score: 0.6698
54
+ 2025-09-18 18:19:28,291 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0380 | Val rms_score: 0.6695
55
+ 2025-09-18 18:19:48,604 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0383 | Val rms_score: 0.6729
56
+ 2025-09-18 18:20:08,546 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0410 | Val rms_score: 0.6630
57
+ 2025-09-18 18:20:28,465 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0449 | Val rms_score: 0.6569
58
+ 2025-09-18 18:20:28,613 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 4410
59
+ 2025-09-18 18:20:29,201 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 42 with val rms_score: 0.6569
60
+ 2025-09-18 18:20:51,208 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0372 | Val rms_score: 0.6510
61
+ 2025-09-18 18:20:51,386 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 4515
62
+ 2025-09-18 18:20:51,949 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 43 with val rms_score: 0.6510
63
+ 2025-09-18 18:21:11,366 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0432 | Val rms_score: 0.6739
64
+ 2025-09-18 18:21:33,789 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0378 | Val rms_score: 0.6667
65
+ 2025-09-18 18:21:53,172 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0419 | Val rms_score: 0.6765
66
+ 2025-09-18 18:22:12,186 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0424 | Val rms_score: 0.6586
67
+ 2025-09-18 18:22:34,345 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0371 | Val rms_score: 0.6657
68
+ 2025-09-18 18:22:55,242 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0396 | Val rms_score: 0.6693
69
+ 2025-09-18 18:23:16,429 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0387 | Val rms_score: 0.6685
70
+ 2025-09-18 18:23:35,134 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0384 | Val rms_score: 0.6715
71
+ 2025-09-18 18:23:55,359 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0388 | Val rms_score: 0.6632
72
+ 2025-09-18 18:24:15,821 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0394 | Val rms_score: 0.6628
73
+ 2025-09-18 18:24:35,826 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0350 | Val rms_score: 0.6738
74
+ 2025-09-18 18:24:56,177 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0381 | Val rms_score: 0.6696
75
+ 2025-09-18 18:25:16,636 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0391 | Val rms_score: 0.6672
76
+ 2025-09-18 18:25:36,992 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0360 | Val rms_score: 0.6657
77
+ 2025-09-18 18:25:59,313 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0385 | Val rms_score: 0.6622
78
+ 2025-09-18 18:26:19,318 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0383 | Val rms_score: 0.6811
79
+ 2025-09-18 18:26:39,472 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0367 | Val rms_score: 0.6776
80
+ 2025-09-18 18:27:00,425 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0330 | Val rms_score: 0.6763
81
+ 2025-09-18 18:27:23,086 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0426 | Val rms_score: 0.6694
82
+ 2025-09-18 18:27:41,245 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0352 | Val rms_score: 0.6725
83
+ 2025-09-18 18:28:00,418 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0361 | Val rms_score: 0.6737
84
+ 2025-09-18 18:28:23,012 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0394 | Val rms_score: 0.6717
85
+ 2025-09-18 18:28:42,303 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0383 | Val rms_score: 0.6812
86
+ 2025-09-18 18:29:02,482 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0377 | Val rms_score: 0.6756
87
+ 2025-09-18 18:29:25,083 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0355 | Val rms_score: 0.6841
88
+ 2025-09-18 18:29:44,870 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0332 | Val rms_score: 0.6597
89
+ 2025-09-18 18:30:03,939 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0342 | Val rms_score: 0.6861
90
+ 2025-09-18 18:30:23,305 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0369 | Val rms_score: 0.6766
91
+ 2025-09-18 18:30:45,682 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0365 | Val rms_score: 0.6625
92
+ 2025-09-18 18:31:05,447 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0375 | Val rms_score: 0.6661
93
+ 2025-09-18 18:31:23,739 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0335 | Val rms_score: 0.6678
94
+ 2025-09-18 18:31:45,727 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0342 | Val rms_score: 0.6781
95
+ 2025-09-18 18:32:05,985 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0342 | Val rms_score: 0.6741
96
+ 2025-09-18 18:32:26,565 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0358 | Val rms_score: 0.6658
97
+ 2025-09-18 18:32:48,364 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0330 | Val rms_score: 0.6605
98
+ 2025-09-18 18:33:08,199 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0337 | Val rms_score: 0.6730
99
+ 2025-09-18 18:33:28,465 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0372 | Val rms_score: 0.6761
100
+ 2025-09-18 18:33:51,765 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0432 | Val rms_score: 0.6655
101
+ 2025-09-18 18:34:11,975 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0354 | Val rms_score: 0.6656
102
+ 2025-09-18 18:34:31,977 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0383 | Val rms_score: 0.6622
103
+ 2025-09-18 18:34:56,165 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0342 | Val rms_score: 0.6586
104
+ 2025-09-18 18:35:16,420 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0336 | Val rms_score: 0.6651
105
+ 2025-09-18 18:35:37,719 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0323 | Val rms_score: 0.6598
106
+ 2025-09-18 18:35:56,883 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0317 | Val rms_score: 0.6688
107
+ 2025-09-18 18:36:20,719 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0344 | Val rms_score: 0.6718
108
+ 2025-09-18 18:36:41,656 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0359 | Val rms_score: 0.6640
109
+ 2025-09-18 18:37:01,915 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0341 | Val rms_score: 0.6629
110
+ 2025-09-18 18:37:25,462 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0335 | Val rms_score: 0.6661
111
+ 2025-09-18 18:37:46,330 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0352 | Val rms_score: 0.6728
112
+ 2025-09-18 18:38:06,940 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0346 | Val rms_score: 0.6704
113
+ 2025-09-18 18:38:27,386 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0333 | Val rms_score: 0.6682
114
+ 2025-09-18 18:38:50,715 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0344 | Val rms_score: 0.6713
115
+ 2025-09-18 18:39:11,962 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0322 | Val rms_score: 0.6793
116
+ 2025-09-18 18:39:32,524 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0320 | Val rms_score: 0.6906
117
+ 2025-09-18 18:39:56,268 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0311 | Val rms_score: 0.6736
118
+ 2025-09-18 18:40:16,384 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0334 | Val rms_score: 0.6676
119
+ 2025-09-18 18:40:36,078 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0325 | Val rms_score: 0.6641
120
+ 2025-09-18 18:40:37,642 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.6850
121
+ 2025-09-18 18:40:38,047 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset lipo at 2025-09-18_18-40-38
122
+ 2025-09-18 18:40:57,138 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.3047 | Val rms_score: 0.7197
123
+ 2025-09-18 18:40:57,138 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
124
+ 2025-09-18 18:40:57,748 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.7197
125
+ 2025-09-18 18:41:21,653 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2344 | Val rms_score: 0.6977
126
+ 2025-09-18 18:41:21,850 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
127
+ 2025-09-18 18:41:22,608 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.6977
128
+ 2025-09-18 18:41:43,134 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1885 | Val rms_score: 0.7055
129
+ 2025-09-18 18:42:03,461 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1359 | Val rms_score: 0.6668
130
+ 2025-09-18 18:42:03,605 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
131
+ 2025-09-18 18:42:04,158 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.6668
132
+ 2025-09-18 18:42:24,100 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1087 | Val rms_score: 0.6701
133
+ 2025-09-18 18:42:46,764 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1177 | Val rms_score: 0.6543
134
+ 2025-09-18 18:42:47,251 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 630
135
+ 2025-09-18 18:42:47,792 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.6543
136
+ 2025-09-18 18:43:07,021 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1054 | Val rms_score: 0.6541
137
+ 2025-09-18 18:43:07,172 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 735
138
+ 2025-09-18 18:43:07,766 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.6541
139
+ 2025-09-18 18:43:27,391 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0840 | Val rms_score: 0.6492
140
+ 2025-09-18 18:43:27,574 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 840
141
+ 2025-09-18 18:43:28,186 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.6492
142
+ 2025-09-18 18:43:50,535 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0740 | Val rms_score: 0.6649
143
+ 2025-09-18 18:44:10,801 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0681 | Val rms_score: 0.6664
144
+ 2025-09-18 18:44:31,103 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0628 | Val rms_score: 0.6792
145
+ 2025-09-18 18:44:54,799 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0641 | Val rms_score: 0.6559
146
+ 2025-09-18 18:45:14,441 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0601 | Val rms_score: 0.6576
147
+ 2025-09-18 18:45:33,548 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0683 | Val rms_score: 0.6873
148
+ 2025-09-18 18:45:55,923 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0617 | Val rms_score: 0.6826
149
+ 2025-09-18 18:46:15,551 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0629 | Val rms_score: 0.6612
150
+ 2025-09-18 18:46:35,223 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0496 | Val rms_score: 0.6474
151
+ 2025-09-18 18:46:35,376 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1785
152
+ 2025-09-18 18:46:36,113 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val rms_score: 0.6474
153
+ 2025-09-18 18:46:56,139 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0486 | Val rms_score: 0.6551
154
+ 2025-09-18 18:47:18,850 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0497 | Val rms_score: 0.6466
155
+ 2025-09-18 18:47:19,001 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1995
156
+ 2025-09-18 18:47:19,570 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 19 with val rms_score: 0.6466
157
+ 2025-09-18 18:47:40,986 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0462 | Val rms_score: 0.6439
158
+ 2025-09-18 18:47:41,150 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2100
159
+ 2025-09-18 18:47:41,724 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val rms_score: 0.6439
160
+ 2025-09-18 18:48:01,611 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0453 | Val rms_score: 0.6679
161
+ 2025-09-18 18:48:25,010 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0500 | Val rms_score: 0.6742
162
+ 2025-09-18 18:48:44,626 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0451 | Val rms_score: 0.6568
163
+ 2025-09-18 18:48:58,102 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0492 | Val rms_score: 0.6486
164
+ 2025-09-18 18:49:14,602 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0450 | Val rms_score: 0.6563
165
+ 2025-09-18 18:49:29,181 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0570 | Val rms_score: 0.6762
166
+ 2025-09-18 18:49:46,954 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0464 | Val rms_score: 0.6621
167
+ 2025-09-18 18:50:01,679 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0531 | Val rms_score: 0.6713
168
+ 2025-09-18 18:50:19,819 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0448 | Val rms_score: 0.6629
169
+ 2025-09-18 18:50:34,892 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0428 | Val rms_score: 0.6741
170
+ 2025-09-18 18:50:52,333 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0457 | Val rms_score: 0.6633
171
+ 2025-09-18 18:51:06,703 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0380 | Val rms_score: 0.6657
172
+ 2025-09-18 18:51:24,765 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0462 | Val rms_score: 0.6680
173
+ 2025-09-18 18:51:39,268 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0444 | Val rms_score: 0.6680
174
+ 2025-09-18 18:51:56,461 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0427 | Val rms_score: 0.6732
175
+ 2025-09-18 18:52:11,499 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0447 | Val rms_score: 0.6627
176
+ 2025-09-18 18:52:25,543 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0414 | Val rms_score: 0.6701
177
+ 2025-09-18 18:52:43,470 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0410 | Val rms_score: 0.6608
178
+ 2025-09-18 18:53:00,385 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0431 | Val rms_score: 0.6678
179
+ 2025-09-18 18:53:17,876 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0387 | Val rms_score: 0.6608
180
+ 2025-09-18 18:53:32,336 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0414 | Val rms_score: 0.6501
181
+ 2025-09-18 18:53:50,969 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0424 | Val rms_score: 0.6715
182
+ 2025-09-18 18:54:05,361 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0440 | Val rms_score: 0.6771
183
+ 2025-09-18 18:54:23,541 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0346 | Val rms_score: 0.6700
184
+ 2025-09-18 18:54:37,691 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0364 | Val rms_score: 0.6738
185
+ 2025-09-18 18:54:55,813 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0406 | Val rms_score: 0.6647
186
+ 2025-09-18 18:55:09,008 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0411 | Val rms_score: 0.6810
187
+ 2025-09-18 18:55:25,221 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0422 | Val rms_score: 0.6581
188
+ 2025-09-18 18:55:41,909 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0441 | Val rms_score: 0.6723
189
+ 2025-09-18 18:55:56,140 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0395 | Val rms_score: 0.6697
190
+ 2025-09-18 18:56:14,188 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0384 | Val rms_score: 0.6748
191
+ 2025-09-18 18:56:27,880 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0523 | Val rms_score: 0.6769
192
+ 2025-09-18 18:56:44,523 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0380 | Val rms_score: 0.6667
193
+ 2025-09-18 18:56:58,400 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0368 | Val rms_score: 0.6871
194
+ 2025-09-18 18:57:15,389 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0360 | Val rms_score: 0.6711
195
+ 2025-09-18 18:57:30,443 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0389 | Val rms_score: 0.6712
196
+ 2025-09-18 18:57:48,780 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0381 | Val rms_score: 0.6648
197
+ 2025-09-18 18:58:04,360 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0347 | Val rms_score: 0.6681
198
+ 2025-09-18 18:58:21,825 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0354 | Val rms_score: 0.6686
199
+ 2025-09-18 18:58:35,847 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0369 | Val rms_score: 0.6644
200
+ 2025-09-18 18:58:52,095 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0369 | Val rms_score: 0.6616
201
+ 2025-09-18 18:59:08,177 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0381 | Val rms_score: 0.6531
202
+ 2025-09-18 18:59:25,590 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0406 | Val rms_score: 0.6524
203
+ 2025-09-18 18:59:43,106 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0391 | Val rms_score: 0.6500
204
+ 2025-09-18 18:59:57,590 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0375 | Val rms_score: 0.6614
205
+ 2025-09-18 19:00:12,362 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0349 | Val rms_score: 0.6693
206
+ 2025-09-18 19:00:30,175 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0395 | Val rms_score: 0.6536
207
+ 2025-09-18 19:00:46,458 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0402 | Val rms_score: 0.6739
208
+ 2025-09-18 19:01:03,027 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0401 | Val rms_score: 0.6758
209
+ 2025-09-18 19:01:19,161 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0358 | Val rms_score: 0.6601
210
+ 2025-09-18 19:01:31,380 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0366 | Val rms_score: 0.6692
211
+ 2025-09-18 19:01:48,585 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0385 | Val rms_score: 0.6710
212
+ 2025-09-18 19:02:06,178 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0368 | Val rms_score: 0.6635
213
+ 2025-09-18 19:02:19,296 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0357 | Val rms_score: 0.6752
214
+ 2025-09-18 19:02:37,070 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0350 | Val rms_score: 0.6726
215
+ 2025-09-18 19:02:51,558 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0373 | Val rms_score: 0.6683
216
+ 2025-09-18 19:03:08,059 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0320 | Val rms_score: 0.6629
217
+ 2025-09-18 19:03:23,458 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0326 | Val rms_score: 0.6589
218
+ 2025-09-18 19:03:39,509 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0316 | Val rms_score: 0.6588
219
+ 2025-09-18 19:03:55,769 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0338 | Val rms_score: 0.6554
220
+ 2025-09-18 19:04:11,593 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0318 | Val rms_score: 0.6682
221
+ 2025-09-18 19:04:28,186 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0336 | Val rms_score: 0.6588
222
+ 2025-09-18 19:04:43,479 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0385 | Val rms_score: 0.6586
223
+ 2025-09-18 19:04:59,079 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0340 | Val rms_score: 0.6661
224
+ 2025-09-18 19:05:14,671 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0367 | Val rms_score: 0.6719
225
+ 2025-09-18 19:05:31,509 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0372 | Val rms_score: 0.6705
226
+ 2025-09-18 19:05:47,586 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0337 | Val rms_score: 0.6499
227
+ 2025-09-18 19:06:04,256 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0344 | Val rms_score: 0.6606
228
+ 2025-09-18 19:06:20,093 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0366 | Val rms_score: 0.6739
229
+ 2025-09-18 19:06:36,592 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0341 | Val rms_score: 0.6706
230
+ 2025-09-18 19:06:57,260 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0348 | Val rms_score: 0.6704
231
+ 2025-09-18 19:07:08,812 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0318 | Val rms_score: 0.6615
232
+ 2025-09-18 19:07:29,299 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0358 | Val rms_score: 0.6571
233
+ 2025-09-18 19:07:51,214 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0346 | Val rms_score: 0.6550
234
+ 2025-09-18 19:08:11,972 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0350 | Val rms_score: 0.6590
235
+ 2025-09-18 19:08:34,741 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0316 | Val rms_score: 0.6640
236
+ 2025-09-18 19:08:56,015 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0325 | Val rms_score: 0.6555
237
+ 2025-09-18 19:09:22,498 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0307 | Val rms_score: 0.6672
238
+ 2025-09-18 19:09:38,156 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0326 | Val rms_score: 0.6563
239
+ 2025-09-18 19:09:58,878 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0316 | Val rms_score: 0.6629
240
+ 2025-09-18 19:10:00,552 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.6782
241
+ 2025-09-18 19:10:00,925 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset lipo at 2025-09-18_19-10-00
242
+ 2025-09-18 19:10:21,627 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2719 | Val rms_score: 0.7383
243
+ 2025-09-18 19:10:21,627 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
244
+ 2025-09-18 19:10:23,145 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.7383
245
+ 2025-09-18 19:10:45,392 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2625 | Val rms_score: 0.7198
246
+ 2025-09-18 19:10:45,568 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
247
+ 2025-09-18 19:10:46,133 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7198
248
+ 2025-09-18 19:11:05,240 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2031 | Val rms_score: 0.6702
249
+ 2025-09-18 19:11:05,393 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 315
250
+ 2025-09-18 19:11:06,000 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.6702
251
+ 2025-09-18 19:11:32,657 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1391 | Val rms_score: 0.7071
252
+ 2025-09-18 19:11:49,011 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1194 | Val rms_score: 0.6608
253
+ 2025-09-18 19:11:49,154 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 525
254
+ 2025-09-18 19:11:49,727 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.6608
255
+ 2025-09-18 19:12:11,590 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1000 | Val rms_score: 0.6744
256
+ 2025-09-18 19:12:33,254 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1103 | Val rms_score: 0.7089
257
+ 2025-09-18 19:12:55,374 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0828 | Val rms_score: 0.6684
258
+ 2025-09-18 19:13:16,862 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0722 | Val rms_score: 0.6426
259
+ 2025-09-18 19:13:17,024 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 945
260
+ 2025-09-18 19:13:17,630 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.6426
261
+ 2025-09-18 19:13:40,874 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0694 | Val rms_score: 0.6597
262
+ 2025-09-18 19:14:03,353 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0651 | Val rms_score: 0.6721
263
+ 2025-09-18 19:14:25,943 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0602 | Val rms_score: 0.6690
264
+ 2025-09-18 19:14:48,094 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0644 | Val rms_score: 0.6679
265
+ 2025-09-18 19:15:10,236 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0603 | Val rms_score: 0.6712
266
+ 2025-09-18 19:15:32,701 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0571 | Val rms_score: 0.6762
267
+ 2025-09-18 19:15:54,900 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0566 | Val rms_score: 0.6970
268
+ 2025-09-18 19:16:17,741 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0526 | Val rms_score: 0.6725
269
+ 2025-09-18 19:16:41,207 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0510 | Val rms_score: 0.6772
270
+ 2025-09-18 19:17:03,108 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0520 | Val rms_score: 0.6586
271
+ 2025-09-18 19:17:27,836 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0497 | Val rms_score: 0.6628
272
+ 2025-09-18 19:17:50,089 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0492 | Val rms_score: 0.6643
273
+ 2025-09-18 19:18:13,016 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0559 | Val rms_score: 0.6910
274
+ 2025-09-18 19:18:35,399 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0518 | Val rms_score: 0.6772
275
+ 2025-09-18 19:18:57,760 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0539 | Val rms_score: 0.6710
276
+ 2025-09-18 19:19:19,603 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0469 | Val rms_score: 0.6760
277
+ 2025-09-18 19:19:41,951 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0497 | Val rms_score: 0.6734
278
+ 2025-09-18 19:20:04,228 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0406 | Val rms_score: 0.6689
279
+ 2025-09-18 19:20:26,637 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0418 | Val rms_score: 0.6653
280
+ 2025-09-18 19:20:49,557 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0434 | Val rms_score: 0.6724
281
+ 2025-09-18 19:21:12,743 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0494 | Val rms_score: 0.6664
282
+ 2025-09-18 19:21:33,872 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0463 | Val rms_score: 0.6582
283
+ 2025-09-18 19:21:56,798 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0464 | Val rms_score: 0.6723
284
+ 2025-09-18 19:22:18,645 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0462 | Val rms_score: 0.6849
285
+ 2025-09-18 19:22:39,895 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0408 | Val rms_score: 0.6800
286
+ 2025-09-18 19:23:00,633 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0417 | Val rms_score: 0.6795
287
+ 2025-09-18 19:23:21,178 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0387 | Val rms_score: 0.6665
288
+ 2025-09-18 19:23:43,374 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0434 | Val rms_score: 0.6611
289
+ 2025-09-18 19:24:04,922 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0387 | Val rms_score: 0.6652
290
+ 2025-09-18 19:24:28,806 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0387 | Val rms_score: 0.6607
291
+ 2025-09-18 19:24:49,275 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0358 | Val rms_score: 0.6656
292
+ 2025-09-18 19:25:10,967 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0504 | Val rms_score: 0.6619
293
+ 2025-09-18 19:25:33,306 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0418 | Val rms_score: 0.6687
294
+ 2025-09-18 19:25:54,970 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0404 | Val rms_score: 0.6668
295
+ 2025-09-18 19:26:16,526 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0434 | Val rms_score: 0.6589
296
+ 2025-09-18 19:26:38,441 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0431 | Val rms_score: 0.6832
297
+ 2025-09-18 19:26:59,712 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0414 | Val rms_score: 0.6761
298
+ 2025-09-18 19:27:22,176 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0364 | Val rms_score: 0.6703
299
+ 2025-09-18 19:27:44,371 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0408 | Val rms_score: 0.6627
300
+ 2025-09-18 19:28:06,535 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0411 | Val rms_score: 0.6705
301
+ 2025-09-18 19:28:27,704 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0409 | Val rms_score: 0.6705
302
+ 2025-09-18 19:28:48,919 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0418 | Val rms_score: 0.6693
303
+ 2025-09-18 19:29:11,332 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0427 | Val rms_score: 0.6770
304
+ 2025-09-18 19:29:33,337 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0365 | Val rms_score: 0.6603
305
+ 2025-09-18 19:29:59,235 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0382 | Val rms_score: 0.6815
306
+ 2025-09-18 19:30:15,744 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0379 | Val rms_score: 0.6667
307
+ 2025-09-18 19:30:37,603 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0373 | Val rms_score: 0.6863
308
+ 2025-09-18 19:30:59,140 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0368 | Val rms_score: 0.6675
309
+ 2025-09-18 19:31:22,574 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0372 | Val rms_score: 0.6711
310
+ 2025-09-18 19:31:43,724 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0373 | Val rms_score: 0.6645
311
+ 2025-09-18 19:32:05,142 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0355 | Val rms_score: 0.6722
312
+ 2025-09-18 19:32:27,107 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0367 | Val rms_score: 0.6746
313
+ 2025-09-18 19:32:48,090 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0344 | Val rms_score: 0.6636
314
+ 2025-09-18 19:33:09,525 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0370 | Val rms_score: 0.6632
315
+ 2025-09-18 19:33:31,065 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0326 | Val rms_score: 0.6756
316
+ 2025-09-18 19:33:52,917 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0373 | Val rms_score: 0.6680
317
+ 2025-09-18 19:34:14,390 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0352 | Val rms_score: 0.6679
318
+ 2025-09-18 19:34:37,727 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0375 | Val rms_score: 0.6687
319
+ 2025-09-18 19:35:04,661 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0381 | Val rms_score: 0.6650
320
+ 2025-09-18 19:35:19,366 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0345 | Val rms_score: 0.6731
321
+ 2025-09-18 19:35:41,208 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0364 | Val rms_score: 0.6671
322
+ 2025-09-18 19:36:03,531 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0375 | Val rms_score: 0.6636
323
+ 2025-09-18 19:36:25,079 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0324 | Val rms_score: 0.6737
324
+ 2025-09-18 19:36:46,901 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0406 | Val rms_score: 0.6622
325
+ 2025-09-18 19:37:08,318 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0350 | Val rms_score: 0.6720
326
+ 2025-09-18 19:37:34,928 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0360 | Val rms_score: 0.6762
327
+ 2025-09-18 19:37:51,600 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0355 | Val rms_score: 0.6689
328
+ 2025-09-18 19:38:15,051 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0360 | Val rms_score: 0.6575
329
+ 2025-09-18 19:38:41,815 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0326 | Val rms_score: 0.6591
330
+ 2025-09-18 19:38:56,944 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0316 | Val rms_score: 0.6654
331
+ 2025-09-18 19:39:24,205 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0355 | Val rms_score: 0.6694
332
+ 2025-09-18 19:39:39,943 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0309 | Val rms_score: 0.6644
333
+ 2025-09-18 19:40:01,649 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0352 | Val rms_score: 0.6787
334
+ 2025-09-18 19:40:23,393 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0367 | Val rms_score: 0.6675
335
+ 2025-09-18 19:40:45,124 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0328 | Val rms_score: 0.6711
336
+ 2025-09-18 19:41:07,035 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0341 | Val rms_score: 0.6646
337
+ 2025-09-18 19:41:29,822 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0339 | Val rms_score: 0.6863
338
+ 2025-09-18 19:41:51,724 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0359 | Val rms_score: 0.6621
339
+ 2025-09-18 19:42:12,773 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0346 | Val rms_score: 0.6590
340
+ 2025-09-18 19:42:33,876 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0318 | Val rms_score: 0.6649
341
+ 2025-09-18 19:42:55,385 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0338 | Val rms_score: 0.6825
342
+ 2025-09-18 19:43:17,274 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0378 | Val rms_score: 0.6755
343
+ 2025-09-18 19:43:39,812 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0349 | Val rms_score: 0.6672
344
+ 2025-09-18 19:44:01,078 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0315 | Val rms_score: 0.6722
345
+ 2025-09-18 19:44:22,403 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0357 | Val rms_score: 0.6812
346
+ 2025-09-18 19:44:43,096 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0354 | Val rms_score: 0.6753
347
+ 2025-09-18 19:45:06,765 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0334 | Val rms_score: 0.6663
348
+ 2025-09-18 19:45:27,935 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0320 | Val rms_score: 0.6789
349
+ 2025-09-18 19:45:48,971 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0304 | Val rms_score: 0.6657
350
+ 2025-09-18 19:46:09,307 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0308 | Val rms_score: 0.6606
351
+ 2025-09-18 19:46:33,546 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0308 | Val rms_score: 0.6666
352
+ 2025-09-18 19:46:34,755 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.6828
353
+ 2025-09-18 19:46:35,134 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.6820, Std Dev: 0.0028
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b98fb0d741e22389fabb340a820828ccae0a5e5dc51f68f2c7d1647b3d325db
3
+ size 460409308
modeling_modchembert.py ADDED
@@ -0,0 +1,554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 Emmanuel Cortes, All Rights Reserved.
2
+ #
3
+ # Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ # This file is adapted from the transformers library.
19
+ # Modifications include:
20
+ # - Additional classifier_pooling options for ModChemBertForSequenceClassification
21
+ # - sum_mean, sum_sum, mean_sum, mean_mean: from ChemLM (utilizes all hidden states)
22
+ # - max_cls, cls_mha, max_seq_mha: from MaxPoolBERT (utilizes last k hidden states)
23
+ # - max_seq_mean: a merge between sum_mean and max_cls (utilizes last k hidden states)
24
+ # - Addition of ModChemBertPoolingAttention for cls_mha and max_seq_mha pooling options
25
+
26
+ import copy
27
+ import math
28
+ import typing
29
+ from contextlib import nullcontext
30
+
31
+ import torch
32
+ import torch.nn as nn
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
35
+ from transformers.modeling_outputs import MaskedLMOutput, SequenceClassifierOutput
36
+ from transformers.models.modernbert.modeling_modernbert import (
37
+ MODERNBERT_ATTENTION_FUNCTION,
38
+ ModernBertModel,
39
+ ModernBertPredictionHead,
40
+ ModernBertPreTrainedModel,
41
+ ModernBertRotaryEmbedding,
42
+ _pad_modernbert_output,
43
+ _unpad_modernbert_input,
44
+ )
45
+ from transformers.utils import logging
46
+
47
+ from .configuration_modchembert import ModChemBertConfig
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
class InitWeightsMixin:
    """Mixin that extends ModernBert weight initialization to ModChemBert heads.

    Delegates to the parent class's ``_init_weights`` first, then re-initializes
    the modules introduced by this file (MLM decoder, classification head, and
    pooling-attention projections) with truncated-normal weights clipped at
    ``initializer_cutoff_factor`` standard deviations.
    """

    def _init_weights(self, module: nn.Module):
        super()._init_weights(module)  # type: ignore

        cutoff = self.config.initializer_cutoff_factor  # type: ignore
        if cutoff is None:
            cutoff = 3

        def trunc_init(layer: nn.Module, std: float):
            # Truncated normal in [-cutoff*std, cutoff*std]; biases start at zero.
            if isinstance(layer, nn.Linear):
                bound = cutoff * std
                nn.init.trunc_normal_(layer.weight, mean=0.0, std=std, a=-bound, b=bound)
                if layer.bias is not None:
                    nn.init.zeros_(layer.bias)

        # Standard deviations follow the ModernBert scheme: plain range for input
        # projections, depth-scaled for output projections, 1/sqrt(hidden) for the
        # final classifier.
        stds = {
            "in": self.config.initializer_range,  # type: ignore
            "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers),  # type: ignore
            "final_out": self.config.hidden_size**-0.5,  # type: ignore
        }

        if isinstance(module, ModChemBertForMaskedLM):
            trunc_init(module.decoder, stds["out"])
        elif isinstance(module, ModChemBertForSequenceClassification):
            trunc_init(module.classifier, stds["final_out"])
        elif isinstance(module, ModChemBertPoolingAttention):
            for projection, std_key in (
                (module.Wq, "in"),
                (module.Wk, "in"),
                (module.Wv, "in"),
                (module.Wo, "out"),
            ):
                trunc_init(projection, stds[std_key])
87
+
88
+
89
class ModChemBertPoolingAttention(nn.Module):
    """Performs multi-headed self attention on a batch of sequences.

    Used by :class:`ModChemBertForSequenceClassification` for the ``cls_mha`` and
    ``max_seq_mha`` classifier pooling modes: the query is derived from a (pooled)
    CLS representation while keys/values come from the full sequence.
    """

    def __init__(self, config: ModChemBertConfig):
        super().__init__()
        self.config = copy.deepcopy(config)
        # Override num_attention_heads to use classifier_pooling_num_attention_heads
        self.config.num_attention_heads = config.classifier_pooling_num_attention_heads
        # Override attention_dropout to use classifier_pooling_attention_dropout
        self.config.attention_dropout = config.classifier_pooling_attention_dropout
        # NOTE(review): the two overrides above are written to the deep-copied
        # ``self.config``, but every attribute below reads from the original
        # ``config`` (num_attention_heads, attention_dropout), and the rotary
        # embedding is also built from a copy of the original ``config``.
        # Confirm whether the pooling-specific head count / dropout are actually
        # meant to take effect here.

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads "
                f"({config.num_attention_heads})"
            )

        self.attention_dropout = config.attention_dropout
        self.num_heads = config.num_attention_heads
        self.head_dim = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.head_dim * self.num_heads
        # Separate query/key/value projections (queries and keys/values may come
        # from different tensors, unlike the fused Wqkv in ModernBertAttention).
        self.Wq = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
        self.Wk = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
        self.Wv = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)

        # Use global attention
        self.local_attention = (-1, -1)
        rope_theta = config.global_rope_theta
        # sdpa path from original ModernBert implementation
        config_copy = copy.deepcopy(config)
        config_copy.rope_theta = rope_theta
        self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy)

        self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
        self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity()
        self.pruned_heads = set()

    def forward(
        self,
        q: torch.Tensor,
        kv: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """Attend from ``q`` over ``kv`` and return the attended sequence.

        Args:
            q: Query tensor of shape ``(batch, seq_len, hidden)``.
            kv: Tensor used for both keys and values; assumed to share ``q``'s
                batch and sequence dimensions.
            attention_mask: Optional ``(batch, seq_len)`` mask of valid tokens;
                an all-ones mask is used when omitted.
            **kwargs: Forwarded to the ModernBert sdpa attention function.

        Returns:
            torch.Tensor: Attended representation of shape ``(batch, seq_len, hidden)``.
        """
        bs, seq_len = kv.shape[:2]
        q_proj: torch.Tensor = self.Wq(q)
        k_proj: torch.Tensor = self.Wk(kv)
        v_proj: torch.Tensor = self.Wv(kv)
        # Pack the three projections into the (bs, seq_len, 3, num_heads, head_dim)
        # layout expected by MODERNBERT_ATTENTION_FUNCTION.
        qkv = torch.stack(
            (
                q_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
                k_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
                v_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
            ),
            dim=2,
        )  # (bs, seq_len, 3, num_heads, head_dim)

        device = kv.device
        if attention_mask is None:
            attention_mask = torch.ones((bs, seq_len), device=device, dtype=torch.bool)
        position_ids = torch.arange(seq_len, device=device).unsqueeze(0).long()

        attn_outputs = MODERNBERT_ATTENTION_FUNCTION["sdpa"](
            self,
            qkv=qkv,
            attention_mask=_prepare_4d_attention_mask(attention_mask, kv.dtype),
            sliding_window_mask=None,  # not needed when using global attention
            position_ids=position_ids,
            local_attention=self.local_attention,
            bs=bs,
            dim=self.all_head_size,
            **kwargs,
        )
        hidden_states = attn_outputs[0]
        # Output projection followed by (optional) dropout.
        hidden_states = self.out_drop(self.Wo(hidden_states))

        return hidden_states
166
+
167
+
168
class ModChemBertForMaskedLM(InitWeightsMixin, ModernBertPreTrainedModel):
    """ModChemBert model with a masked-language-modeling head on top.

    Wraps :class:`ModernBertModel` with a prediction head and a vocab-sized
    decoder. Supports ModernBert's sparse prediction (logits computed only for
    masked positions) and the flash-attention-2 unpad/repad path.
    """

    config_class = ModChemBertConfig
    # decoder.weight is shared with the input embeddings via the Transformers
    # weight-tying machinery.
    _tied_weights_keys = ["decoder.weight"]

    def __init__(self, config: ModChemBertConfig):
        super().__init__(config)
        self.config = config
        self.model = ModernBertModel(config)
        self.head = ModernBertPredictionHead(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)

        # When sparse_prediction is enabled, logits are computed only for tokens
        # whose label differs from sparse_pred_ignore_index (see forward()).
        self.sparse_prediction = self.config.sparse_prediction
        self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.decoder

    def set_output_embeddings(self, new_embeddings: nn.Linear):
        self.decoder = new_embeddings

    @torch.compile(dynamic=True)
    def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
        # Compiled variant of head+decoder, used when config.reference_compile is set.
        return self.decoder(self.head(output))

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        sliding_window_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        indices: torch.Tensor | None = None,
        cu_seqlens: torch.Tensor | None = None,
        max_seqlen: int | None = None,
        batch_size: int | None = None,
        seq_len: int | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, typing.Any] | MaskedLMOutput:
        r"""
        sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
            perform global attention, while the rest perform local attention. This mask is used to avoid attending to
            far-away tokens in the local attention layers when not using Flash Attention.
        indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
            Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
        cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
            Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
        max_seqlen (`int`, *optional*):
            Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids & pad output tensors.
        batch_size (`int`, *optional*):
            Batch size of the input sequences. Used to pad the output tensors.
        seq_len (`int`, *optional*):
            Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        self._maybe_set_compile()

        # Flash-attention-2 path: unpad inputs up front (unless the caller already
        # supplied unpadded tensors) so the encoder never processes padding tokens.
        if self.config._attn_implementation == "flash_attention_2":  # noqa: SIM102
            if indices is None and cu_seqlens is None and max_seqlen is None:
                if batch_size is None and seq_len is None:
                    if inputs_embeds is not None:
                        batch_size, seq_len = inputs_embeds.shape[:2]
                    else:
                        batch_size, seq_len = input_ids.shape[:2]  # type: ignore
                device = input_ids.device if input_ids is not None else inputs_embeds.device  # type: ignore

                if attention_mask is None:
                    attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)  # type: ignore

                if inputs_embeds is None:
                    with torch.no_grad():
                        input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(
                            inputs=input_ids,  # type: ignore
                            attention_mask=attention_mask,  # type: ignore
                            position_ids=position_ids,
                            labels=labels,
                        )
                else:
                    inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(
                        inputs=inputs_embeds,
                        attention_mask=attention_mask,  # type: ignore
                        position_ids=position_ids,
                        labels=labels,
                    )

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            sliding_window_mask=sliding_window_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            batch_size=batch_size,
            seq_len=seq_len,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]

        # Sparse prediction: restrict the (expensive) decoder to masked positions only.
        if self.sparse_prediction and labels is not None:
            # flatten labels and output first
            labels = labels.view(-1)
            last_hidden_state = last_hidden_state.view(labels.shape[0], -1)

            # then filter out the non-masked tokens
            mask_tokens = labels != self.sparse_pred_ignore_index
            last_hidden_state = last_hidden_state[mask_tokens]
            labels = labels[mask_tokens]

        logits = (
            self.compiled_head(last_hidden_state)
            if self.config.reference_compile
            else self.decoder(self.head(last_hidden_state))
        )

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)

        # Repad logits to (batch, seq_len, ...) so callers see padded-shaped outputs.
        # Gradients through the repad are kept only if repad_logits_with_grad is set
        # or no loss was computed.
        if self.config._attn_implementation == "flash_attention_2":
            with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad():
                logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len)  # type: ignore

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return MaskedLMOutput(
            loss=loss,
            logits=typing.cast(torch.FloatTensor, logits),
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
311
+
312
+
313
class ModChemBertForSequenceClassification(InitWeightsMixin, ModernBertPreTrainedModel):
    """ModChemBert with a sequence-level classification/regression head.

    Pools the encoder output according to ``config.classifier_pooling`` (see
    :func:`_pool_modchembert_output`), then applies the prediction head, dropout,
    and a final linear classifier.
    """

    config_class = ModChemBertConfig

    def __init__(self, config: ModChemBertConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.model = ModernBertModel(config)
        # Only the attention-based pooling modes need the extra pooling module.
        if self.config.classifier_pooling in {"cls_mha", "max_seq_mha"}:
            self.pooling_attn = ModChemBertPoolingAttention(config=self.config)
        else:
            self.pooling_attn = None
        self.head = ModernBertPredictionHead(config)
        self.drop = torch.nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        sliding_window_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        indices: torch.Tensor | None = None,
        cu_seqlens: torch.Tensor | None = None,
        max_seqlen: int | None = None,
        batch_size: int | None = None,
        seq_len: int | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, typing.Any] | SequenceClassifierOutput:
        r"""
        sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
            perform global attention, while the rest perform local attention. This mask is used to avoid attending to
            far-away tokens in the local attention layers when not using Flash Attention.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
            Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
        cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
            Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
        max_seqlen (`int`, *optional*):
            Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids & pad output tensors.
        batch_size (`int`, *optional*):
            Batch size of the input sequences. Used to pad the output tensors.
        seq_len (`int`, *optional*):
            Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        self._maybe_set_compile()

        if input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)

        if batch_size is None and seq_len is None:
            if inputs_embeds is not None:
                batch_size, seq_len = inputs_embeds.shape[:2]
            else:
                batch_size, seq_len = input_ids.shape[:2]  # type: ignore
        device = input_ids.device if input_ids is not None else inputs_embeds.device  # type: ignore

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)  # type: ignore

        # Ensure output_hidden_states is True in case pooling mode requires all hidden states
        output_hidden_states = True

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            sliding_window_mask=sliding_window_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            batch_size=batch_size,
            seq_len=seq_len,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]
        # Per-layer hidden states; available because output_hidden_states was forced True above.
        hidden_states = outputs[1]

        # Collapse (batch, seq_len, hidden) -> (batch, hidden) per config.classifier_pooling.
        last_hidden_state = _pool_modchembert_output(
            self,
            last_hidden_state,
            hidden_states,
            typing.cast(torch.Tensor, attention_mask),
        )
        pooled_output = self.head(last_hidden_state)
        pooled_output = self.drop(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type on first use, following the standard HF convention.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
451
+
452
+
453
def _pool_modchembert_output(
    module: ModChemBertForSequenceClassification,
    last_hidden_state: torch.Tensor,
    hidden_states: list[torch.Tensor],
    attention_mask: torch.Tensor,
):
    """
    Collapse per-token hidden states into a single vector per sequence.

    The strategy is selected by ``config.classifier_pooling``:

    - cls: CLS token of the last hidden state
    - mean: attention-masked mean over the last hidden state's tokens
    - max_cls: element-wise max over the last k layers, then the CLS token
    - cls_mha: pooling attention with the CLS token as query over the last hidden state
    - max_seq_mha: element-wise max over the last k layers, then pooling attention
      with the pooled CLS token as query
    - max_seq_mean: element-wise max over the last k layers, then mean over tokens
    - sum_mean / sum_sum / mean_sum / mean_mean: <layer-agg>_<seq-agg> — aggregate
      across all layers first (sum or mean), then across the sequence (mean or sum).
      The sum_mean variant follows ChemLM (https://doi.org/10.1038/s42004-025-01484-4),
      where using all hidden states outperforms the last-layer mean or CLS embedding.

    Args:
        module: Model holding the config and, for the *_mha modes, ``pooling_attn``.
        last_hidden_state: Final layer output, shape (batch, seq_len, hidden).
        hidden_states: Hidden states from every layer, each (batch, seq_len, hidden).
        attention_mask: (batch, seq_len) mask of valid tokens.

    Returns:
        torch.Tensor: Pooled representation of shape (batch, hidden).

    Note:
        The cls_mha and max_seq_mha modes require ``module.pooling_attn`` to be a
        ModChemBertPoolingAttention instance.
    """
    config = typing.cast(ModChemBertConfig, module.config)
    mode = config.classifier_pooling

    def max_over_last_k() -> torch.Tensor:
        # Element-wise max across the last k layers -> (batch, seq_len, hidden).
        stacked = torch.stack(hidden_states[-config.classifier_pooling_last_k :], dim=1)
        return stacked.max(dim=1).values

    def mha_pool(seq: torch.Tensor) -> torch.Tensor:
        # Broadcast the CLS vector (position 0) as the query over the whole
        # sequence, attend, then average the attended outputs.
        query = seq[:, 0, :].unsqueeze(1).expand(-1, seq.shape[1], -1)
        attended: torch.Tensor = module.pooling_attn(  # type: ignore
            q=query, kv=seq, attention_mask=attention_mask
        )  # (batch, seq_len, hidden)
        return attended.mean(dim=1)

    if mode == "cls":
        return last_hidden_state[:, 0]
    if mode == "mean":
        token_counts = attention_mask.sum(dim=1, keepdim=True)
        return (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / token_counts
    if mode == "max_cls":
        return max_over_last_k()[:, 0, :]
    if mode == "cls_mha":
        return mha_pool(last_hidden_state)
    if mode == "max_seq_mha":
        return mha_pool(max_over_last_k())
    if mode == "max_seq_mean":
        return max_over_last_k().mean(dim=1)
    if mode in {"sum_mean", "sum_sum", "mean_sum", "mean_mean"}:
        layer_agg, seq_agg = mode.split("_")
        stacked = torch.stack(hidden_states)  # (num_layers, batch, seq_len, hidden)
        combined = torch.sum(stacked, dim=0) if layer_agg == "sum" else torch.mean(stacked, dim=0)
        return torch.sum(combined, dim=1) if seq_agg == "sum" else torch.mean(combined, dim=1)
    # Unknown mode: fall through unchanged, matching the original behavior.
    return last_hidden_state
549
+
550
+
551
# Public API of this module.
__all__ = [
    "ModChemBertForMaskedLM",
    "ModChemBertForSequenceClassification",
]
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
tokenizer.json ADDED
@@ -0,0 +1,2554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 256,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
+ "padding": {
10
+ "strategy": "BatchLongest",
11
+ "direction": "Right",
12
+ "pad_to_multiple_of": 8,
13
+ "pad_id": 2,
14
+ "pad_type_id": 0,
15
+ "pad_token": "[PAD]"
16
+ },
17
+ "added_tokens": [
18
+ {
19
+ "id": 0,
20
+ "content": "[CLS]",
21
+ "single_word": false,
22
+ "lstrip": false,
23
+ "rstrip": false,
24
+ "normalized": false,
25
+ "special": true
26
+ },
27
+ {
28
+ "id": 1,
29
+ "content": "[SEP]",
30
+ "single_word": false,
31
+ "lstrip": false,
32
+ "rstrip": false,
33
+ "normalized": false,
34
+ "special": true
35
+ },
36
+ {
37
+ "id": 2,
38
+ "content": "[PAD]",
39
+ "single_word": false,
40
+ "lstrip": false,
41
+ "rstrip": false,
42
+ "normalized": false,
43
+ "special": true
44
+ },
45
+ {
46
+ "id": 3,
47
+ "content": "[MASK]",
48
+ "single_word": false,
49
+ "lstrip": false,
50
+ "rstrip": false,
51
+ "normalized": false,
52
+ "special": true
53
+ },
54
+ {
55
+ "id": 2361,
56
+ "content": "[UNK]",
57
+ "single_word": false,
58
+ "lstrip": false,
59
+ "rstrip": false,
60
+ "normalized": false,
61
+ "special": true
62
+ }
63
+ ],
64
+ "normalizer": null,
65
+ "pre_tokenizer": {
66
+ "type": "ByteLevel",
67
+ "add_prefix_space": false,
68
+ "trim_offsets": true,
69
+ "use_regex": true
70
+ },
71
+ "post_processor": {
72
+ "type": "TemplateProcessing",
73
+ "single": [
74
+ {
75
+ "SpecialToken": {
76
+ "id": "[CLS]",
77
+ "type_id": 0
78
+ }
79
+ },
80
+ {
81
+ "Sequence": {
82
+ "id": "A",
83
+ "type_id": 0
84
+ }
85
+ },
86
+ {
87
+ "SpecialToken": {
88
+ "id": "[SEP]",
89
+ "type_id": 0
90
+ }
91
+ }
92
+ ],
93
+ "pair": [
94
+ {
95
+ "SpecialToken": {
96
+ "id": "[CLS]",
97
+ "type_id": 0
98
+ }
99
+ },
100
+ {
101
+ "Sequence": {
102
+ "id": "A",
103
+ "type_id": 0
104
+ }
105
+ },
106
+ {
107
+ "SpecialToken": {
108
+ "id": "[SEP]",
109
+ "type_id": 0
110
+ }
111
+ },
112
+ {
113
+ "Sequence": {
114
+ "id": "B",
115
+ "type_id": 0
116
+ }
117
+ },
118
+ {
119
+ "SpecialToken": {
120
+ "id": "[SEP]",
121
+ "type_id": 0
122
+ }
123
+ }
124
+ ],
125
+ "special_tokens": {
126
+ "[CLS]": {
127
+ "id": "[CLS]",
128
+ "ids": [
129
+ 0
130
+ ],
131
+ "tokens": [
132
+ "[CLS]"
133
+ ]
134
+ },
135
+ "[MASK]": {
136
+ "id": "[MASK]",
137
+ "ids": [
138
+ 3
139
+ ],
140
+ "tokens": [
141
+ "[MASK]"
142
+ ]
143
+ },
144
+ "[PAD]": {
145
+ "id": "[PAD]",
146
+ "ids": [
147
+ 2
148
+ ],
149
+ "tokens": [
150
+ "[PAD]"
151
+ ]
152
+ },
153
+ "[SEP]": {
154
+ "id": "[SEP]",
155
+ "ids": [
156
+ 1
157
+ ],
158
+ "tokens": [
159
+ "[SEP]"
160
+ ]
161
+ },
162
+ "[UNK]": {
163
+ "id": "[UNK]",
164
+ "ids": [
165
+ 2361
166
+ ],
167
+ "tokens": [
168
+ "[UNK]"
169
+ ]
170
+ }
171
+ }
172
+ },
173
+ "decoder": {
174
+ "type": "ByteLevel",
175
+ "add_prefix_space": false,
176
+ "trim_offsets": true,
177
+ "use_regex": true
178
+ },
179
+ "model": {
180
+ "type": "BPE",
181
+ "dropout": null,
182
+ "unk_token": "[UNK]",
183
+ "continuing_subword_prefix": null,
184
+ "end_of_word_suffix": null,
185
+ "fuse_unk": false,
186
+ "byte_fallback": false,
187
+ "ignore_merges": false,
188
+ "vocab": {
189
+ "[CLS]": 0,
190
+ "[SEP]": 1,
191
+ "[PAD]": 2,
192
+ "[MASK]": 3,
193
+ "C": 4,
194
+ "c": 5,
195
+ "(": 6,
196
+ ")": 7,
197
+ "1": 8,
198
+ "O": 9,
199
+ "N": 10,
200
+ "2": 11,
201
+ "=": 12,
202
+ "n": 13,
203
+ "3": 14,
204
+ "[C@H]": 15,
205
+ "[C@@H]": 16,
206
+ "F": 17,
207
+ "S": 18,
208
+ "4": 19,
209
+ "Cl": 20,
210
+ "-": 21,
211
+ "o": 22,
212
+ "s": 23,
213
+ "[nH]": 24,
214
+ "#": 25,
215
+ "/": 26,
216
+ "Br": 27,
217
+ "[C@]": 28,
218
+ "[C@@]": 29,
219
+ "[N+]": 30,
220
+ "[O-]": 31,
221
+ "5": 32,
222
+ "\\": 33,
223
+ ".": 34,
224
+ "I": 35,
225
+ "6": 36,
226
+ "[S@]": 37,
227
+ "[S@@]": 38,
228
+ "P": 39,
229
+ "[N-]": 40,
230
+ "[Si]": 41,
231
+ "7": 42,
232
+ "[n+]": 43,
233
+ "[2H]": 44,
234
+ "8": 45,
235
+ "[NH+]": 46,
236
+ "B": 47,
237
+ "9": 48,
238
+ "[C-]": 49,
239
+ "[Na+]": 50,
240
+ "[Cl-]": 51,
241
+ "[c-]": 52,
242
+ "[CH]": 53,
243
+ "%10": 54,
244
+ "[NH2+]": 55,
245
+ "[P+]": 56,
246
+ "[B]": 57,
247
+ "[I-]": 58,
248
+ "%11": 59,
249
+ "[CH2-]": 60,
250
+ "[O+]": 61,
251
+ "[NH3+]": 62,
252
+ "[C]": 63,
253
+ "[Br-]": 64,
254
+ "[IH2]": 65,
255
+ "[S-]": 66,
256
+ "[cH-]": 67,
257
+ "%12": 68,
258
+ "[nH+]": 69,
259
+ "[B-]": 70,
260
+ "[K+]": 71,
261
+ "[Sn]": 72,
262
+ "[Se]": 73,
263
+ "[CH-]": 74,
264
+ "[HH]": 75,
265
+ "[Y]": 76,
266
+ "[n-]": 77,
267
+ "[CH3-]": 78,
268
+ "[SiH]": 79,
269
+ "[S+]": 80,
270
+ "%13": 81,
271
+ "[SiH2]": 82,
272
+ "[Li+]": 83,
273
+ "[NH-]": 84,
274
+ "%14": 85,
275
+ "[Na]": 86,
276
+ "[CH2]": 87,
277
+ "[O-2]": 88,
278
+ "[U+2]": 89,
279
+ "[W]": 90,
280
+ "[Al]": 91,
281
+ "[P@]": 92,
282
+ "[Fe+2]": 93,
283
+ "[PH+]": 94,
284
+ "%15": 95,
285
+ "[Cl+3]": 96,
286
+ "[Zn+2]": 97,
287
+ "[Ir]": 98,
288
+ "[Mg+2]": 99,
289
+ "[Pt+2]": 100,
290
+ "[OH2+]": 101,
291
+ "[As]": 102,
292
+ "[Fe]": 103,
293
+ "[OH+]": 104,
294
+ "[Zr+2]": 105,
295
+ "[3H]": 106,
296
+ "[Ge]": 107,
297
+ "[SiH3]": 108,
298
+ "[OH-]": 109,
299
+ "[NH4+]": 110,
300
+ "[Cu+2]": 111,
301
+ "[P@@]": 112,
302
+ "p": 113,
303
+ "[Pt]": 114,
304
+ "%16": 115,
305
+ "[Ca+2]": 116,
306
+ "[Zr]": 117,
307
+ "[F-]": 118,
308
+ "[C+]": 119,
309
+ "[Ti]": 120,
310
+ "[P-]": 121,
311
+ "[V]": 122,
312
+ "[se]": 123,
313
+ "[U]": 124,
314
+ "[O]": 125,
315
+ "[Ni+2]": 126,
316
+ "[Zn]": 127,
317
+ "[Co]": 128,
318
+ "[Ni]": 129,
319
+ "[Pd+2]": 130,
320
+ "[Cu]": 131,
321
+ "%17": 132,
322
+ "[Cu+]": 133,
323
+ "[Te]": 134,
324
+ "[H+]": 135,
325
+ "[CH+]": 136,
326
+ "[Li]": 137,
327
+ "[Pd]": 138,
328
+ "[Mo]": 139,
329
+ "[Ru+2]": 140,
330
+ "[o+]": 141,
331
+ "[Re]": 142,
332
+ "[SH+]": 143,
333
+ "%18": 144,
334
+ "[Ac]": 145,
335
+ "[Cr]": 146,
336
+ "[NH2-]": 147,
337
+ "[K]": 148,
338
+ "[13CH2]": 149,
339
+ "[c]": 150,
340
+ "[Zr+4]": 151,
341
+ "[Tl]": 152,
342
+ "[13C]": 153,
343
+ "[Mn]": 154,
344
+ "[N@+]": 155,
345
+ "[Hg]": 156,
346
+ "[Rh]": 157,
347
+ "[Ti+4]": 158,
348
+ "[Sb]": 159,
349
+ "[Co+2]": 160,
350
+ "[Ag+]": 161,
351
+ "[Ru]": 162,
352
+ "%19": 163,
353
+ "[N@@+]": 164,
354
+ "[Ti+2]": 165,
355
+ "[Al+3]": 166,
356
+ "[Pb]": 167,
357
+ "[I+]": 168,
358
+ "[18F]": 169,
359
+ "[s+]": 170,
360
+ "[Rb+]": 171,
361
+ "[Ba+2]": 172,
362
+ "[H-]": 173,
363
+ "[Fe+3]": 174,
364
+ "[Ir+3]": 175,
365
+ "[13cH]": 176,
366
+ "%20": 177,
367
+ "[AlH2]": 178,
368
+ "[Au+]": 179,
369
+ "[13c]": 180,
370
+ "[SH2+]": 181,
371
+ "[Sn+2]": 182,
372
+ "[Mn+2]": 183,
373
+ "[Si-]": 184,
374
+ "[Ag]": 185,
375
+ "[N]": 186,
376
+ "[Bi]": 187,
377
+ "%21": 188,
378
+ "[In]": 189,
379
+ "[CH2+]": 190,
380
+ "[Y+3]": 191,
381
+ "[Ga]": 192,
382
+ "%22": 193,
383
+ "[Co+3]": 194,
384
+ "[Au]": 195,
385
+ "[13CH3]": 196,
386
+ "[Mg]": 197,
387
+ "[Cs+]": 198,
388
+ "[W+2]": 199,
389
+ "[Hf]": 200,
390
+ "[Zn+]": 201,
391
+ "[Se-]": 202,
392
+ "[S-2]": 203,
393
+ "[Ca]": 204,
394
+ "[pH]": 205,
395
+ "[ClH+]": 206,
396
+ "[Ti+3]": 207,
397
+ "%23": 208,
398
+ "[Ru+]": 209,
399
+ "[SH-]": 210,
400
+ "[13CH]": 211,
401
+ "[IH+]": 212,
402
+ "[Hf+4]": 213,
403
+ "[Rf]": 214,
404
+ "[OH3+]": 215,
405
+ "%24": 216,
406
+ "[Pt+4]": 217,
407
+ "[Zr+3]": 218,
408
+ "[PH3+]": 219,
409
+ "[Sr+2]": 220,
410
+ "[Cd+2]": 221,
411
+ "[Cd]": 222,
412
+ "%25": 223,
413
+ "[Os]": 224,
414
+ "[BH-]": 225,
415
+ "[Sn+4]": 226,
416
+ "[Cr+3]": 227,
417
+ "[Ru+3]": 228,
418
+ "[PH2+]": 229,
419
+ "[Rh+2]": 230,
420
+ "[V+2]": 231,
421
+ "%26": 232,
422
+ "[Gd+3]": 233,
423
+ "[Pb+2]": 234,
424
+ "[PH]": 235,
425
+ "[Hg+]": 236,
426
+ "[Mo+2]": 237,
427
+ "[AlH]": 238,
428
+ "[Sn+]": 239,
429
+ "%27": 240,
430
+ "[Pd+]": 241,
431
+ "b": 242,
432
+ "[Rh+3]": 243,
433
+ "[Hg+2]": 244,
434
+ "[15NH]": 245,
435
+ "[14C]": 246,
436
+ "%28": 247,
437
+ "[Mn+3]": 248,
438
+ "[Si+]": 249,
439
+ "[SeH]": 250,
440
+ "[13C@H]": 251,
441
+ "[NH]": 252,
442
+ "[Ga+3]": 253,
443
+ "[SiH-]": 254,
444
+ "[13C@@H]": 255,
445
+ "[Ce]": 256,
446
+ "[Au+3]": 257,
447
+ "[Bi+3]": 258,
448
+ "[15N]": 259,
449
+ "%29": 260,
450
+ "[BH3-]": 261,
451
+ "[14cH]": 262,
452
+ "[Ti+]": 263,
453
+ "[Gd]": 264,
454
+ "[cH+]": 265,
455
+ "[Cr+2]": 266,
456
+ "[Sb-]": 267,
457
+ "%30": 268,
458
+ "[Be+2]": 269,
459
+ "[Al+]": 270,
460
+ "[te]": 271,
461
+ "[11CH3]": 272,
462
+ "[Sm]": 273,
463
+ "[Pr]": 274,
464
+ "[La]": 275,
465
+ "%31": 276,
466
+ "[Al-]": 277,
467
+ "[Ta]": 278,
468
+ "[125I]": 279,
469
+ "[BH2-]": 280,
470
+ "[Nb]": 281,
471
+ "[Si@]": 282,
472
+ "%32": 283,
473
+ "[14c]": 284,
474
+ "[Sb+3]": 285,
475
+ "[Ba]": 286,
476
+ "%33": 287,
477
+ "[Os+2]": 288,
478
+ "[Si@@]": 289,
479
+ "[La+3]": 290,
480
+ "[15n]": 291,
481
+ "[15NH2]": 292,
482
+ "[Nd+3]": 293,
483
+ "%34": 294,
484
+ "[14CH2]": 295,
485
+ "[18O]": 296,
486
+ "[Nd]": 297,
487
+ "[GeH]": 298,
488
+ "[Ni+3]": 299,
489
+ "[Eu]": 300,
490
+ "[Dy+3]": 301,
491
+ "[Sc]": 302,
492
+ "%36": 303,
493
+ "[Se-2]": 304,
494
+ "[As+]": 305,
495
+ "%35": 306,
496
+ "[AsH]": 307,
497
+ "[Tb]": 308,
498
+ "[Sb+5]": 309,
499
+ "[Se+]": 310,
500
+ "[Ce+3]": 311,
501
+ "[c+]": 312,
502
+ "[In+3]": 313,
503
+ "[SnH]": 314,
504
+ "[Mo+4]": 315,
505
+ "%37": 316,
506
+ "[V+4]": 317,
507
+ "[Eu+3]": 318,
508
+ "[Hf+2]": 319,
509
+ "%38": 320,
510
+ "[Pt+]": 321,
511
+ "[p+]": 322,
512
+ "[123I]": 323,
513
+ "[Tl+]": 324,
514
+ "[Sm+3]": 325,
515
+ "%39": 326,
516
+ "[Yb+3]": 327,
517
+ "%40": 328,
518
+ "[Yb]": 329,
519
+ "[Os+]": 330,
520
+ "%41": 331,
521
+ "[10B]": 332,
522
+ "[Sc+3]": 333,
523
+ "[Al+2]": 334,
524
+ "%42": 335,
525
+ "[Sr]": 336,
526
+ "[Tb+3]": 337,
527
+ "[Po]": 338,
528
+ "[Tc]": 339,
529
+ "[PH-]": 340,
530
+ "[AlH3]": 341,
531
+ "[Ar]": 342,
532
+ "[U+4]": 343,
533
+ "[SnH2]": 344,
534
+ "[Cl+2]": 345,
535
+ "[si]": 346,
536
+ "[Fe+]": 347,
537
+ "[14CH3]": 348,
538
+ "[U+3]": 349,
539
+ "[Cl+]": 350,
540
+ "%43": 351,
541
+ "[GeH2]": 352,
542
+ "%44": 353,
543
+ "[Er+3]": 354,
544
+ "[Mo+3]": 355,
545
+ "[I+2]": 356,
546
+ "[Fe+4]": 357,
547
+ "[99Tc]": 358,
548
+ "%45": 359,
549
+ "[11C]": 360,
550
+ "%46": 361,
551
+ "[SnH3]": 362,
552
+ "[S]": 363,
553
+ "[Te+]": 364,
554
+ "[Er]": 365,
555
+ "[Lu+3]": 366,
556
+ "[11B]": 367,
557
+ "%47": 368,
558
+ "%48": 369,
559
+ "[P]": 370,
560
+ "[Tm]": 371,
561
+ "[Th]": 372,
562
+ "[Dy]": 373,
563
+ "[Pr+3]": 374,
564
+ "[Ta+5]": 375,
565
+ "[Nb+5]": 376,
566
+ "[Rb]": 377,
567
+ "[GeH3]": 378,
568
+ "[Br+2]": 379,
569
+ "%49": 380,
570
+ "[131I]": 381,
571
+ "[Fm]": 382,
572
+ "[Cs]": 383,
573
+ "[BH4-]": 384,
574
+ "[Lu]": 385,
575
+ "[15nH]": 386,
576
+ "%50": 387,
577
+ "[Ru+6]": 388,
578
+ "[b-]": 389,
579
+ "[Ho]": 390,
580
+ "[Th+4]": 391,
581
+ "[Ru+4]": 392,
582
+ "%52": 393,
583
+ "[14CH]": 394,
584
+ "%51": 395,
585
+ "[Cr+6]": 396,
586
+ "[18OH]": 397,
587
+ "[Ho+3]": 398,
588
+ "[Ce+4]": 399,
589
+ "[Bi+2]": 400,
590
+ "[Co+]": 401,
591
+ "%53": 402,
592
+ "[Yb+2]": 403,
593
+ "[Fe+6]": 404,
594
+ "[Be]": 405,
595
+ "%54": 406,
596
+ "[SH3+]": 407,
597
+ "[Np]": 408,
598
+ "[As-]": 409,
599
+ "%55": 410,
600
+ "[14C@@H]": 411,
601
+ "[Ir+2]": 412,
602
+ "[GaH3]": 413,
603
+ "[p-]": 414,
604
+ "[GeH4]": 415,
605
+ "[Sn+3]": 416,
606
+ "[Os+4]": 417,
607
+ "%56": 418,
608
+ "[14C@H]": 419,
609
+ "[sH+]": 420,
610
+ "[19F]": 421,
611
+ "[Eu+2]": 422,
612
+ "[TlH]": 423,
613
+ "%57": 424,
614
+ "[Cr+4]": 425,
615
+ "%58": 426,
616
+ "[B@@-]": 427,
617
+ "[SiH+]": 428,
618
+ "[At]": 429,
619
+ "[Am]": 430,
620
+ "[Fe+5]": 431,
621
+ "[AsH2]": 432,
622
+ "[Si+4]": 433,
623
+ "[B@-]": 434,
624
+ "[Pu]": 435,
625
+ "[SbH]": 436,
626
+ "[P-2]": 437,
627
+ "[Tm+3]": 438,
628
+ "*": 439,
629
+ "%59": 440,
630
+ "[se+]": 441,
631
+ "%60": 442,
632
+ "[oH+]": 443,
633
+ "[1H]": 444,
634
+ "[15N+]": 445,
635
+ "[124I]": 446,
636
+ "[S@@+]": 447,
637
+ "[P-3]": 448,
638
+ "[H]": 449,
639
+ "[IH2+]": 450,
640
+ "[TeH]": 451,
641
+ "[Xe]": 452,
642
+ "[PH4+]": 453,
643
+ "[Cr+]": 454,
644
+ "[Cm]": 455,
645
+ "[I+3]": 456,
646
+ "%61": 457,
647
+ "[Nb+2]": 458,
648
+ "[Ru+5]": 459,
649
+ "%62": 460,
650
+ "[Ta+2]": 461,
651
+ "[Tc+4]": 462,
652
+ "[CH3+]": 463,
653
+ "[Pm]": 464,
654
+ "[Si@H]": 465,
655
+ "[No]": 466,
656
+ "%63": 467,
657
+ "[Cr+5]": 468,
658
+ "[Th+2]": 469,
659
+ "[Zn-2]": 470,
660
+ "[13C@]": 471,
661
+ "[Lr]": 472,
662
+ "%64": 473,
663
+ "[99Tc+3]": 474,
664
+ "%65": 475,
665
+ "[13C@@]": 476,
666
+ "%66": 477,
667
+ "[Fe-]": 478,
668
+ "[17O]": 479,
669
+ "[siH]": 480,
670
+ "[Sb+]": 481,
671
+ "[OH]": 482,
672
+ "[IH]": 483,
673
+ "[11CH2]": 484,
674
+ "[Cf]": 485,
675
+ "[SiH2+]": 486,
676
+ "[Gd+2]": 487,
677
+ "[In+]": 488,
678
+ "[Si@@H]": 489,
679
+ "[Mn+]": 490,
680
+ "[99Tc+4]": 491,
681
+ "[Ga-]": 492,
682
+ "%67": 493,
683
+ "[S@+]": 494,
684
+ "[Ge+4]": 495,
685
+ "[Tl+3]": 496,
686
+ "[16OH]": 497,
687
+ "%68": 498,
688
+ "[2H-]": 499,
689
+ "[Ra]": 500,
690
+ "[si-]": 501,
691
+ "[NiH2]": 502,
692
+ "[P@@H]": 503,
693
+ "[Rh+]": 504,
694
+ "[12C]": 505,
695
+ "[35S]": 506,
696
+ "[32P]": 507,
697
+ "[SiH2-]": 508,
698
+ "[AlH2+]": 509,
699
+ "[16O]": 510,
700
+ "%69": 511,
701
+ "[BiH]": 512,
702
+ "[BiH2]": 513,
703
+ "[Zn-]": 514,
704
+ "[BH]": 515,
705
+ "[Tc+3]": 516,
706
+ "[Ir+]": 517,
707
+ "[Ni+]": 518,
708
+ "%70": 519,
709
+ "[InH2]": 520,
710
+ "[InH]": 521,
711
+ "[Nb+3]": 522,
712
+ "[PbH]": 523,
713
+ "[Bi+]": 524,
714
+ "%71": 525,
715
+ "[As+3]": 526,
716
+ "%72": 527,
717
+ "[18O-]": 528,
718
+ "[68Ga+3]": 529,
719
+ "%73": 530,
720
+ "[Pa]": 531,
721
+ "[76Br]": 532,
722
+ "[Tc+5]": 533,
723
+ "[pH+]": 534,
724
+ "[64Cu+2]": 535,
725
+ "[Ru+8]": 536,
726
+ "%74": 537,
727
+ "[PH2-]": 538,
728
+ "[Si+2]": 539,
729
+ "[17OH]": 540,
730
+ "[RuH]": 541,
731
+ "[111In+3]": 542,
732
+ "[AlH+]": 543,
733
+ "%75": 544,
734
+ "%76": 545,
735
+ "[W+]": 546,
736
+ "[SbH2]": 547,
737
+ "[PoH]": 548,
738
+ "[Ru-]": 549,
739
+ "[XeH]": 550,
740
+ "[Tc+2]": 551,
741
+ "[13C-]": 552,
742
+ "[Br+]": 553,
743
+ "[Pt-2]": 554,
744
+ "[Es]": 555,
745
+ "[Cu-]": 556,
746
+ "[Mg+]": 557,
747
+ "[3HH]": 558,
748
+ "[P@H]": 559,
749
+ "[ClH2+]": 560,
750
+ "%77": 561,
751
+ "[SH]": 562,
752
+ "[Au-]": 563,
753
+ "[2HH]": 564,
754
+ "%78": 565,
755
+ "[Sn-]": 566,
756
+ "[11CH]": 567,
757
+ "[PdH2]": 568,
758
+ "0": 569,
759
+ "[Os+6]": 570,
760
+ "%79": 571,
761
+ "[Mo+]": 572,
762
+ "%80": 573,
763
+ "[al]": 574,
764
+ "[PbH2]": 575,
765
+ "[64Cu]": 576,
766
+ "[Cl]": 577,
767
+ "[12CH3]": 578,
768
+ "%81": 579,
769
+ "[Tc+7]": 580,
770
+ "[11c]": 581,
771
+ "%82": 582,
772
+ "[Li-]": 583,
773
+ "[99Tc+5]": 584,
774
+ "[He]": 585,
775
+ "[12c]": 586,
776
+ "[Kr]": 587,
777
+ "[RuH+2]": 588,
778
+ "[35Cl]": 589,
779
+ "[Pd-2]": 590,
780
+ "[GaH2]": 591,
781
+ "[4H]": 592,
782
+ "[Sg]": 593,
783
+ "[Cu-2]": 594,
784
+ "[Br+3]": 595,
785
+ "%83": 596,
786
+ "[37Cl]": 597,
787
+ "[211At]": 598,
788
+ "[IrH+2]": 599,
789
+ "[Mt]": 600,
790
+ "[Ir-2]": 601,
791
+ "[In-]": 602,
792
+ "[12cH]": 603,
793
+ "[12CH2]": 604,
794
+ "[RuH2]": 605,
795
+ "[99Tc+7]": 606,
796
+ "%84": 607,
797
+ "[15n+]": 608,
798
+ "[ClH2+2]": 609,
799
+ "[16N]": 610,
800
+ "[111In]": 611,
801
+ "[Tc+]": 612,
802
+ "[Ru-2]": 613,
803
+ "[12CH]": 614,
804
+ "[si+]": 615,
805
+ "[Tc+6]": 616,
806
+ "%85": 617,
807
+ "%86": 618,
808
+ "[90Y]": 619,
809
+ "[Pd-]": 620,
810
+ "[188Re]": 621,
811
+ "[RuH+]": 622,
812
+ "[NiH]": 623,
813
+ "[SiH3-]": 624,
814
+ "[14n]": 625,
815
+ "[CH3]": 626,
816
+ "[14N]": 627,
817
+ "[10BH2]": 628,
818
+ "%88": 629,
819
+ "%89": 630,
820
+ "%90": 631,
821
+ "[34S]": 632,
822
+ "[77Br]": 633,
823
+ "[GaH]": 634,
824
+ "[Br]": 635,
825
+ "[Ge@]": 636,
826
+ "[B@@H-]": 637,
827
+ "[CuH]": 638,
828
+ "[SiH4]": 639,
829
+ "[3H-]": 640,
830
+ "%87": 641,
831
+ "%91": 642,
832
+ "%92": 643,
833
+ "[67Cu]": 644,
834
+ "[I]": 645,
835
+ "[177Lu]": 646,
836
+ "[ReH]": 647,
837
+ "[67Ga+3]": 648,
838
+ "[Db]": 649,
839
+ "[177Lu+3]": 650,
840
+ "[AlH2-]": 651,
841
+ "[Si+3]": 652,
842
+ "[Ti-2]": 653,
843
+ "[RuH+3]": 654,
844
+ "[al+]": 655,
845
+ "[68Ga]": 656,
846
+ "[2H+]": 657,
847
+ "[B@H-]": 658,
848
+ "[WH2]": 659,
849
+ "[OsH]": 660,
850
+ "[Ir-3]": 661,
851
+ "[AlH-]": 662,
852
+ "[Bk]": 663,
853
+ "[75Se]": 664,
854
+ "[14C@]": 665,
855
+ "[Pt-]": 666,
856
+ "[N@@H+]": 667,
857
+ "[Nb-]": 668,
858
+ "[13NH2]": 669,
859
+ "%93": 670,
860
+ "[186Re]": 671,
861
+ "[Tb+4]": 672,
862
+ "[PtH]": 673,
863
+ "[IrH2]": 674,
864
+ "[Hg-2]": 675,
865
+ "[AlH3-]": 676,
866
+ "[PdH+]": 677,
867
+ "[Md]": 678,
868
+ "[RhH+2]": 679,
869
+ "[11cH]": 680,
870
+ "[Co-2]": 681,
871
+ "[15N-]": 682,
872
+ "[ZrH2]": 683,
873
+ "%94": 684,
874
+ "[Hg-]": 685,
875
+ "[127I]": 686,
876
+ "[AsH2+]": 687,
877
+ "[MoH2]": 688,
878
+ "[Te+4]": 689,
879
+ "[14C@@]": 690,
880
+ "[As+5]": 691,
881
+ "[SnH+3]": 692,
882
+ "[Ge@@]": 693,
883
+ "[6Li+]": 694,
884
+ "[WH]": 695,
885
+ "[Ne]": 696,
886
+ "[14NH2]": 697,
887
+ "[14NH]": 698,
888
+ "[12C@@H]": 699,
889
+ "[Os+7]": 700,
890
+ "[RhH]": 701,
891
+ "[Al-3]": 702,
892
+ "[SnH+]": 703,
893
+ "[15NH3+]": 704,
894
+ "[Zr+]": 705,
895
+ "[197Hg+]": 706,
896
+ "%95": 707,
897
+ "%96": 708,
898
+ "[90Y+3]": 709,
899
+ "[Os-2]": 710,
900
+ "[98Tc+5]": 711,
901
+ "[15NH3]": 712,
902
+ "[bH-]": 713,
903
+ "[33P]": 714,
904
+ "[Zr-2]": 715,
905
+ "[15O]": 716,
906
+ "[Rh-]": 717,
907
+ "[PbH3]": 718,
908
+ "[PH2]": 719,
909
+ "[Ni-]": 720,
910
+ "[CuH+]": 721,
911
+ "%97": 722,
912
+ "%98": 723,
913
+ "%99": 724,
914
+ "[Os+5]": 725,
915
+ "[PtH+]": 726,
916
+ "[ReH4]": 727,
917
+ "[16NH]": 728,
918
+ "[82Br]": 729,
919
+ "[W-]": 730,
920
+ "[18F-]": 731,
921
+ "[15NH4+]": 732,
922
+ "[Se+4]": 733,
923
+ "[SeH-]": 734,
924
+ "[67Cu+2]": 735,
925
+ "[12C@H]": 736,
926
+ "[AsH3]": 737,
927
+ "[HgH]": 738,
928
+ "[10B-]": 739,
929
+ "[99Tc+6]": 740,
930
+ "[117Sn+4]": 741,
931
+ "[Te@]": 742,
932
+ "[P@+]": 743,
933
+ "[35SH]": 744,
934
+ "[SeH+]": 745,
935
+ "[Ni-2]": 746,
936
+ "[Al-2]": 747,
937
+ "[TeH2]": 748,
938
+ "[Bh]": 749,
939
+ "[99Tc+2]": 750,
940
+ "[Os+8]": 751,
941
+ "[PH-2]": 752,
942
+ "[7Li+]": 753,
943
+ "[14nH]": 754,
944
+ "[AlH+2]": 755,
945
+ "[18FH]": 756,
946
+ "[SnH4]": 757,
947
+ "[18O-2]": 758,
948
+ "[IrH]": 759,
949
+ "[13N]": 760,
950
+ "[Te@@]": 761,
951
+ "[Rh-3]": 762,
952
+ "[15NH+]": 763,
953
+ "[AsH3+]": 764,
954
+ "[SeH2]": 765,
955
+ "[AsH+]": 766,
956
+ "[CoH2]": 767,
957
+ "[16NH2]": 768,
958
+ "[AsH-]": 769,
959
+ "[203Hg+]": 770,
960
+ "[P@@+]": 771,
961
+ "[166Ho+3]": 772,
962
+ "[60Co+3]": 773,
963
+ "[13CH2-]": 774,
964
+ "[SeH2+]": 775,
965
+ "[75Br]": 776,
966
+ "[TlH2]": 777,
967
+ "[80Br]": 778,
968
+ "[siH+]": 779,
969
+ "[Ca+]": 780,
970
+ "[153Sm+3]": 781,
971
+ "[PdH]": 782,
972
+ "[225Ac]": 783,
973
+ "[13CH3-]": 784,
974
+ "[AlH4-]": 785,
975
+ "[FeH]": 786,
976
+ "[13CH-]": 787,
977
+ "[14C-]": 788,
978
+ "[11C-]": 789,
979
+ "[153Sm]": 790,
980
+ "[Re-]": 791,
981
+ "[te+]": 792,
982
+ "[13CH4]": 793,
983
+ "[ClH+2]": 794,
984
+ "[8CH2]": 795,
985
+ "[99Mo]": 796,
986
+ "[ClH3+3]": 797,
987
+ "[SbH3]": 798,
988
+ "[25Mg+2]": 799,
989
+ "[16N+]": 800,
990
+ "[SnH2+]": 801,
991
+ "[11C@H]": 802,
992
+ "[122I]": 803,
993
+ "[Re-2]": 804,
994
+ "[RuH2+2]": 805,
995
+ "[ZrH]": 806,
996
+ "[Bi-]": 807,
997
+ "[Pr+]": 808,
998
+ "[Rn]": 809,
999
+ "[Fr]": 810,
1000
+ "[36Cl]": 811,
1001
+ "[18o]": 812,
1002
+ "[YH]": 813,
1003
+ "[79Br]": 814,
1004
+ "[121I]": 815,
1005
+ "[113In+3]": 816,
1006
+ "[TaH]": 817,
1007
+ "[RhH2]": 818,
1008
+ "[Ta-]": 819,
1009
+ "[67Ga]": 820,
1010
+ "[ZnH+]": 821,
1011
+ "[SnH2-]": 822,
1012
+ "[OsH2]": 823,
1013
+ "[16F]": 824,
1014
+ "[FeH2]": 825,
1015
+ "[14O]": 826,
1016
+ "[PbH2+2]": 827,
1017
+ "[BH2]": 828,
1018
+ "[6H]": 829,
1019
+ "[125Te]": 830,
1020
+ "[197Hg]": 831,
1021
+ "[TaH2]": 832,
1022
+ "[TaH3]": 833,
1023
+ "[76As]": 834,
1024
+ "[Nb-2]": 835,
1025
+ "[14N+]": 836,
1026
+ "[125I-]": 837,
1027
+ "[33S]": 838,
1028
+ "[IH2+2]": 839,
1029
+ "[NH2]": 840,
1030
+ "[PtH2]": 841,
1031
+ "[MnH]": 842,
1032
+ "[19C]": 843,
1033
+ "[17F]": 844,
1034
+ "[1H-]": 845,
1035
+ "[SnH4+2]": 846,
1036
+ "[Mn-2]": 847,
1037
+ "[15NH2+]": 848,
1038
+ "[TiH2]": 849,
1039
+ "[ReH7]": 850,
1040
+ "[Cd-2]": 851,
1041
+ "[Fe-3]": 852,
1042
+ "[SH2]": 853,
1043
+ "[17O-]": 854,
1044
+ "[siH-]": 855,
1045
+ "[CoH+]": 856,
1046
+ "[VH]": 857,
1047
+ "[10BH]": 858,
1048
+ "[Ru-3]": 859,
1049
+ "[13O]": 860,
1050
+ "[5H]": 861,
1051
+ "[15n-]": 862,
1052
+ "[153Gd]": 863,
1053
+ "[12C@]": 864,
1054
+ "[11CH3-]": 865,
1055
+ "[IrH3]": 866,
1056
+ "[RuH3]": 867,
1057
+ "[74Se]": 868,
1058
+ "[Se@]": 869,
1059
+ "[Hf+]": 870,
1060
+ "[77Se]": 871,
1061
+ "[166Ho]": 872,
1062
+ "[59Fe+2]": 873,
1063
+ "[203Hg]": 874,
1064
+ "[18OH-]": 875,
1065
+ "[8CH]": 876,
1066
+ "[12C@@]": 877,
1067
+ "[11CH4]": 878,
1068
+ "[15C]": 879,
1069
+ "[249Cf]": 880,
1070
+ "[PbH4]": 881,
1071
+ "[64Zn]": 882,
1072
+ "[99Tc+]": 883,
1073
+ "[14c-]": 884,
1074
+ "[149Pm]": 885,
1075
+ "[IrH4]": 886,
1076
+ "[Se@@]": 887,
1077
+ "[13OH]": 888,
1078
+ "[14CH3-]": 889,
1079
+ "[28Si]": 890,
1080
+ "[Rh-2]": 891,
1081
+ "[Fe-2]": 892,
1082
+ "[131I-]": 893,
1083
+ "[51Cr]": 894,
1084
+ "[62Cu+2]": 895,
1085
+ "[81Br]": 896,
1086
+ "[121Sb]": 897,
1087
+ "[7Li]": 898,
1088
+ "[89Zr+4]": 899,
1089
+ "[SbH3+]": 900,
1090
+ "[11C@@H]": 901,
1091
+ "[98Tc]": 902,
1092
+ "[59Fe+3]": 903,
1093
+ "[BiH2+]": 904,
1094
+ "[SbH+]": 905,
1095
+ "[TiH]": 906,
1096
+ "[14NH3]": 907,
1097
+ "[15OH]": 908,
1098
+ "[119Sn]": 909,
1099
+ "[201Hg]": 910,
1100
+ "[MnH+]": 911,
1101
+ "[201Tl]": 912,
1102
+ "[51Cr+3]": 913,
1103
+ "[123I-]": 914,
1104
+ "[MoH]": 915,
1105
+ "[AlH6-3]": 916,
1106
+ "[MnH2]": 917,
1107
+ "[WH3]": 918,
1108
+ "[213Bi+3]": 919,
1109
+ "[SnH2+2]": 920,
1110
+ "[123IH]": 921,
1111
+ "[13CH+]": 922,
1112
+ "[Zr-]": 923,
1113
+ "[74As]": 924,
1114
+ "[13C+]": 925,
1115
+ "[32P+]": 926,
1116
+ "[KrH]": 927,
1117
+ "[SiH+2]": 928,
1118
+ "[ClH3+2]": 929,
1119
+ "[13NH]": 930,
1120
+ "[9CH2]": 931,
1121
+ "[ZrH2+2]": 932,
1122
+ "[87Sr+2]": 933,
1123
+ "[35s]": 934,
1124
+ "[239Pu]": 935,
1125
+ "[198Au]": 936,
1126
+ "[241Am]": 937,
1127
+ "[203Hg+2]": 938,
1128
+ "[V+]": 939,
1129
+ "[YH2]": 940,
1130
+ "[195Pt]": 941,
1131
+ "[203Pb]": 942,
1132
+ "[RuH4]": 943,
1133
+ "[ThH2]": 944,
1134
+ "[AuH]": 945,
1135
+ "[66Ga+3]": 946,
1136
+ "[11B-]": 947,
1137
+ "[F]": 948,
1138
+ "[24Na+]": 949,
1139
+ "[85Sr+2]": 950,
1140
+ "[201Tl+]": 951,
1141
+ "[14CH4]": 952,
1142
+ "[32S]": 953,
1143
+ "[TeH2+]": 954,
1144
+ "[ClH2+3]": 955,
1145
+ "[AgH]": 956,
1146
+ "[Ge@H]": 957,
1147
+ "[44Ca+2]": 958,
1148
+ "[Os-]": 959,
1149
+ "[31P]": 960,
1150
+ "[15nH+]": 961,
1151
+ "[SbH4]": 962,
1152
+ "[TiH+]": 963,
1153
+ "[Ba+]": 964,
1154
+ "[57Co+2]": 965,
1155
+ "[Ta+]": 966,
1156
+ "[125IH]": 967,
1157
+ "[77As]": 968,
1158
+ "[129I]": 969,
1159
+ "[Fe-4]": 970,
1160
+ "[Ta-2]": 971,
1161
+ "[19O]": 972,
1162
+ "[12O]": 973,
1163
+ "[BiH3]": 974,
1164
+ "[237Np]": 975,
1165
+ "[252Cf]": 976,
1166
+ "[86Y]": 977,
1167
+ "[Cr-2]": 978,
1168
+ "[89Y]": 979,
1169
+ "[195Pt+2]": 980,
1170
+ "[si+2]": 981,
1171
+ "[58Fe+2]": 982,
1172
+ "[Hs]": 983,
1173
+ "[S@@H]": 984,
1174
+ "[8CH4]": 985,
1175
+ "[164Dy+3]": 986,
1176
+ "[47Ca+2]": 987,
1177
+ "[57Co]": 988,
1178
+ "[NbH2]": 989,
1179
+ "[ReH2]": 990,
1180
+ "[ZnH2]": 991,
1181
+ "[CrH2]": 992,
1182
+ "[17NH]": 993,
1183
+ "[ZrH3]": 994,
1184
+ "[RhH3]": 995,
1185
+ "[12C-]": 996,
1186
+ "[18O+]": 997,
1187
+ "[Bi-2]": 998,
1188
+ "[ClH4+3]": 999,
1189
+ "[Ni-3]": 1000,
1190
+ "[Ag-]": 1001,
1191
+ "[111In-]": 1002,
1192
+ "[Mo-2]": 1003,
1193
+ "[55Fe+3]": 1004,
1194
+ "[204Hg+]": 1005,
1195
+ "[35Cl-]": 1006,
1196
+ "[211Pb]": 1007,
1197
+ "[75Ge]": 1008,
1198
+ "[8B]": 1009,
1199
+ "[TeH3]": 1010,
1200
+ "[SnH3+]": 1011,
1201
+ "[Zr-3]": 1012,
1202
+ "[28F]": 1013,
1203
+ "[249Bk]": 1014,
1204
+ "[169Yb]": 1015,
1205
+ "[34SH]": 1016,
1206
+ "[6Li]": 1017,
1207
+ "[94Tc]": 1018,
1208
+ "[197Au]": 1019,
1209
+ "[195Pt+4]": 1020,
1210
+ "[169Yb+3]": 1021,
1211
+ "[32Cl]": 1022,
1212
+ "[82Se]": 1023,
1213
+ "[159Gd+3]": 1024,
1214
+ "[213Bi]": 1025,
1215
+ "[CoH+2]": 1026,
1216
+ "[36S]": 1027,
1217
+ "[35P]": 1028,
1218
+ "[Ru-4]": 1029,
1219
+ "[Cr-3]": 1030,
1220
+ "[60Co]": 1031,
1221
+ "[1H+]": 1032,
1222
+ "[18CH2]": 1033,
1223
+ "[Cd-]": 1034,
1224
+ "[152Sm+3]": 1035,
1225
+ "[106Ru]": 1036,
1226
+ "[238Pu]": 1037,
1227
+ "[220Rn]": 1038,
1228
+ "[45Ca+2]": 1039,
1229
+ "[89Sr+2]": 1040,
1230
+ "[239Np]": 1041,
1231
+ "[90Sr+2]": 1042,
1232
+ "[137Cs+]": 1043,
1233
+ "[165Dy]": 1044,
1234
+ "[68GaH3]": 1045,
1235
+ "[65Zn+2]": 1046,
1236
+ "[89Zr]": 1047,
1237
+ "[BiH2+2]": 1048,
1238
+ "[62Cu]": 1049,
1239
+ "[165Dy+3]": 1050,
1240
+ "[238U]": 1051,
1241
+ "[105Rh+3]": 1052,
1242
+ "[70Zn]": 1053,
1243
+ "[12B]": 1054,
1244
+ "[12OH]": 1055,
1245
+ "[18CH]": 1056,
1246
+ "[17CH]": 1057,
1247
+ "[42K]": 1058,
1248
+ "[76Br-]": 1059,
1249
+ "[71As]": 1060,
1250
+ "[NbH3]": 1061,
1251
+ "[ReH3]": 1062,
1252
+ "[OsH-]": 1063,
1253
+ "[WH4]": 1064,
1254
+ "[MoH3]": 1065,
1255
+ "[OsH4]": 1066,
1256
+ "[RuH6]": 1067,
1257
+ "[PtH3]": 1068,
1258
+ "[CuH2]": 1069,
1259
+ "[CoH3]": 1070,
1260
+ "[TiH4]": 1071,
1261
+ "[64Zn+2]": 1072,
1262
+ "[Si-2]": 1073,
1263
+ "[79BrH]": 1074,
1264
+ "[14CH2-]": 1075,
1265
+ "[PtH2+2]": 1076,
1266
+ "[Os-3]": 1077,
1267
+ "[29Si]": 1078,
1268
+ "[Ti-]": 1079,
1269
+ "[Se+6]": 1080,
1270
+ "[22Na+]": 1081,
1271
+ "[42K+]": 1082,
1272
+ "[131Cs+]": 1083,
1273
+ "[86Rb+]": 1084,
1274
+ "[134Cs+]": 1085,
1275
+ "[209Po]": 1086,
1276
+ "[208Po]": 1087,
1277
+ "[81Rb+]": 1088,
1278
+ "[203Tl+]": 1089,
1279
+ "[Zr-4]": 1090,
1280
+ "[148Sm]": 1091,
1281
+ "[147Sm]": 1092,
1282
+ "[37Cl-]": 1093,
1283
+ "[12CH4]": 1094,
1284
+ "[Ge@@H]": 1095,
1285
+ "[63Cu]": 1096,
1286
+ "[13CH2+]": 1097,
1287
+ "[AsH2-]": 1098,
1288
+ "[CeH]": 1099,
1289
+ "[SnH-]": 1100,
1290
+ "[UH]": 1101,
1291
+ "[9c]": 1102,
1292
+ "[21CH3]": 1103,
1293
+ "[TeH+]": 1104,
1294
+ "[57Co+3]": 1105,
1295
+ "[8BH2]": 1106,
1296
+ "[12BH2]": 1107,
1297
+ "[19BH2]": 1108,
1298
+ "[9BH2]": 1109,
1299
+ "[YbH2]": 1110,
1300
+ "[CrH+2]": 1111,
1301
+ "[208Bi]": 1112,
1302
+ "[152Gd]": 1113,
1303
+ "[61Cu]": 1114,
1304
+ "[115In]": 1115,
1305
+ "[60Co+2]": 1116,
1306
+ "[13NH2-]": 1117,
1307
+ "[120I]": 1118,
1308
+ "[18OH2]": 1119,
1309
+ "[75SeH]": 1120,
1310
+ "[SbH2+]": 1121,
1311
+ "[144Ce]": 1122,
1312
+ "[16n]": 1123,
1313
+ "[113In]": 1124,
1314
+ "[22nH]": 1125,
1315
+ "[129I-]": 1126,
1316
+ "[InH3]": 1127,
1317
+ "[32PH3]": 1128,
1318
+ "[234U]": 1129,
1319
+ "[235U]": 1130,
1320
+ "[59Fe]": 1131,
1321
+ "[82Rb+]": 1132,
1322
+ "[65Zn]": 1133,
1323
+ "[244Cm]": 1134,
1324
+ "[147Pm]": 1135,
1325
+ "[91Y]": 1136,
1326
+ "[237Pu]": 1137,
1327
+ "[231Pa]": 1138,
1328
+ "[253Cf]": 1139,
1329
+ "[127Te]": 1140,
1330
+ "[187Re]": 1141,
1331
+ "[236Np]": 1142,
1332
+ "[235Np]": 1143,
1333
+ "[72Zn]": 1144,
1334
+ "[253Es]": 1145,
1335
+ "[159Dy]": 1146,
1336
+ "[62Zn]": 1147,
1337
+ "[101Tc]": 1148,
1338
+ "[149Tb]": 1149,
1339
+ "[124I-]": 1150,
1340
+ "[SeH3+]": 1151,
1341
+ "[210Pb]": 1152,
1342
+ "[40K]": 1153,
1343
+ "[210Po]": 1154,
1344
+ "[214Pb]": 1155,
1345
+ "[218Po]": 1156,
1346
+ "[214Po]": 1157,
1347
+ "[7Be]": 1158,
1348
+ "[212Pb]": 1159,
1349
+ "[205Pb]": 1160,
1350
+ "[209Pb]": 1161,
1351
+ "[123Te]": 1162,
1352
+ "[202Pb]": 1163,
1353
+ "[72As]": 1164,
1354
+ "[201Pb]": 1165,
1355
+ "[70As]": 1166,
1356
+ "[73Ge]": 1167,
1357
+ "[200Pb]": 1168,
1358
+ "[198Pb]": 1169,
1359
+ "[66Ga]": 1170,
1360
+ "[73Se]": 1171,
1361
+ "[195Pb]": 1172,
1362
+ "[199Pb]": 1173,
1363
+ "[144Ce+3]": 1174,
1364
+ "[235U+2]": 1175,
1365
+ "[90Tc]": 1176,
1366
+ "[114In+3]": 1177,
1367
+ "[128I]": 1178,
1368
+ "[100Tc+]": 1179,
1369
+ "[82Br-]": 1180,
1370
+ "[191Pt+2]": 1181,
1371
+ "[191Pt+4]": 1182,
1372
+ "[193Pt+4]": 1183,
1373
+ "[31PH3]": 1184,
1374
+ "[125I+2]": 1185,
1375
+ "[131I+2]": 1186,
1376
+ "[125Te+4]": 1187,
1377
+ "[82Sr+2]": 1188,
1378
+ "[149Sm]": 1189,
1379
+ "[81BrH]": 1190,
1380
+ "[129Xe]": 1191,
1381
+ "[193Pt+2]": 1192,
1382
+ "[123I+2]": 1193,
1383
+ "[Cr-]": 1194,
1384
+ "[Co-]": 1195,
1385
+ "[227Th+4]": 1196,
1386
+ "[249Cf+3]": 1197,
1387
+ "[252Cf+3]": 1198,
1388
+ "[187Os]": 1199,
1389
+ "[16O-]": 1200,
1390
+ "[17O+]": 1201,
1391
+ "[16OH-]": 1202,
1392
+ "[98Tc+7]": 1203,
1393
+ "[58Co+2]": 1204,
1394
+ "[69Ga+3]": 1205,
1395
+ "[57Fe+2]": 1206,
1396
+ "[43K+]": 1207,
1397
+ "[16C]": 1208,
1398
+ "[52Fe+3]": 1209,
1399
+ "[SeH5]": 1210,
1400
+ "[194Pb]": 1211,
1401
+ "[196Pb]": 1212,
1402
+ "[197Pb]": 1213,
1403
+ "[213Pb]": 1214,
1404
+ "[9B]": 1215,
1405
+ "[19B]": 1216,
1406
+ "[11CH-]": 1217,
1407
+ "[9CH]": 1218,
1408
+ "[20OH]": 1219,
1409
+ "[25OH]": 1220,
1410
+ "[8cH]": 1221,
1411
+ "[TiH+3]": 1222,
1412
+ "[SnH6+3]": 1223,
1413
+ "[N@H+]": 1224,
1414
+ "[52Mn+2]": 1225,
1415
+ "[64Ga]": 1226,
1416
+ "[13B]": 1227,
1417
+ "[216Bi]": 1228,
1418
+ "[117Sn+2]": 1229,
1419
+ "[232Th]": 1230,
1420
+ "[SnH+2]": 1231,
1421
+ "[BiH5]": 1232,
1422
+ "[77Kr]": 1233,
1423
+ "[103Cd]": 1234,
1424
+ "[62Ni]": 1235,
1425
+ "[LaH3]": 1236,
1426
+ "[SmH3]": 1237,
1427
+ "[EuH3]": 1238,
1428
+ "[MoH5]": 1239,
1429
+ "[64Ni]": 1240,
1430
+ "[66Zn]": 1241,
1431
+ "[68Zn]": 1242,
1432
+ "[186W]": 1243,
1433
+ "[FeH4]": 1244,
1434
+ "[MoH4]": 1245,
1435
+ "[HgH2]": 1246,
1436
+ "[15NH2-]": 1247,
1437
+ "[UH2]": 1248,
1438
+ "[204Hg]": 1249,
1439
+ "[GaH4-]": 1250,
1440
+ "[ThH4]": 1251,
1441
+ "[WH6]": 1252,
1442
+ "[PtH4]": 1253,
1443
+ "[VH2]": 1254,
1444
+ "[UH3]": 1255,
1445
+ "[FeH3]": 1256,
1446
+ "[RuH5]": 1257,
1447
+ "[BiH4]": 1258,
1448
+ "[80Br-]": 1259,
1449
+ "[CeH3]": 1260,
1450
+ "[37ClH]": 1261,
1451
+ "[157Gd+3]": 1262,
1452
+ "[205Tl]": 1263,
1453
+ "[203Tl]": 1264,
1454
+ "[62Cu+]": 1265,
1455
+ "[64Cu+]": 1266,
1456
+ "[61Cu+]": 1267,
1457
+ "[37SH2]": 1268,
1458
+ "[30Si]": 1269,
1459
+ "[28Al]": 1270,
1460
+ "[19OH2]": 1271,
1461
+ "[8He]": 1272,
1462
+ "[6He]": 1273,
1463
+ "[153Pm]": 1274,
1464
+ "[209Bi]": 1275,
1465
+ "[66Zn+2]": 1276,
1466
+ "[10CH4]": 1277,
1467
+ "[191Ir]": 1278,
1468
+ "[66Cu]": 1279,
1469
+ "[16O+]": 1280,
1470
+ "[25O]": 1281,
1471
+ "[10c]": 1282,
1472
+ "[Co-3]": 1283,
1473
+ "[Sn@@]": 1284,
1474
+ "[17OH-]": 1285,
1475
+ "[206Po]": 1286,
1476
+ "[204Po]": 1287,
1477
+ "[202Po]": 1288,
1478
+ "[201Po]": 1289,
1479
+ "[200Po]": 1290,
1480
+ "[199Po]": 1291,
1481
+ "[198Po]": 1292,
1482
+ "[197Po]": 1293,
1483
+ "[196Po]": 1294,
1484
+ "[195Po]": 1295,
1485
+ "[194Po]": 1296,
1486
+ "[193Po]": 1297,
1487
+ "[192Po]": 1298,
1488
+ "[191Po]": 1299,
1489
+ "[190Po]": 1300,
1490
+ "[217Po]": 1301,
1491
+ "[BiH4-]": 1302,
1492
+ "[TeH4]": 1303,
1493
+ "[222Ra]": 1304,
1494
+ "[62Ga]": 1305,
1495
+ "[39Ar]": 1306,
1496
+ "[144Sm]": 1307,
1497
+ "[58Fe]": 1308,
1498
+ "[153Eu]": 1309,
1499
+ "[85Rb]": 1310,
1500
+ "[171Yb]": 1311,
1501
+ "[172Yb]": 1312,
1502
+ "[114Cd]": 1313,
1503
+ "[51Fe]": 1314,
1504
+ "[142Ce]": 1315,
1505
+ "[207Tl]": 1316,
1506
+ "[92Mo]": 1317,
1507
+ "[115Sn]": 1318,
1508
+ "[140Ce]": 1319,
1509
+ "[202Hg]": 1320,
1510
+ "[180W]": 1321,
1511
+ "[182W]": 1322,
1512
+ "[183W]": 1323,
1513
+ "[184W]": 1324,
1514
+ "[96Mo]": 1325,
1515
+ "[47Ti]": 1326,
1516
+ "[111Cd]": 1327,
1517
+ "[143Nd]": 1328,
1518
+ "[145Nd]": 1329,
1519
+ "[126Te]": 1330,
1520
+ "[128Te]": 1331,
1521
+ "[130Te]": 1332,
1522
+ "[185Re]": 1333,
1523
+ "[97Mo]": 1334,
1524
+ "[98Mo]": 1335,
1525
+ "[183Re]": 1336,
1526
+ "[52V]": 1337,
1527
+ "[80Se]": 1338,
1528
+ "[87Kr]": 1339,
1529
+ "[137Xe]": 1340,
1530
+ "[196Au]": 1341,
1531
+ "[146Ce]": 1342,
1532
+ "[88Kr]": 1343,
1533
+ "[51Ti]": 1344,
1534
+ "[138Xe]": 1345,
1535
+ "[112Cd]": 1346,
1536
+ "[116Sn]": 1347,
1537
+ "[120Sn]": 1348,
1538
+ "[28SiH3]": 1349,
1539
+ "[35S-]": 1350,
1540
+ "[15NH-]": 1351,
1541
+ "[13CH3+]": 1352,
1542
+ "[34S+]": 1353,
1543
+ "[34s]": 1354,
1544
+ "[SiH4-]": 1355,
1545
+ "[100Tc+5]": 1356,
1546
+ "[NiH2+2]": 1357,
1547
+ "[239Th]": 1358,
1548
+ "[186Lu]": 1359,
1549
+ "[AuH3]": 1360,
1550
+ "[I@@-]": 1361,
1551
+ "[XeH2]": 1362,
1552
+ "[B+]": 1363,
1553
+ "[16CH2]": 1364,
1554
+ "[8C]": 1365,
1555
+ "[TaH5]": 1366,
1556
+ "[FeH4-]": 1367,
1557
+ "[19C@H]": 1368,
1558
+ "[10NH]": 1369,
1559
+ "[FeH6-3]": 1370,
1560
+ "[22CH]": 1371,
1561
+ "[25N]": 1372,
1562
+ "[25N+]": 1373,
1563
+ "[25N-]": 1374,
1564
+ "[21CH2]": 1375,
1565
+ "[18cH]": 1376,
1566
+ "[113I]": 1377,
1567
+ "[ScH3]": 1378,
1568
+ "[30PH3]": 1379,
1569
+ "[43Ca+2]": 1380,
1570
+ "[41Ca+2]": 1381,
1571
+ "[106Cd]": 1382,
1572
+ "[122Sn]": 1383,
1573
+ "[18CH3]": 1384,
1574
+ "[58Co+3]": 1385,
1575
+ "[98Tc+4]": 1386,
1576
+ "[70Ge]": 1387,
1577
+ "[76Ge]": 1388,
1578
+ "[108Cd]": 1389,
1579
+ "[116Cd]": 1390,
1580
+ "[130Xe]": 1391,
1581
+ "[94Mo]": 1392,
1582
+ "[124Sn]": 1393,
1583
+ "[186Os]": 1394,
1584
+ "[188Os]": 1395,
1585
+ "[190Os]": 1396,
1586
+ "[192Os]": 1397,
1587
+ "[106Pd]": 1398,
1588
+ "[110Pd]": 1399,
1589
+ "[120Te]": 1400,
1590
+ "[132Ba]": 1401,
1591
+ "[134Ba]": 1402,
1592
+ "[136Ba]": 1403,
1593
+ "[136Ce]": 1404,
1594
+ "[138Ce]": 1405,
1595
+ "[156Dy]": 1406,
1596
+ "[158Dy]": 1407,
1597
+ "[160Dy]": 1408,
1598
+ "[163Dy]": 1409,
1599
+ "[162Er]": 1410,
1600
+ "[164Er]": 1411,
1601
+ "[167Er]": 1412,
1602
+ "[176Hf]": 1413,
1603
+ "[26Mg]": 1414,
1604
+ "[144Nd]": 1415,
1605
+ "[150Nd]": 1416,
1606
+ "[41K]": 1417,
1607
+ "[46Ti]": 1418,
1608
+ "[48Ti]": 1419,
1609
+ "[49Ti]": 1420,
1610
+ "[50Ti]": 1421,
1611
+ "[170Yb]": 1422,
1612
+ "[173Yb]": 1423,
1613
+ "[91Zr]": 1424,
1614
+ "[92Zr]": 1425,
1615
+ "[96Zr]": 1426,
1616
+ "[34S-]": 1427,
1617
+ "[CuH2-]": 1428,
1618
+ "[38Cl]": 1429,
1619
+ "[25Mg]": 1430,
1620
+ "[51V]": 1431,
1621
+ "[93Nb]": 1432,
1622
+ "[95Mo]": 1433,
1623
+ "[45Sc]": 1434,
1624
+ "[123Sb]": 1435,
1625
+ "[139La]": 1436,
1626
+ "[9Be]": 1437,
1627
+ "[99Y+3]": 1438,
1628
+ "[99Y]": 1439,
1629
+ "[156Ho]": 1440,
1630
+ "[67Zn]": 1441,
1631
+ "[144Ce+4]": 1442,
1632
+ "[210Tl]": 1443,
1633
+ "[42Ca]": 1444,
1634
+ "[54Fe]": 1445,
1635
+ "[193Ir]": 1446,
1636
+ "[92Nb]": 1447,
1637
+ "[141Cs]": 1448,
1638
+ "[52Cr]": 1449,
1639
+ "[35ClH]": 1450,
1640
+ "[46Ca]": 1451,
1641
+ "[139Cs]": 1452,
1642
+ "[65Cu]": 1453,
1643
+ "[71Ga]": 1454,
1644
+ "[60Ni]": 1455,
1645
+ "[16NH3]": 1456,
1646
+ "[148Nd]": 1457,
1647
+ "[72Ge]": 1458,
1648
+ "[161Dy]": 1459,
1649
+ "[49Ca]": 1460,
1650
+ "[43Ca]": 1461,
1651
+ "[8Be]": 1462,
1652
+ "[48Ca]": 1463,
1653
+ "[44Ca]": 1464,
1654
+ "[120Xe]": 1465,
1655
+ "[80Rb]": 1466,
1656
+ "[215At]": 1467,
1657
+ "[180Re]": 1468,
1658
+ "[146Sm]": 1469,
1659
+ "[19Ne]": 1470,
1660
+ "[74Kr]": 1471,
1661
+ "[134La]": 1472,
1662
+ "[76Kr]": 1473,
1663
+ "[219Fr]": 1474,
1664
+ "[121Xe]": 1475,
1665
+ "[220Fr]": 1476,
1666
+ "[216At]": 1477,
1667
+ "[223Ac]": 1478,
1668
+ "[218At]": 1479,
1669
+ "[37Ar]": 1480,
1670
+ "[135I]": 1481,
1671
+ "[110Cd]": 1482,
1672
+ "[94Tc+7]": 1483,
1673
+ "[86Y+3]": 1484,
1674
+ "[135I-]": 1485,
1675
+ "[15O-2]": 1486,
1676
+ "[151Eu+3]": 1487,
1677
+ "[161Tb+3]": 1488,
1678
+ "[197Hg+2]": 1489,
1679
+ "[109Cd+2]": 1490,
1680
+ "[191Os+4]": 1491,
1681
+ "[170Tm+3]": 1492,
1682
+ "[205Bi+3]": 1493,
1683
+ "[233U+4]": 1494,
1684
+ "[126Sb+3]": 1495,
1685
+ "[127Sb+3]": 1496,
1686
+ "[132Cs+]": 1497,
1687
+ "[136Eu+3]": 1498,
1688
+ "[136Eu]": 1499,
1689
+ "[125Sn+4]": 1500,
1690
+ "[175Yb+3]": 1501,
1691
+ "[100Mo]": 1502,
1692
+ "[22Ne]": 1503,
1693
+ "[13c-]": 1504,
1694
+ "[13NH4+]": 1505,
1695
+ "[17C]": 1506,
1696
+ "[9C]": 1507,
1697
+ "[31S]": 1508,
1698
+ "[31SH]": 1509,
1699
+ "[133I]": 1510,
1700
+ "[126I]": 1511,
1701
+ "[36SH]": 1512,
1702
+ "[30S]": 1513,
1703
+ "[32SH]": 1514,
1704
+ "[19CH2]": 1515,
1705
+ "[19c]": 1516,
1706
+ "[18c]": 1517,
1707
+ "[15F]": 1518,
1708
+ "[10C]": 1519,
1709
+ "[RuH-]": 1520,
1710
+ "[62Zn+2]": 1521,
1711
+ "[32ClH]": 1522,
1712
+ "[33ClH]": 1523,
1713
+ "[78BrH]": 1524,
1714
+ "[12Li+]": 1525,
1715
+ "[12Li]": 1526,
1716
+ "[233Ra]": 1527,
1717
+ "[68Ge+4]": 1528,
1718
+ "[44Sc+3]": 1529,
1719
+ "[91Y+3]": 1530,
1720
+ "[106Ru+3]": 1531,
1721
+ "[PoH2]": 1532,
1722
+ "[AtH]": 1533,
1723
+ "[55Fe]": 1534,
1724
+ "[233U]": 1535,
1725
+ "[210PoH2]": 1536,
1726
+ "[230Th]": 1537,
1727
+ "[228Th]": 1538,
1728
+ "[222Rn]": 1539,
1729
+ "[35SH2]": 1540,
1730
+ "[227Th]": 1541,
1731
+ "[192Ir]": 1542,
1732
+ "[133Xe]": 1543,
1733
+ "[81Kr]": 1544,
1734
+ "[95Zr]": 1545,
1735
+ "[240Pu]": 1546,
1736
+ "[54Mn]": 1547,
1737
+ "[103Ru]": 1548,
1738
+ "[95Nb]": 1549,
1739
+ "[109Cd]": 1550,
1740
+ "[141Ce]": 1551,
1741
+ "[85Kr]": 1552,
1742
+ "[110Ag]": 1553,
1743
+ "[58Co]": 1554,
1744
+ "[241Pu]": 1555,
1745
+ "[234Th]": 1556,
1746
+ "[140La]": 1557,
1747
+ "[63Ni]": 1558,
1748
+ "[152Eu]": 1559,
1749
+ "[132IH]": 1560,
1750
+ "[226Rn]": 1561,
1751
+ "[154Eu]": 1562,
1752
+ "[36ClH]": 1563,
1753
+ "[228Ac]": 1564,
1754
+ "[155Eu]": 1565,
1755
+ "[106Rh]": 1566,
1756
+ "[243Am]": 1567,
1757
+ "[227Ac]": 1568,
1758
+ "[243Cm]": 1569,
1759
+ "[236U]": 1570,
1760
+ "[144Pr]": 1571,
1761
+ "[232U]": 1572,
1762
+ "[32SH2]": 1573,
1763
+ "[88Y]": 1574,
1764
+ "[82BrH]": 1575,
1765
+ "[135IH]": 1576,
1766
+ "[242Cm]": 1577,
1767
+ "[115Cd]": 1578,
1768
+ "[242Pu]": 1579,
1769
+ "[46Sc]": 1580,
1770
+ "[56Mn]": 1581,
1771
+ "[234Pa]": 1582,
1772
+ "[41Ar]": 1583,
1773
+ "[147Nd]": 1584,
1774
+ "[187W]": 1585,
1775
+ "[151Sm]": 1586,
1776
+ "[59Ni]": 1587,
1777
+ "[233Pa]": 1588,
1778
+ "[52Mn]": 1589,
1779
+ "[94Nb]": 1590,
1780
+ "[219Rn]": 1591,
1781
+ "[236Pu]": 1592,
1782
+ "[13NH3]": 1593,
1783
+ "[93Zr]": 1594,
1784
+ "[51Cr+6]": 1595,
1785
+ "[TlH3]": 1596,
1786
+ "[123Xe]": 1597,
1787
+ "[160Tb]": 1598,
1788
+ "[170Tm]": 1599,
1789
+ "[182Ta]": 1600,
1790
+ "[175Yb]": 1601,
1791
+ "[93Mo]": 1602,
1792
+ "[143Ce]": 1603,
1793
+ "[191Os]": 1604,
1794
+ "[126IH]": 1605,
1795
+ "[48V]": 1606,
1796
+ "[113Cd]": 1607,
1797
+ "[47Sc]": 1608,
1798
+ "[181Hf]": 1609,
1799
+ "[185W]": 1610,
1800
+ "[143Pr]": 1611,
1801
+ "[191Pt]": 1612,
1802
+ "[181W]": 1613,
1803
+ "[33PH3]": 1614,
1804
+ "[97Ru]": 1615,
1805
+ "[97Tc]": 1616,
1806
+ "[111Ag]": 1617,
1807
+ "[169Er]": 1618,
1808
+ "[107Pd]": 1619,
1809
+ "[103Ru+2]": 1620,
1810
+ "[34SH2]": 1621,
1811
+ "[137Ce]": 1622,
1812
+ "[242Am]": 1623,
1813
+ "[117SnH2]": 1624,
1814
+ "[57Ni]": 1625,
1815
+ "[239U]": 1626,
1816
+ "[60Cu]": 1627,
1817
+ "[250Cf]": 1628,
1818
+ "[193Au]": 1629,
1819
+ "[69Zn]": 1630,
1820
+ "[55Co]": 1631,
1821
+ "[139Ce]": 1632,
1822
+ "[127Xe]": 1633,
1823
+ "[159Gd]": 1634,
1824
+ "[56Co]": 1635,
1825
+ "[177Hf]": 1636,
1826
+ "[244Pu]": 1637,
1827
+ "[38ClH]": 1638,
1828
+ "[142Pr]": 1639,
1829
+ "[199Hg]": 1640,
1830
+ "[179Hf]": 1641,
1831
+ "[178Hf]": 1642,
1832
+ "[237U]": 1643,
1833
+ "[156Eu]": 1644,
1834
+ "[157Eu]": 1645,
1835
+ "[105Ru]": 1646,
1836
+ "[171Tm]": 1647,
1837
+ "[199Au]": 1648,
1838
+ "[155Sm]": 1649,
1839
+ "[80BrH]": 1650,
1840
+ "[108Ag]": 1651,
1841
+ "[128IH]": 1652,
1842
+ "[48Sc]": 1653,
1843
+ "[45Ti]": 1654,
1844
+ "[176Lu]": 1655,
1845
+ "[121SnH2]": 1656,
1846
+ "[148Pm]": 1657,
1847
+ "[57Fe]": 1658,
1848
+ "[10BH3]": 1659,
1849
+ "[96Tc]": 1660,
1850
+ "[133IH]": 1661,
1851
+ "[143Pm]": 1662,
1852
+ "[105Rh]": 1663,
1853
+ "[130IH]": 1664,
1854
+ "[134IH]": 1665,
1855
+ "[131IH]": 1666,
1856
+ "[71Zn]": 1667,
1857
+ "[105Ag]": 1668,
1858
+ "[97Zr]": 1669,
1859
+ "[235Pu]": 1670,
1860
+ "[231Th]": 1671,
1861
+ "[109Pd]": 1672,
1862
+ "[93Y]": 1673,
1863
+ "[190Ir]": 1674,
1864
+ "[135Xe]": 1675,
1865
+ "[53Mn]": 1676,
1866
+ "[134Ce]": 1677,
1867
+ "[234Np]": 1678,
1868
+ "[240Am]": 1679,
1869
+ "[246Cf]": 1680,
1870
+ "[240Cm]": 1681,
1871
+ "[241Cm]": 1682,
1872
+ "[226Th]": 1683,
1873
+ "[39ClH]": 1684,
1874
+ "[229Th]": 1685,
1875
+ "[245Cm]": 1686,
1876
+ "[240U]": 1687,
1877
+ "[240Np]": 1688,
1878
+ "[249Cm]": 1689,
1879
+ "[243Pu]": 1690,
1880
+ "[145Pm]": 1691,
1881
+ "[199Pt]": 1692,
1882
+ "[246Bk]": 1693,
1883
+ "[193Pt]": 1694,
1884
+ "[230U]": 1695,
1885
+ "[250Cm]": 1696,
1886
+ "[44Ti]": 1697,
1887
+ "[175Hf]": 1698,
1888
+ "[254Fm]": 1699,
1889
+ "[255Fm]": 1700,
1890
+ "[257Fm]": 1701,
1891
+ "[92Y]": 1702,
1892
+ "[188Ir]": 1703,
1893
+ "[171Lu]": 1704,
1894
+ "[257Md]": 1705,
1895
+ "[247Bk]": 1706,
1896
+ "[121IH]": 1707,
1897
+ "[250Bk]": 1708,
1898
+ "[179Lu]": 1709,
1899
+ "[224Ac]": 1710,
1900
+ "[195Hg]": 1711,
1901
+ "[244Am]": 1712,
1902
+ "[246Pu]": 1713,
1903
+ "[194Au]": 1714,
1904
+ "[252Fm]": 1715,
1905
+ "[173Hf]": 1716,
1906
+ "[246Cm]": 1717,
1907
+ "[135Ce]": 1718,
1908
+ "[49Cr]": 1719,
1909
+ "[248Cf]": 1720,
1910
+ "[247Cm]": 1721,
1911
+ "[248Cm]": 1722,
1912
+ "[174Ta]": 1723,
1913
+ "[176Ta]": 1724,
1914
+ "[154Tb]": 1725,
1915
+ "[172Ta]": 1726,
1916
+ "[177Ta]": 1727,
1917
+ "[175Ta]": 1728,
1918
+ "[180Ta]": 1729,
1919
+ "[158Tb]": 1730,
1920
+ "[115Ag]": 1731,
1921
+ "[189Os]": 1732,
1922
+ "[251Cf]": 1733,
1923
+ "[145Pr]": 1734,
1924
+ "[147Pr]": 1735,
1925
+ "[76BrH]": 1736,
1926
+ "[102Rh]": 1737,
1927
+ "[238Np]": 1738,
1928
+ "[185Os]": 1739,
1929
+ "[246Am]": 1740,
1930
+ "[233Np]": 1741,
1931
+ "[166Dy]": 1742,
1932
+ "[254Es]": 1743,
1933
+ "[244Cf]": 1744,
1934
+ "[193Os]": 1745,
1935
+ "[245Am]": 1746,
1936
+ "[245Bk]": 1747,
1937
+ "[239Am]": 1748,
1938
+ "[238Am]": 1749,
1939
+ "[97Nb]": 1750,
1940
+ "[245Pu]": 1751,
1941
+ "[254Cf]": 1752,
1942
+ "[188W]": 1753,
1943
+ "[250Es]": 1754,
1944
+ "[251Es]": 1755,
1945
+ "[237Am]": 1756,
1946
+ "[182Hf]": 1757,
1947
+ "[258Md]": 1758,
1948
+ "[232Np]": 1759,
1949
+ "[238Cm]": 1760,
1950
+ "[60Fe]": 1761,
1951
+ "[109Pd+2]": 1762,
1952
+ "[234Pu]": 1763,
1953
+ "[141Ce+3]": 1764,
1954
+ "[136Nd]": 1765,
1955
+ "[136Pr]": 1766,
1956
+ "[173Ta]": 1767,
1957
+ "[110Ru]": 1768,
1958
+ "[147Tb]": 1769,
1959
+ "[253Fm]": 1770,
1960
+ "[139Nd]": 1771,
1961
+ "[178Re]": 1772,
1962
+ "[177Re]": 1773,
1963
+ "[200Au]": 1774,
1964
+ "[182Re]": 1775,
1965
+ "[156Tb]": 1776,
1966
+ "[155Tb]": 1777,
1967
+ "[157Tb]": 1778,
1968
+ "[161Tb]": 1779,
1969
+ "[161Ho]": 1780,
1970
+ "[167Tm]": 1781,
1971
+ "[173Lu]": 1782,
1972
+ "[179Ta]": 1783,
1973
+ "[171Er]": 1784,
1974
+ "[44Sc]": 1785,
1975
+ "[49Sc]": 1786,
1976
+ "[49V]": 1787,
1977
+ "[51Mn]": 1788,
1978
+ "[90Nb]": 1789,
1979
+ "[88Nb]": 1790,
1980
+ "[88Zr]": 1791,
1981
+ "[36SH2]": 1792,
1982
+ "[174Yb]": 1793,
1983
+ "[178Lu]": 1794,
1984
+ "[179W]": 1795,
1985
+ "[83BrH]": 1796,
1986
+ "[107Cd]": 1797,
1987
+ "[75BrH]": 1798,
1988
+ "[62Co]": 1799,
1989
+ "[48Cr]": 1800,
1990
+ "[63Zn]": 1801,
1991
+ "[102Ag]": 1802,
1992
+ "[154Sm]": 1803,
1993
+ "[168Er]": 1804,
1994
+ "[65Ni]": 1805,
1995
+ "[137La]": 1806,
1996
+ "[187Ir]": 1807,
1997
+ "[144Pm]": 1808,
1998
+ "[146Pm]": 1809,
1999
+ "[160Gd]": 1810,
2000
+ "[166Yb]": 1811,
2001
+ "[162Dy]": 1812,
2002
+ "[47V]": 1813,
2003
+ "[141Nd]": 1814,
2004
+ "[141Sm]": 1815,
2005
+ "[166Er]": 1816,
2006
+ "[150Sm]": 1817,
2007
+ "[146Eu]": 1818,
2008
+ "[149Eu]": 1819,
2009
+ "[174Lu]": 1820,
2010
+ "[17NH3]": 1821,
2011
+ "[102Ru]": 1822,
2012
+ "[170Hf]": 1823,
2013
+ "[188Pt]": 1824,
2014
+ "[61Ni]": 1825,
2015
+ "[56Ni]": 1826,
2016
+ "[149Gd]": 1827,
2017
+ "[151Gd]": 1828,
2018
+ "[141Pm]": 1829,
2019
+ "[147Gd]": 1830,
2020
+ "[146Gd]": 1831,
2021
+ "[161Er]": 1832,
2022
+ "[103Ag]": 1833,
2023
+ "[145Eu]": 1834,
2024
+ "[153Tb]": 1835,
2025
+ "[155Dy]": 1836,
2026
+ "[184Re]": 1837,
2027
+ "[180Os]": 1838,
2028
+ "[182Os]": 1839,
2029
+ "[186Pt]": 1840,
2030
+ "[181Os]": 1841,
2031
+ "[181Re]": 1842,
2032
+ "[151Tb]": 1843,
2033
+ "[178Ta]": 1844,
2034
+ "[178W]": 1845,
2035
+ "[189Pt]": 1846,
2036
+ "[194Hg]": 1847,
2037
+ "[145Sm]": 1848,
2038
+ "[150Tb]": 1849,
2039
+ "[132La]": 1850,
2040
+ "[158Gd]": 1851,
2041
+ "[104Ag]": 1852,
2042
+ "[193Hg]": 1853,
2043
+ "[94Ru]": 1854,
2044
+ "[137Pr]": 1855,
2045
+ "[155Ho]": 1856,
2046
+ "[117Cd]": 1857,
2047
+ "[99Ru]": 1858,
2048
+ "[146Nd]": 1859,
2049
+ "[218Rn]": 1860,
2050
+ "[95Y]": 1861,
2051
+ "[79Kr]": 1862,
2052
+ "[120IH]": 1863,
2053
+ "[138Pr]": 1864,
2054
+ "[100Pd]": 1865,
2055
+ "[166Tm]": 1866,
2056
+ "[90Mo]": 1867,
2057
+ "[151Nd]": 1868,
2058
+ "[231U]": 1869,
2059
+ "[138Nd]": 1870,
2060
+ "[89Nb]": 1871,
2061
+ "[98Nb]": 1872,
2062
+ "[162Ho]": 1873,
2063
+ "[142Sm]": 1874,
2064
+ "[186Ta]": 1875,
2065
+ "[104Tc]": 1876,
2066
+ "[184Ta]": 1877,
2067
+ "[185Ta]": 1878,
2068
+ "[170Er]": 1879,
2069
+ "[107Rh]": 1880,
2070
+ "[131La]": 1881,
2071
+ "[169Lu]": 1882,
2072
+ "[74BrH]": 1883,
2073
+ "[150Pm]": 1884,
2074
+ "[172Tm]": 1885,
2075
+ "[197Pt]": 1886,
2076
+ "[230Pu]": 1887,
2077
+ "[170Lu]": 1888,
2078
+ "[86Zr]": 1889,
2079
+ "[176W]": 1890,
2080
+ "[177W]": 1891,
2081
+ "[101Pd]": 1892,
2082
+ "[105Pd]": 1893,
2083
+ "[108Pd]": 1894,
2084
+ "[149Nd]": 1895,
2085
+ "[164Ho]": 1896,
2086
+ "[159Ho]": 1897,
2087
+ "[167Ho]": 1898,
2088
+ "[176Yb]": 1899,
2089
+ "[156Sm]": 1900,
2090
+ "[77BrH]": 1901,
2091
+ "[189Re]": 1902,
2092
+ "[99Rh]": 1903,
2093
+ "[100Rh]": 1904,
2094
+ "[151Pm]": 1905,
2095
+ "[232Pa]": 1906,
2096
+ "[228Pa]": 1907,
2097
+ "[230Pa]": 1908,
2098
+ "[66Ni]": 1909,
2099
+ "[194Os]": 1910,
2100
+ "[135La]": 1911,
2101
+ "[138La]": 1912,
2102
+ "[141La]": 1913,
2103
+ "[142La]": 1914,
2104
+ "[195Ir]": 1915,
2105
+ "[96Nb]": 1916,
2106
+ "[157Ho]": 1917,
2107
+ "[183Hf]": 1918,
2108
+ "[162Tm]": 1919,
2109
+ "[172Er]": 1920,
2110
+ "[148Eu]": 1921,
2111
+ "[150Eu]": 1922,
2112
+ "[15CH4]": 1923,
2113
+ "[89Kr]": 1924,
2114
+ "[143La]": 1925,
2115
+ "[58Ni]": 1926,
2116
+ "[61Co]": 1927,
2117
+ "[158Eu]": 1928,
2118
+ "[165Er]": 1929,
2119
+ "[167Yb]": 1930,
2120
+ "[173Tm]": 1931,
2121
+ "[175Tm]": 1932,
2122
+ "[172Hf]": 1933,
2123
+ "[172Lu]": 1934,
2124
+ "[93Tc]": 1935,
2125
+ "[177Yb]": 1936,
2126
+ "[124IH]": 1937,
2127
+ "[194Ir]": 1938,
2128
+ "[147Eu]": 1939,
2129
+ "[101Mo]": 1940,
2130
+ "[180Hf]": 1941,
2131
+ "[189Ir]": 1942,
2132
+ "[87Y]": 1943,
2133
+ "[43Sc]": 1944,
2134
+ "[195Au]": 1945,
2135
+ "[112Ag]": 1946,
2136
+ "[84BrH]": 1947,
2137
+ "[106Ag]": 1948,
2138
+ "[109Ag]": 1949,
2139
+ "[101Rh]": 1950,
2140
+ "[162Yb]": 1951,
2141
+ "[228Rn]": 1952,
2142
+ "[139Pr]": 1953,
2143
+ "[94Y]": 1954,
2144
+ "[201Au]": 1955,
2145
+ "[40PH3]": 1956,
2146
+ "[110Ag+]": 1957,
2147
+ "[104Cd]": 1958,
2148
+ "[133Ba+2]": 1959,
2149
+ "[226Ac]": 1960,
2150
+ "[145Gd]": 1961,
2151
+ "[186Ir]": 1962,
2152
+ "[184Ir]": 1963,
2153
+ "[224Rn]": 1964,
2154
+ "[185Ir]": 1965,
2155
+ "[182Ir]": 1966,
2156
+ "[184Hf]": 1967,
2157
+ "[200Pt]": 1968,
2158
+ "[227Pa]": 1969,
2159
+ "[178Yb]": 1970,
2160
+ "[72Br-]": 1971,
2161
+ "[72BrH]": 1972,
2162
+ "[248Am]": 1973,
2163
+ "[238Th]": 1974,
2164
+ "[161Gd]": 1975,
2165
+ "[35S-2]": 1976,
2166
+ "[107Ag]": 1977,
2167
+ "[FeH6-4]": 1978,
2168
+ "[89Sr]": 1979,
2169
+ "[SnH3-]": 1980,
2170
+ "[SeH3]": 1981,
2171
+ "[TeH3+]": 1982,
2172
+ "[SbH4+]": 1983,
2173
+ "[AsH4+]": 1984,
2174
+ "[4He]": 1985,
2175
+ "[AsH3-]": 1986,
2176
+ "[1HH]": 1987,
2177
+ "[3H+]": 1988,
2178
+ "[82Rb]": 1989,
2179
+ "[85Sr]": 1990,
2180
+ "[90Sr]": 1991,
2181
+ "[137Cs]": 1992,
2182
+ "[133Ba]": 1993,
2183
+ "[131Cs]": 1994,
2184
+ "[SbH5]": 1995,
2185
+ "[224Ra]": 1996,
2186
+ "[22Na]": 1997,
2187
+ "[210Bi]": 1998,
2188
+ "[214Bi]": 1999,
2189
+ "[228Ra]": 2000,
2190
+ "[127Sb]": 2001,
2191
+ "[136Cs]": 2002,
2192
+ "[125Sb]": 2003,
2193
+ "[134Cs]": 2004,
2194
+ "[140Ba]": 2005,
2195
+ "[45Ca]": 2006,
2196
+ "[206Pb]": 2007,
2197
+ "[207Pb]": 2008,
2198
+ "[24Na]": 2009,
2199
+ "[86Rb]": 2010,
2200
+ "[212Bi]": 2011,
2201
+ "[208Pb]": 2012,
2202
+ "[124Sb]": 2013,
2203
+ "[204Pb]": 2014,
2204
+ "[44K]": 2015,
2205
+ "[129Te]": 2016,
2206
+ "[113Sn]": 2017,
2207
+ "[204Tl]": 2018,
2208
+ "[87Sr]": 2019,
2209
+ "[208Tl]": 2020,
2210
+ "[87Rb]": 2021,
2211
+ "[47Ca]": 2022,
2212
+ "[135Cs]": 2023,
2213
+ "[216Po]": 2024,
2214
+ "[137Ba]": 2025,
2215
+ "[207Bi]": 2026,
2216
+ "[212Po]": 2027,
2217
+ "[79Se]": 2028,
2218
+ "[223Ra]": 2029,
2219
+ "[86Sr]": 2030,
2220
+ "[122Sb]": 2031,
2221
+ "[26Al]": 2032,
2222
+ "[32Si]": 2033,
2223
+ "[126Sn]": 2034,
2224
+ "[225Ra]": 2035,
2225
+ "[114In]": 2036,
2226
+ "[72Ga]": 2037,
2227
+ "[132Te]": 2038,
2228
+ "[10Be]": 2039,
2229
+ "[125Sn]": 2040,
2230
+ "[73As]": 2041,
2231
+ "[206Bi]": 2042,
2232
+ "[117Sn]": 2043,
2233
+ "[40Ca]": 2044,
2234
+ "[41Ca]": 2045,
2235
+ "[89Rb]": 2046,
2236
+ "[116In]": 2047,
2237
+ "[129Sb]": 2048,
2238
+ "[91Sr]": 2049,
2239
+ "[71Ge]": 2050,
2240
+ "[139Ba]": 2051,
2241
+ "[69Ga]": 2052,
2242
+ "[120Sb]": 2053,
2243
+ "[121Sn]": 2054,
2244
+ "[123Sn]": 2055,
2245
+ "[131Te]": 2056,
2246
+ "[77Ge]": 2057,
2247
+ "[135Ba]": 2058,
2248
+ "[82Sr]": 2059,
2249
+ "[43K]": 2060,
2250
+ "[131Ba]": 2061,
2251
+ "[92Sr]": 2062,
2252
+ "[88Rb]": 2063,
2253
+ "[129Cs]": 2064,
2254
+ "[144Cs]": 2065,
2255
+ "[127Cs]": 2066,
2256
+ "[200Tl]": 2067,
2257
+ "[202Tl]": 2068,
2258
+ "[141Ba]": 2069,
2259
+ "[117Sb]": 2070,
2260
+ "[116Sb]": 2071,
2261
+ "[78As]": 2072,
2262
+ "[131Sb]": 2073,
2263
+ "[126Sb]": 2074,
2264
+ "[128Sb]": 2075,
2265
+ "[130Sb]": 2076,
2266
+ "[67Ge]": 2077,
2267
+ "[68Ge]": 2078,
2268
+ "[78Ge]": 2079,
2269
+ "[66Ge]": 2080,
2270
+ "[223Fr]": 2081,
2271
+ "[132Cs]": 2082,
2272
+ "[125Cs]": 2083,
2273
+ "[138Cs]": 2084,
2274
+ "[133Te]": 2085,
2275
+ "[84Rb]": 2086,
2276
+ "[83Rb]": 2087,
2277
+ "[81Rb]": 2088,
2278
+ "[142Ba]": 2089,
2279
+ "[200Bi]": 2090,
2280
+ "[115Sb]": 2091,
2281
+ "[194Tl]": 2092,
2282
+ "[70Se]": 2093,
2283
+ "[112In]": 2094,
2284
+ "[118Sb]": 2095,
2285
+ "[70Ga]": 2096,
2286
+ "[27Mg]": 2097,
2287
+ "[202Bi]": 2098,
2288
+ "[83Se]": 2099,
2289
+ "[9Li]": 2100,
2290
+ "[69As]": 2101,
2291
+ "[79Rb]": 2102,
2292
+ "[81Sr]": 2103,
2293
+ "[83Sr]": 2104,
2294
+ "[78Se]": 2105,
2295
+ "[109In]": 2106,
2296
+ "[29Al]": 2107,
2297
+ "[118Sn]": 2108,
2298
+ "[117In]": 2109,
2299
+ "[119Sb]": 2110,
2300
+ "[114Sn]": 2111,
2301
+ "[138Ba]": 2112,
2302
+ "[69Ge]": 2113,
2303
+ "[73Ga]": 2114,
2304
+ "[74Ge]": 2115,
2305
+ "[206Tl]": 2116,
2306
+ "[199Tl]": 2117,
2307
+ "[130Cs]": 2118,
2308
+ "[28Mg]": 2119,
2309
+ "[116Te]": 2120,
2310
+ "[112Sn]": 2121,
2311
+ "[126Ba]": 2122,
2312
+ "[211Bi]": 2123,
2313
+ "[81Se]": 2124,
2314
+ "[127Sn]": 2125,
2315
+ "[143Cs]": 2126,
2316
+ "[134Te]": 2127,
2317
+ "[80Sr]": 2128,
2318
+ "[45K]": 2129,
2319
+ "[215Po]": 2130,
2320
+ "[207Po]": 2131,
2321
+ "[111Sn]": 2132,
2322
+ "[211Po]": 2133,
2323
+ "[128Ba]": 2134,
2324
+ "[198Tl]": 2135,
2325
+ "[227Ra]": 2136,
2326
+ "[213Po]": 2137,
2327
+ "[220Ra]": 2138,
2328
+ "[128Sn]": 2139,
2329
+ "[203Po]": 2140,
2330
+ "[205Po]": 2141,
2331
+ "[65Ga]": 2142,
2332
+ "[197Tl]": 2143,
2333
+ "[88Sr]": 2144,
2334
+ "[110In]": 2145,
2335
+ "[31Si]": 2146,
2336
+ "[201Bi]": 2147,
2337
+ "[121Te]": 2148,
2338
+ "[205Bi]": 2149,
2339
+ "[203Bi]": 2150,
2340
+ "[195Tl]": 2151,
2341
+ "[209Tl]": 2152,
2342
+ "[110Sn]": 2153,
2343
+ "[222Fr]": 2154,
2344
+ "[207At]": 2155,
2345
+ "[119In]": 2156,
2346
+ "[As@]": 2157,
2347
+ "[129IH]": 2158,
2348
+ "[157Dy]": 2159,
2349
+ "[111IH]": 2160,
2350
+ "[230Ra]": 2161,
2351
+ "[144Pr+3]": 2162,
2352
+ "[SiH3+]": 2163,
2353
+ "[3He]": 2164,
2354
+ "[AsH5]": 2165,
2355
+ "[72Se]": 2166,
2356
+ "[95Tc]": 2167,
2357
+ "[103Pd]": 2168,
2358
+ "[121Sn+2]": 2169,
2359
+ "[211Rn]": 2170,
2360
+ "[38SH2]": 2171,
2361
+ "[127IH]": 2172,
2362
+ "[74Br-]": 2173,
2363
+ "[133I-]": 2174,
2364
+ "[100Tc+4]": 2175,
2365
+ "[100Tc]": 2176,
2366
+ "[36Cl-]": 2177,
2367
+ "[89Y+3]": 2178,
2368
+ "[104Rh]": 2179,
2369
+ "[152Sm]": 2180,
2370
+ "[226Ra]": 2181,
2371
+ "[19FH]": 2182,
2372
+ "[104Pd]": 2183,
2373
+ "[148Gd]": 2184,
2374
+ "[157Lu]": 2185,
2375
+ "[33SH2]": 2186,
2376
+ "[121I-]": 2187,
2377
+ "[17FH]": 2188,
2378
+ "[71Se]": 2189,
2379
+ "[157Sm]": 2190,
2380
+ "[148Tb]": 2191,
2381
+ "[164Dy]": 2192,
2382
+ "[15OH2]": 2193,
2383
+ "[15O+]": 2194,
2384
+ "[39K]": 2195,
2385
+ "[40Ar]": 2196,
2386
+ "[50Cr+3]": 2197,
2387
+ "[50Cr]": 2198,
2388
+ "[52Ti]": 2199,
2389
+ "[103Pd+2]": 2200,
2390
+ "[130Ba]": 2201,
2391
+ "[142Pm]": 2202,
2392
+ "[153Gd+3]": 2203,
2393
+ "[151Eu]": 2204,
2394
+ "[103Rh]": 2205,
2395
+ "[124Xe]": 2206,
2396
+ "[152Tb]": 2207,
2397
+ "[17OH2]": 2208,
2398
+ "[20Ne]": 2209,
2399
+ "[52Fe]": 2210,
2400
+ "[94Zr+4]": 2211,
2401
+ "[94Zr]": 2212,
2402
+ "[149Pr]": 2213,
2403
+ "[16OH2]": 2214,
2404
+ "[53Cr+6]": 2215,
2405
+ "[53Cr]": 2216,
2406
+ "[81Br-]": 2217,
2407
+ "[112Pd]": 2218,
2408
+ "[125Xe]": 2219,
2409
+ "[155Gd]": 2220,
2410
+ "[157Gd]": 2221,
2411
+ "[168Yb]": 2222,
2412
+ "[184Os]": 2223,
2413
+ "[166Tb]": 2224,
2414
+ "[221Fr]": 2225,
2415
+ "[212Ra]": 2226,
2416
+ "[75Br-]": 2227,
2417
+ "[79Br-]": 2228,
2418
+ "[113Ag]": 2229,
2419
+ "[23Na]": 2230,
2420
+ "[34Cl-]": 2231,
2421
+ "[34ClH]": 2232,
2422
+ "[38Cl-]": 2233,
2423
+ "[56Fe]": 2234,
2424
+ "[68Cu]": 2235,
2425
+ "[77Br-]": 2236,
2426
+ "[90Zr+4]": 2237,
2427
+ "[90Zr]": 2238,
2428
+ "[102Pd]": 2239,
2429
+ "[154Eu+3]": 2240,
2430
+ "[57Mn]": 2241,
2431
+ "[165Tm]": 2242,
2432
+ "[152Dy]": 2243,
2433
+ "[217At]": 2244,
2434
+ "[77se]": 2245,
2435
+ "[13cH-]": 2246,
2436
+ "[122Te]": 2247,
2437
+ "[156Gd]": 2248,
2438
+ "[124Te]": 2249,
2439
+ "[53Ni]": 2250,
2440
+ "[131Xe]": 2251,
2441
+ "[174Hf+4]": 2252,
2442
+ "[174Hf]": 2253,
2443
+ "[76Se]": 2254,
2444
+ "[168Tm]": 2255,
2445
+ "[167Dy]": 2256,
2446
+ "[154Gd]": 2257,
2447
+ "[95Ru]": 2258,
2448
+ "[210At]": 2259,
2449
+ "[85Br]": 2260,
2450
+ "[59Co]": 2261,
2451
+ "[122Xe]": 2262,
2452
+ "[27Al]": 2263,
2453
+ "[54Cr]": 2264,
2454
+ "[198Hg]": 2265,
2455
+ "[85Rb+]": 2266,
2456
+ "[214Tl]": 2267,
2457
+ "[229Rn]": 2268,
2458
+ "[218Pb]": 2269,
2459
+ "[218Bi]": 2270,
2460
+ "[167Tm+3]": 2271,
2461
+ "[18o+]": 2272,
2462
+ "[P@@H+]": 2273,
2463
+ "[P@H+]": 2274,
2464
+ "[13N+]": 2275,
2465
+ "[212Pb+2]": 2276,
2466
+ "[217Bi]": 2277,
2467
+ "[249Cf+2]": 2278,
2468
+ "[18OH3+]": 2279,
2469
+ "[90Sr-]": 2280,
2470
+ "[Cf+3]": 2281,
2471
+ "[200Hg]": 2282,
2472
+ "[86Tc]": 2283,
2473
+ "[141Pr+3]": 2284,
2474
+ "[141Pr]": 2285,
2475
+ "[16nH]": 2286,
2476
+ "[14NH4+]": 2287,
2477
+ "[132Xe]": 2288,
2478
+ "[83Kr]": 2289,
2479
+ "[70Zn+2]": 2290,
2480
+ "[137Ba+2]": 2291,
2481
+ "[36Ar]": 2292,
2482
+ "[38Ar]": 2293,
2483
+ "[21Ne]": 2294,
2484
+ "[126Xe]": 2295,
2485
+ "[136Xe]": 2296,
2486
+ "[128Xe]": 2297,
2487
+ "[134Xe]": 2298,
2488
+ "[84Kr]": 2299,
2489
+ "[86Kr]": 2300,
2490
+ "[78Kr]": 2301,
2491
+ "[80Kr]": 2302,
2492
+ "[82Kr]": 2303,
2493
+ "[67Zn+2]": 2304,
2494
+ "[65Cu+2]": 2305,
2495
+ "[110Te]": 2306,
2496
+ "[58Fe+3]": 2307,
2497
+ "[142Nd]": 2308,
2498
+ "[38K]": 2309,
2499
+ "[198Au+3]": 2310,
2500
+ "[122IH]": 2311,
2501
+ "[38PH3]": 2312,
2502
+ "[130I-]": 2313,
2503
+ "[40K+]": 2314,
2504
+ "[38K+]": 2315,
2505
+ "[28Mg+2]": 2316,
2506
+ "[208Tl+]": 2317,
2507
+ "[13OH2]": 2318,
2508
+ "[198Bi]": 2319,
2509
+ "[192Bi]": 2320,
2510
+ "[194Bi]": 2321,
2511
+ "[196Bi]": 2322,
2512
+ "[132I-]": 2323,
2513
+ "[83Sr+2]": 2324,
2514
+ "[169Er+3]": 2325,
2515
+ "[122I-]": 2326,
2516
+ "[120I-]": 2327,
2517
+ "[92Sr+2]": 2328,
2518
+ "[126I-]": 2329,
2519
+ "[24Mg]": 2330,
2520
+ "[84Sr]": 2331,
2521
+ "[118Pd+2]": 2332,
2522
+ "[118Pd]": 2333,
2523
+ "[AsH4]": 2334,
2524
+ "[127I-]": 2335,
2525
+ "[9C-]": 2336,
2526
+ "[11CH3+]": 2337,
2527
+ "[17B]": 2338,
2528
+ "[7B]": 2339,
2529
+ "[4HH]": 2340,
2530
+ "[18C-]": 2341,
2531
+ "[22CH3-]": 2342,
2532
+ "[22CH4]": 2343,
2533
+ "[17C-]": 2344,
2534
+ "[15CH3]": 2345,
2535
+ "[16CH3]": 2346,
2536
+ "[11NH3]": 2347,
2537
+ "[21NH3]": 2348,
2538
+ "[11N-]": 2349,
2539
+ "[11NH]": 2350,
2540
+ "[16CH]": 2351,
2541
+ "[17CH2]": 2352,
2542
+ "[99Ru+2]": 2353,
2543
+ "[181Ta+2]": 2354,
2544
+ "[181Ta]": 2355,
2545
+ "[20CH]": 2356,
2546
+ "[32PH2]": 2357,
2547
+ "[55Fe+2]": 2358,
2548
+ "[SH3]": 2359,
2549
+ "[S@H]": 2360,
2550
+ "[UNK]": 2361
2551
+ },
2552
+ "merges": []
2553
+ }
2554
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[CLS]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[SEP]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[PAD]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[MASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "2361": {
36
+ "content": "[UNK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "extra_special_tokens": {},
47
+ "mask_token": "[MASK]",
48
+ "model_max_length": 256,
49
+ "pad_token": "[PAD]",
50
+ "sep_token": "[SEP]",
51
+ "tokenizer_class": "PreTrainedTokenizerFast",
52
+ "unk_token": "[UNK]"
53
+ }