sougatamaity committed
Commit 34f6746 · 1 Parent(s): c0b8d92

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/dataset.json +17 -0
  2. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/dataset_fingerprint.json +618 -0
  3. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth +3 -0
  4. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/checkpoint_latest.pth +3 -0
  5. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/debug.json +52 -0
  6. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/network_architecture +171 -0
  7. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/progress.png +0 -0
  8. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_11_21_50_08.txt +26 -0
  9. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_11_21_56_01.txt +26 -0
  10. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_11_22_29_34.txt +0 -0
  11. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_14_08_44_57.txt +782 -0
  12. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_16_11_52_25.txt +887 -0
  13. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/plans.json +454 -0
  14. Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/postprocessing.pkl +3 -0
  15. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/dataset.json +12 -0
  16. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/dataset_fingerprint.json +618 -0
  17. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/checkpoint_best.pth +3 -0
  18. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/checkpoint_latest.pth +3 -0
  19. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/debug.json +52 -0
  20. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/network_architecture +233 -0
  21. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/progress.png +0 -0
  22. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/training_log_2023_11_6_13_13_08.txt +1066 -0
  23. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/plans.json +454 -0
  24. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json +12 -0
  25. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json +618 -0
  26. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth +3 -0
  27. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_latest.pth +3 -0
  28. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/debug.json +52 -0
  29. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/network_architecture +171 -0
  30. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/progress.png +0 -0
  31. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_1_12_29_08.txt +1654 -0
  32. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_3_11_49_25.txt +75 -0
  33. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_5_04_09_40.txt +665 -0
  34. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/plans.json +454 -0
  35. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/dataset.json +12 -0
  36. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/dataset_fingerprint.json +618 -0
  37. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/checkpoint_best.pth +3 -0
  38. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/checkpoint_latest.pth +3 -0
  39. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/debug.json +52 -0
  40. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/network_architecture +171 -0
  41. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/progress.png +0 -0
  42. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/training_log_2023_11_5_22_05_41.txt +660 -0
  43. Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/plans.json +454 -0
  44. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json +12 -0
  45. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json +618 -0
  46. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth +3 -0
  47. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_final.pth +3 -0
  48. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/debug.json +52 -0
  49. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/progress.png +0 -0
  50. Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_1_18_43_12.txt +0 -0
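The listing above is a plain folder upload of trained nnU-Net model files. Below is a minimal sketch of pulling such an upload back down with the huggingface_hub client; the repository id is not visible in this commit view, so the repo_id used here is a placeholder.

from huggingface_hub import snapshot_download

# Placeholder repo_id -- the actual repository name is not shown in this commit view.
local_path = snapshot_download(
    repo_id="<namespace>/<repo-name>",
    allow_patterns=["Dataset720_TSPrime/*"],  # optionally restrict the download to one model folder
)
print("files downloaded to:", local_path)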
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/dataset.json ADDED
@@ -0,0 +1,17 @@
+ {
+     "channel_names": {
+         "0": "CT"
+     },
+     "labels": {
+         "background": 0,
+         "Bladder": 1,
+         "Anorectum": 2,
+         "Bag_Bowel": 3,
+         "Femur_Head_L": 4,
+         "Femur_Head_R": 5,
+         "Penilebulb": 6
+     },
+     "numTraining": 60,
+     "file_ending": ".nii.gz",
+     "numTest": 0
+ }
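This dataset.json follows the nnU-Net v2 dataset description format: one CT input channel, six pelvic organ labels plus background, and 60 training cases. A minimal sketch (plain Python, no nnU-Net imports) of reading it and inverting the label map follows; the local path is hypothetical.

import json
from pathlib import Path

# Hypothetical path inside a downloaded copy of this model folder.
dataset_json = Path("Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/dataset.json")
meta = json.loads(dataset_json.read_text())

# Map predicted integer ids back to organ names.
id_to_name = {int(v): k for k, v in meta["labels"].items()}
print(meta["channel_names"])          # {'0': 'CT'}
print(id_to_name[1], id_to_name[6])   # Bladder Penilebulb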
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/dataset_fingerprint.json ADDED
@@ -0,0 +1,618 @@
+ {
+     "foreground_intensity_properties_per_channel": {
+         "0": {
+             "max": 1620.0,
+             "mean": -38.229164123535156,
+             "median": -54.0,
+             "min": -1000.0,
+             "percentile_00_5": -941.0,
+             "percentile_99_5": 897.0,
+             "std": 192.37086486816406
+         }
+     },
+     "median_relative_size_after_cropping": 1.0,
+     "shapes_after_crop": [
+         [230, 512, 512], [240, 512, 512], [260, 512, 512], [215, 512, 512], [260, 512, 512], [220, 512, 512],
+         [210, 512, 512], [240, 512, 512], [265, 512, 512], [229, 512, 512], [230, 512, 512], [243, 512, 512],
+         [230, 512, 512], [250, 512, 512], [250, 512, 512], [245, 512, 512], [235, 512, 512], [250, 512, 512],
+         [242, 512, 512], [241, 512, 512], [210, 512, 512], [255, 512, 512], [246, 512, 512], [240, 512, 512],
+         [245, 512, 512], [250, 512, 512], [249, 512, 512], [210, 512, 512], [210, 512, 512], [244, 512, 512],
+         [230, 512, 512], [235, 512, 512], [260, 512, 512], [241, 512, 512], [220, 512, 512], [240, 512, 512],
+         [190, 512, 512], [255, 512, 512], [230, 512, 512], [255, 512, 512], [236, 512, 512], [241, 512, 512],
+         [220, 512, 512], [241, 512, 512], [245, 512, 512], [241, 512, 512], [250, 512, 512], [210, 512, 512],
+         [250, 512, 512], [266, 512, 512], [220, 512, 512], [230, 512, 512], [280, 512, 512], [260, 512, 512],
+         [245, 512, 512], [220, 512, 512], [240, 512, 512], [250, 512, 512], [226, 512, 512], [240, 512, 512]
+     ],
+     "spacings": [
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421],
+         [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421], [2.5, 1.269531011581421, 1.269531011581421]
+     ]
+ }
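All 60 training cases share the same in-plane spacing; only the number of axial slices varies. A minimal sketch of recomputing the summary statistics nnU-Net derives from this fingerprint is shown below (the path is a hypothetical local copy of the folder).

import json
import numpy as np

# Hypothetical path inside a downloaded copy of this model folder.
with open("Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/dataset_fingerprint.json") as f:
    fingerprint = json.load(f)

shapes = np.array(fingerprint["shapes_after_crop"])
spacings = np.array(fingerprint["spacings"])
print("median shape after crop:", np.median(shapes, axis=0))   # approx. [241, 512, 512]
print("median spacing (mm):", np.median(spacings, axis=0))     # [2.5, 1.2695..., 1.2695...]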
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e27d5b54b180e4e0e2f27bff5ed8d071cf2d431d4861a3b1fcee4f2f76ba0da8
+ size 246768929
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/checkpoint_latest.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15ec58f8a56fa17e53739749487a8cbf54fb0282167c5e1c9bf5c0c89082e96a
+ size 246952469
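Both checkpoint entries are Git LFS pointer files rather than the weights themselves; each records the LFS spec version, a sha256 object id, and the byte size of the real .pth file. A minimal sketch of verifying a resolved (downloaded) checkpoint against the pointer shown above follows; the local path is hypothetical.

import hashlib
from pathlib import Path

# Hypothetical path to the resolved checkpoint, not the pointer file.
ckpt = Path("fold_0/checkpoint_best.pth")
expected_oid = "e27d5b54b180e4e0e2f27bff5ed8d071cf2d431d4861a3b1fcee4f2f76ba0da8"
expected_size = 246768929

data = ckpt.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("checkpoint_best.pth matches its LFS pointer")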
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/debug.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "_best_ema": "0.86605509930085",
3
+ "batch_size": "2",
4
+ "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}",
5
+ "configuration_name": "3d_fullres",
6
+ "cudnn_version": 8500,
7
+ "current_epoch": "650",
8
+ "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fb9a6ed9e50>",
9
+ "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fb9a6db8890>",
10
+ "dataloader_train.num_processes": "12",
11
+ "dataloader_train.transform": "Compose ( [SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [80, 192, 160], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-0.5235987755982988, 0.5235987755982988), angle_y = (-0.5235987755982988, 0.5235987755982988), angle_z = (-0.5235987755982988, 0.5235987755982988), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = None ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
+ "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fb9a6ed9e90>",
13
+ "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fb9a6dba150>",
14
+ "dataloader_val.num_processes": "6",
15
+ "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
+ "dataset_json": "{'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Bladder': 1, 'Anorectum': 2, 'Bag_Bowel': 3, 'Femur_Head_L': 4, 'Femur_Head_R': 5, 'Penilebulb': 6}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}",
17
+ "device": "cuda:0",
18
+ "disable_checkpointing": "False",
19
+ "fold": "0",
20
+ "folder_with_segs_from_previous_stage": "None",
21
+ "gpu_name": "NVIDIA GeForce GTX 1080 Ti",
22
+ "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7fb9a74c7010>",
23
+ "hostname": "vipadmin-Z10PE-D16-WS",
24
+ "inference_allowed_mirroring_axes": "None",
25
+ "initial_lr": "0.01",
26
+ "is_cascaded": "False",
27
+ "is_ddp": "False",
28
+ "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7fb9a734f410>",
29
+ "local_rank": "0",
30
+ "log_file": "./data/nnUNet_results/Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_16_11_52_25.txt",
31
+ "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7fb9a6ea1490>",
32
+ "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
+ "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7fb9a74e2ad0>",
34
+ "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset720_TSPrime', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], 
[3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1620.0, 'mean': -38.229164123535156, 'median': -54.0, 'min': -1000.0, 'percentile_00_5': -941.0, 'percentile_99_5': 897.0, 'std': 192.37086486816406}}}, 'configuration': '3d_fullres', 'fold': 0, 'dataset_json': {'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Bladder': 1, 'Anorectum': 2, 'Bag_Bowel': 3, 'Femur_Head_L': 4, 'Femur_Head_R': 5, 'Penilebulb': 6}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
+ "network": "PlainConvUNet",
36
+ "num_epochs": "1000",
37
+ "num_input_channels": "1",
38
+ "num_iterations_per_epoch": "250",
39
+ "num_val_iterations_per_epoch": "50",
40
+ "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.003897412779133726\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
+ "output_folder": "./data/nnUNet_results/Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0",
42
+ "output_folder_base": "./data/nnUNet_results/Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres",
43
+ "oversample_foreground_percent": "0.33",
44
+ "plans_manager": "{'dataset_name': 'Dataset720_TSPrime', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1620.0, 'mean': -38.229164123535156, 'median': -54.0, 'min': -1000.0, 'percentile_00_5': -941.0, 'percentile_99_5': 897.0, 'std': 192.37086486816406}}}",
45
+ "preprocessed_dataset_folder": "./data/nnUNet_preprocessed/Dataset720_TSPrime/nnUNetPlans_3d_fullres",
46
+ "preprocessed_dataset_folder_base": "./data/nnUNet_preprocessed/Dataset720_TSPrime",
47
+ "save_every": "50",
48
+ "torch_version": "2.0.1+cu117",
49
+ "unpack_dataset": "True",
50
+ "was_initialized": "True",
51
+ "weight_decay": "3e-05"
52
+ }
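debug.json is a flat snapshot of the trainer state at save time; every value is stored as a string, including the nested configuration dictionaries. A minimal sketch of pulling a few fields back out is shown below (ast.literal_eval recovers the quoted Python dicts; the path is hypothetical).

import ast
import json

# Hypothetical path inside a downloaded copy of this model folder.
with open("fold_0/debug.json") as f:
    dbg = json.load(f)

print(dbg["torch_version"], dbg["gpu_name"], dbg["num_epochs"])
config = ast.literal_eval(dbg["configuration_manager"])  # the quoted dict shown above
print(config["patch_size"], config["spacing"])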
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/network_architecture ADDED
@@ -0,0 +1,171 @@
1
+ digraph {
2
+ graph [bgcolor="#FFFFFF" color="#000000" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" pad="1.0,0.5" rankdir=LR]
3
+ node [color="#000000" fillcolor="#E8E8E8" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" shape=box style=filled]
4
+ edge [color="#000000" fontcolor="#000000" fontname=Times fontsize=10 style=solid]
5
+ "/outputs/109" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
6
+ "/outputs/110" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
7
+ "/outputs/111" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
8
+ "/outputs/112" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
9
+ "/outputs/113" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
10
+ "/outputs/114" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
11
+ "/outputs/115" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
12
+ "/outputs/116" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
13
+ "/outputs/117" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
14
+ "/outputs/118" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
15
+ "/outputs/119" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
16
+ "/outputs/120" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
17
+ "/outputs/121" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
18
+ "/outputs/122" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
19
+ "/outputs/123" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
20
+ "/outputs/124" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
21
+ "/outputs/125" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
22
+ "/outputs/126" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
23
+ "/outputs/127" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
24
+ "/outputs/128" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
25
+ "/outputs/129" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
26
+ "/outputs/130" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
27
+ "/outputs/131" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
28
+ "/outputs/132" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
29
+ "/outputs/133" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
30
+ "/outputs/134" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
31
+ "/outputs/135" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
32
+ "/outputs/136" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
33
+ "/outputs/137" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
34
+ "/outputs/138" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
35
+ "/outputs/139" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 2, 2]</td></tr></table>>]
36
+ "/outputs/140" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
37
+ "/outputs/141" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
38
+ "/outputs/142" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
39
+ "/outputs/143" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
40
+ "/outputs/144" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
41
+ "/outputs/145" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [1, 2, 2], stride: [1, 2, 2]</td></tr></table>>]
42
+ "/outputs/146" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
43
+ "/outputs/147" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
44
+ "/outputs/148" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
45
+ "/outputs/149" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
46
+ "/outputs/150" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
47
+ "/outputs/151" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
48
+ "/outputs/152" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
49
+ "/outputs/153" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
50
+ "/outputs/154" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
51
+ "/outputs/155" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
52
+ "/outputs/156" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
53
+ "/outputs/157" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
54
+ "/outputs/158" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
55
+ "/outputs/159" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
56
+ "/outputs/160" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
57
+ "/outputs/161" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
58
+ "/outputs/162" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
59
+ "/outputs/163" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
60
+ "/outputs/164" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
61
+ "/outputs/165" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
62
+ "/outputs/166" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
63
+ "/outputs/167" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
64
+ "/outputs/168" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
65
+ "/outputs/169" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
66
+ "/outputs/170" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
67
+ "/outputs/171" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
68
+ "/outputs/172" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
69
+ "/outputs/173" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
70
+ "/outputs/174" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
71
+ "/outputs/175" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
72
+ "/outputs/176" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
73
+ "/outputs/177" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
74
+ "/outputs/178" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
75
+ "/outputs/179" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
76
+ "/outputs/180" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
77
+ "/outputs/181" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
78
+ "/outputs/182" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
79
+ "/outputs/183" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
80
+ "/outputs/184" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
81
+ "/outputs/185" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
82
+ "/outputs/186" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
83
+ "/outputs/187" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
84
+ "/outputs/188" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
85
+ "/outputs/189" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
86
+ "/outputs/109" -> "/outputs/110" [label="1x32x80x192x160"]
87
+ "/outputs/110" -> "/outputs/111" [label="1x32x80x192x160"]
88
+ "/outputs/111" -> "/outputs/112" [label="1x32x80x192x160"]
89
+ "/outputs/112" -> "/outputs/113" [label="1x32x80x192x160"]
90
+ "/outputs/113" -> "/outputs/114" [label="1x32x80x192x160"]
91
+ "/outputs/114" -> "/outputs/115" [label="1x32x80x192x160"]
92
+ "/outputs/114" -> "/outputs/182" [label="1x32x80x192x160"]
93
+ "/outputs/115" -> "/outputs/116" [label="1x64x40x96x80"]
94
+ "/outputs/116" -> "/outputs/117" [label="1x64x40x96x80"]
95
+ "/outputs/117" -> "/outputs/118" [label="1x64x40x96x80"]
96
+ "/outputs/118" -> "/outputs/119" [label="1x64x40x96x80"]
97
+ "/outputs/119" -> "/outputs/120" [label="1x64x40x96x80"]
98
+ "/outputs/120" -> "/outputs/121" [label="1x64x40x96x80"]
99
+ "/outputs/120" -> "/outputs/173" [label="1x64x40x96x80"]
100
+ "/outputs/121" -> "/outputs/122" [label="1x128x20x48x40"]
101
+ "/outputs/122" -> "/outputs/123" [label="1x128x20x48x40"]
102
+ "/outputs/123" -> "/outputs/124" [label="1x128x20x48x40"]
103
+ "/outputs/124" -> "/outputs/125" [label="1x128x20x48x40"]
104
+ "/outputs/125" -> "/outputs/126" [label="1x128x20x48x40"]
105
+ "/outputs/126" -> "/outputs/127" [label="1x128x20x48x40"]
106
+ "/outputs/126" -> "/outputs/164" [label="1x128x20x48x40"]
107
+ "/outputs/127" -> "/outputs/128" [label="1x256x10x24x20"]
108
+ "/outputs/128" -> "/outputs/129" [label="1x256x10x24x20"]
109
+ "/outputs/129" -> "/outputs/130" [label="1x256x10x24x20"]
110
+ "/outputs/130" -> "/outputs/131" [label="1x256x10x24x20"]
111
+ "/outputs/131" -> "/outputs/132" [label="1x256x10x24x20"]
112
+ "/outputs/132" -> "/outputs/133" [label="1x256x10x24x20"]
113
+ "/outputs/132" -> "/outputs/155" [label="1x256x10x24x20"]
114
+ "/outputs/133" -> "/outputs/134" [label="1x320x5x12x10"]
115
+ "/outputs/134" -> "/outputs/135" [label="1x320x5x12x10"]
116
+ "/outputs/135" -> "/outputs/136" [label="1x320x5x12x10"]
117
+ "/outputs/136" -> "/outputs/137" [label="1x320x5x12x10"]
118
+ "/outputs/137" -> "/outputs/138" [label="1x320x5x12x10"]
119
+ "/outputs/138" -> "/outputs/139" [label="1x320x5x12x10"]
120
+ "/outputs/138" -> "/outputs/146" [label="1x320x5x12x10"]
121
+ "/outputs/139" -> "/outputs/140" [label="1x320x5x6x5"]
122
+ "/outputs/140" -> "/outputs/141" [label="1x320x5x6x5"]
123
+ "/outputs/141" -> "/outputs/142" [label="1x320x5x6x5"]
124
+ "/outputs/142" -> "/outputs/143" [label="1x320x5x6x5"]
125
+ "/outputs/143" -> "/outputs/144" [label="1x320x5x6x5"]
126
+ "/outputs/144" -> "/outputs/145" [label="1x320x5x6x5"]
127
+ "/outputs/145" -> "/outputs/146" [label="1x320x5x12x10"]
128
+ "/outputs/146" -> "/outputs/147" [label="1x640x5x12x10"]
129
+ "/outputs/147" -> "/outputs/148" [label="1x320x5x12x10"]
130
+ "/outputs/148" -> "/outputs/149" [label="1x320x5x12x10"]
131
+ "/outputs/149" -> "/outputs/150" [label="1x320x5x12x10"]
132
+ "/outputs/150" -> "/outputs/151" [label="1x320x5x12x10"]
133
+ "/outputs/151" -> "/outputs/152" [label="1x320x5x12x10"]
134
+ "/outputs/152" -> "/outputs/153" [label="1x320x5x12x10"]
135
+ "/outputs/152" -> "/outputs/154" [label="1x320x5x12x10"]
136
+ "/outputs/154" -> "/outputs/155" [label="1x256x10x24x20"]
137
+ "/outputs/155" -> "/outputs/156" [label="1x512x10x24x20"]
138
+ "/outputs/156" -> "/outputs/157" [label="1x256x10x24x20"]
139
+ "/outputs/157" -> "/outputs/158" [label="1x256x10x24x20"]
140
+ "/outputs/158" -> "/outputs/159" [label="1x256x10x24x20"]
141
+ "/outputs/159" -> "/outputs/160" [label="1x256x10x24x20"]
142
+ "/outputs/160" -> "/outputs/161" [label="1x256x10x24x20"]
143
+ "/outputs/161" -> "/outputs/162" [label="1x256x10x24x20"]
144
+ "/outputs/161" -> "/outputs/163" [label="1x256x10x24x20"]
145
+ "/outputs/163" -> "/outputs/164" [label="1x128x20x48x40"]
146
+ "/outputs/164" -> "/outputs/165" [label="1x256x20x48x40"]
147
+ "/outputs/165" -> "/outputs/166" [label="1x128x20x48x40"]
148
+ "/outputs/166" -> "/outputs/167" [label="1x128x20x48x40"]
149
+ "/outputs/167" -> "/outputs/168" [label="1x128x20x48x40"]
150
+ "/outputs/168" -> "/outputs/169" [label="1x128x20x48x40"]
151
+ "/outputs/169" -> "/outputs/170" [label="1x128x20x48x40"]
152
+ "/outputs/170" -> "/outputs/171" [label="1x128x20x48x40"]
153
+ "/outputs/170" -> "/outputs/172" [label="1x128x20x48x40"]
154
+ "/outputs/172" -> "/outputs/173" [label="1x64x40x96x80"]
155
+ "/outputs/173" -> "/outputs/174" [label="1x128x40x96x80"]
156
+ "/outputs/174" -> "/outputs/175" [label="1x64x40x96x80"]
157
+ "/outputs/175" -> "/outputs/176" [label="1x64x40x96x80"]
158
+ "/outputs/176" -> "/outputs/177" [label="1x64x40x96x80"]
159
+ "/outputs/177" -> "/outputs/178" [label="1x64x40x96x80"]
160
+ "/outputs/178" -> "/outputs/179" [label="1x64x40x96x80"]
161
+ "/outputs/179" -> "/outputs/180" [label="1x64x40x96x80"]
162
+ "/outputs/179" -> "/outputs/181" [label="1x64x40x96x80"]
163
+ "/outputs/181" -> "/outputs/182" [label="1x32x80x192x160"]
164
+ "/outputs/182" -> "/outputs/183" [label="1x64x80x192x160"]
165
+ "/outputs/183" -> "/outputs/184" [label="1x32x80x192x160"]
166
+ "/outputs/184" -> "/outputs/185" [label="1x32x80x192x160"]
167
+ "/outputs/185" -> "/outputs/186" [label="1x32x80x192x160"]
168
+ "/outputs/186" -> "/outputs/187" [label="1x32x80x192x160"]
169
+ "/outputs/187" -> "/outputs/188" [label="1x32x80x192x160"]
170
+ "/outputs/188" -> "/outputs/189" [label="1x32x80x192x160"]
171
+ }
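network_architecture is a Graphviz DOT description of the PlainConvUNet encoder-decoder, with tensor shapes annotated on the edges. The training logs below show the architecture plot failing because the dot executable was missing; a minimal sketch of rendering the exported file is given here, assuming the graphviz Python package and the Graphviz system binaries are installed (path hypothetical).

import graphviz

# Requires the Graphviz "dot" binary on PATH in addition to the Python package.
src = graphviz.Source.from_file("fold_0/network_architecture")
src.render("network_architecture", format="png", cleanup=True)  # writes network_architecture.png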
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/progress.png ADDED
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_11_21_50_08.txt ADDED
@@ -0,0 +1,26 @@
+
+ #######################################################################
+ Please cite the following paper when using nnU-Net:
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
+ #######################################################################
+
+
+ This is the configuration used by this training:
+ Configuration name: 3d_fullres
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [240.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
+
+ These are the global plan.json settings:
+ {'dataset_name': 'Dataset720_TSPrime', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [240, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 3071.0, 'mean': -12.232562065124512, 'median': -21.0, 'min': -1000.0, 'percentile_00_5': -793.0, 'percentile_99_5': 974.0, 'std': 168.24203491210938}}}
+
+ 2023-10-11 21:50:09.984604: unpacking dataset...
+ 2023-10-11 21:51:22.972373: unpacking done...
+ 2023-10-11 21:51:23.374131: do_dummy_2d_data_aug: False
+ 2023-10-11 21:51:23.375596: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset720_TSPrime/splits_final.json
+ 2023-10-11 21:51:23.541955: The split file contains 5 splits.
+ 2023-10-11 21:51:23.542130: Desired fold for training: 0
+ 2023-10-11 21:51:23.542256: This split has 20 training and 5 validation cases.
+ 2023-10-11 21:52:06.109724: Unable to plot network architecture:
+ 2023-10-11 21:52:06.109830: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
+ 2023-10-11 21:52:06.212852:
+ 2023-10-11 21:52:06.212922: Epoch 0
+ 2023-10-11 21:52:06.213036: Current learning rate: 0.01
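Judging by the folder name, a run like the one logged above would typically be launched with the standard nnU-Net v2 CLI, e.g. nnUNetv2_train 720 3d_fullres 0 -tr nnUNetTrainerNoMirroring; treat the exact invocation as an assumption, since the command itself is not part of this upload. A minimal sketch of scanning such a log for its epoch and learning-rate lines follows (path hypothetical).

import re
from pathlib import Path

# Hypothetical path; any of the training_log_*.txt files in fold_0 has the same format.
log = Path("fold_0/training_log_2023_10_11_21_50_08.txt").read_text()

epochs = re.findall(r"Epoch (\d+)", log)
lrs = re.findall(r"Current learning rate: ([\d.e-]+)", log)
print("epochs logged:", epochs[:3], "learning rates:", lrs[:3])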
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_11_21_56_01.txt ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_fullres
10
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 3, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [240.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset720_TSPrime', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [240, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 3071.0, 'mean': -12.232562065124512, 'median': -21.0, 'min': -1000.0, 'percentile_00_5': -793.0, 'percentile_99_5': 974.0, 'std': 168.24203491210938}}}
14
+
15
+ 2023-10-11 21:56:02.926755: unpacking dataset...
16
+ 2023-10-11 21:56:06.110042: unpacking done...
17
+ 2023-10-11 21:56:06.110639: do_dummy_2d_data_aug: False
18
+ 2023-10-11 21:56:06.111117: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset720_TSPrime/splits_final.json
19
+ 2023-10-11 21:56:06.111239: The split file contains 5 splits.
20
+ 2023-10-11 21:56:06.111284: Desired fold for training: 0
21
+ 2023-10-11 21:56:06.111325: This split has 20 training and 5 validation cases.
22
+ 2023-10-11 21:56:27.774188: Unable to plot network architecture:
23
+ 2023-10-11 21:56:27.774289: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-10-11 21:56:27.879786:
25
+ 2023-10-11 21:56:27.879845: Epoch 0
26
+ 2023-10-11 21:56:27.879956: Current learning rate: 0.01
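The configuration printed above selects CTNormalization and records the dataset's foreground intensity statistics (percentile_00_5 -793.0, percentile_99_5 974.0, mean about -12.23, std about 168.24). The sketch below is a rough illustration of how such values are typically applied (clip to the recorded percentiles, then z-score with the recorded mean and standard deviation); it is not the nnU-Net source, and ct_normalize is a hypothetical helper name:

import numpy as np

# Foreground intensity statistics as printed in the plan settings above.
PERCENTILE_00_5 = -793.0
PERCENTILE_99_5 = 974.0
MEAN = -12.232562065124512
STD = 168.24203491210938

def ct_normalize(volume: np.ndarray) -> np.ndarray:
    # Clip to the dataset-wide foreground percentiles, then z-score.
    clipped = np.clip(volume.astype(np.float32), PERCENTILE_00_5, PERCENTILE_99_5)
    return (clipped - MEAN) / max(STD, 1e-8)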
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_11_22_29_34.txt ADDED
The diff for this file is too large to render. See raw diff
 
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_14_08_44_57.txt ADDED
@@ -0,0 +1,782 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_fullres
10
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset720_TSPrime', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1620.0, 'mean': -38.229164123535156, 'median': -54.0, 'min': -1000.0, 'percentile_00_5': -941.0, 'percentile_99_5': 897.0, 'std': 192.37086486816406}}}
14
+
15
+ 2023-10-14 08:44:59.641317: unpacking dataset...
16
+ 2023-10-14 08:45:03.933325: unpacking done...
17
+ 2023-10-14 08:45:03.934265: do_dummy_2d_data_aug: False
18
+ 2023-10-14 08:45:03.934828: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset720_TSPrime/splits_final.json
19
+ 2023-10-14 08:45:03.955630: The split file contains 5 splits.
20
+ 2023-10-14 08:45:03.955693: Desired fold for training: 0
21
+ 2023-10-14 08:45:03.955763: This split has 48 training and 12 validation cases.
22
+ 2023-10-14 08:45:26.437840: Unable to plot network architecture:
23
+ 2023-10-14 08:45:26.438034: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-10-14 08:45:26.536218:
25
+ 2023-10-14 08:45:26.536288: Epoch 550
26
+ 2023-10-14 08:45:26.536392: Current learning rate: 0.00487
27
+ 2023-10-14 08:53:03.674403: train_loss -0.7726
28
+ 2023-10-14 08:53:03.700099: val_loss -0.6799
29
+ 2023-10-14 08:53:03.700228: Pseudo dice [0.971, 0.8644, 0.908, 0.8821, 0.8677, 0.666]
30
+ 2023-10-14 08:53:03.700332: Epoch time: 457.14 s
31
+ 2023-10-14 08:53:05.057412:
32
+ 2023-10-14 08:53:05.057607: Epoch 551
33
+ 2023-10-14 08:53:05.057753: Current learning rate: 0.00486
34
+ 2023-10-14 08:58:50.064842: train_loss -0.758
35
+ 2023-10-14 08:58:50.064996: val_loss -0.682
36
+ 2023-10-14 08:58:50.065178: Pseudo dice [0.9698, 0.87, 0.9137, 0.9011, 0.8763, 0.7155]
37
+ 2023-10-14 08:58:50.065330: Epoch time: 345.01 s
38
+ 2023-10-14 08:58:51.281017:
39
+ 2023-10-14 08:58:51.281138: Epoch 552
40
+ 2023-10-14 08:58:51.281244: Current learning rate: 0.00485
41
+ 2023-10-14 09:04:36.496282: train_loss -0.7495
42
+ 2023-10-14 09:04:36.498847: val_loss -0.6299
43
+ 2023-10-14 09:04:36.498962: Pseudo dice [0.9742, 0.8785, 0.8972, 0.8699, 0.8523, 0.6534]
44
+ 2023-10-14 09:04:36.499053: Epoch time: 345.22 s
45
+ 2023-10-14 09:04:37.717736:
46
+ 2023-10-14 09:04:37.717844: Epoch 553
47
+ 2023-10-14 09:04:37.717949: Current learning rate: 0.00484
48
+ 2023-10-14 09:10:22.975977: train_loss -0.7464
49
+ 2023-10-14 09:10:22.976139: val_loss -0.6956
50
+ 2023-10-14 09:10:22.976246: Pseudo dice [0.9605, 0.8878, 0.9084, 0.9079, 0.8974, 0.6374]
51
+ 2023-10-14 09:10:22.976335: Epoch time: 345.26 s
52
+ 2023-10-14 09:10:24.198349:
53
+ 2023-10-14 09:10:24.198598: Epoch 554
54
+ 2023-10-14 09:10:24.198841: Current learning rate: 0.00484
55
+ 2023-10-14 09:16:09.415752: train_loss -0.7741
56
+ 2023-10-14 09:16:09.415916: val_loss -0.6677
57
+ 2023-10-14 09:16:09.416036: Pseudo dice [0.9697, 0.8769, 0.8922, 0.8684, 0.9019, 0.6411]
58
+ 2023-10-14 09:16:09.416137: Epoch time: 345.22 s
59
+ 2023-10-14 09:16:10.620770:
60
+ 2023-10-14 09:16:10.620962: Epoch 555
61
+ 2023-10-14 09:16:10.621224: Current learning rate: 0.00483
62
+ 2023-10-14 09:21:55.950917: train_loss -0.7982
63
+ 2023-10-14 09:21:55.951100: val_loss -0.6904
64
+ 2023-10-14 09:21:55.951208: Pseudo dice [0.9724, 0.8793, 0.9091, 0.8707, 0.878, 0.6579]
65
+ 2023-10-14 09:21:55.951298: Epoch time: 345.33 s
66
+ 2023-10-14 09:21:57.192797:
67
+ 2023-10-14 09:21:57.192911: Epoch 556
68
+ 2023-10-14 09:21:57.193016: Current learning rate: 0.00482
69
+ 2023-10-14 09:27:42.440943: train_loss -0.7506
70
+ 2023-10-14 09:27:42.441109: val_loss -0.6581
71
+ 2023-10-14 09:27:42.441239: Pseudo dice [0.9699, 0.8746, 0.9064, 0.9102, 0.8858, 0.6497]
72
+ 2023-10-14 09:27:42.441343: Epoch time: 345.25 s
73
+ 2023-10-14 09:27:43.650142:
74
+ 2023-10-14 09:27:43.650428: Epoch 557
75
+ 2023-10-14 09:27:43.650620: Current learning rate: 0.00481
76
+ 2023-10-14 09:33:28.773947: train_loss -0.7664
77
+ 2023-10-14 09:33:28.774114: val_loss -0.6489
78
+ 2023-10-14 09:33:28.774237: Pseudo dice [0.9673, 0.8709, 0.8961, 0.8558, 0.8833, 0.668]
79
+ 2023-10-14 09:33:28.774338: Epoch time: 345.12 s
80
+ 2023-10-14 09:33:30.001891:
81
+ 2023-10-14 09:33:30.002107: Epoch 558
82
+ 2023-10-14 09:33:30.002374: Current learning rate: 0.0048
83
+ 2023-10-14 09:39:15.284474: train_loss -0.7648
84
+ 2023-10-14 09:39:15.284639: val_loss -0.667
85
+ 2023-10-14 09:39:15.284746: Pseudo dice [0.9739, 0.8739, 0.9031, 0.9063, 0.8898, 0.6799]
86
+ 2023-10-14 09:39:15.284836: Epoch time: 345.28 s
87
+ 2023-10-14 09:39:16.520813:
88
+ 2023-10-14 09:39:16.520922: Epoch 559
89
+ 2023-10-14 09:39:16.521024: Current learning rate: 0.00479
90
+ 2023-10-14 09:45:01.838422: train_loss -0.7745
91
+ 2023-10-14 09:45:01.838580: val_loss -0.7023
92
+ 2023-10-14 09:45:01.838696: Pseudo dice [0.9725, 0.8726, 0.8938, 0.9313, 0.8859, 0.6822]
93
+ 2023-10-14 09:45:01.838784: Epoch time: 345.32 s
94
+ 2023-10-14 09:45:03.072559:
95
+ 2023-10-14 09:45:03.072677: Epoch 560
96
+ 2023-10-14 09:45:03.072782: Current learning rate: 0.00478
97
+ 2023-10-14 09:50:48.482734: train_loss -0.7542
98
+ 2023-10-14 09:50:48.482884: val_loss -0.6193
99
+ 2023-10-14 09:50:48.483016: Pseudo dice [0.9709, 0.8778, 0.8924, 0.8774, 0.9118, 0.6265]
100
+ 2023-10-14 09:50:48.483118: Epoch time: 345.41 s
101
+ 2023-10-14 09:50:49.873660:
102
+ 2023-10-14 09:50:49.873856: Epoch 561
103
+ 2023-10-14 09:50:49.874034: Current learning rate: 0.00477
104
+ 2023-10-14 09:56:35.256728: train_loss -0.7759
105
+ 2023-10-14 09:56:35.256931: val_loss -0.6749
106
+ 2023-10-14 09:56:35.257066: Pseudo dice [0.9724, 0.8705, 0.8941, 0.8893, 0.8904, 0.6678]
107
+ 2023-10-14 09:56:35.257167: Epoch time: 345.38 s
108
+ 2023-10-14 09:56:36.487928:
109
+ 2023-10-14 09:56:36.488053: Epoch 562
110
+ 2023-10-14 09:56:36.488159: Current learning rate: 0.00476
111
+ 2023-10-14 10:02:21.787369: train_loss -0.7461
112
+ 2023-10-14 10:02:21.787533: val_loss -0.6248
113
+ 2023-10-14 10:02:21.788099: Pseudo dice [0.9743, 0.8629, 0.8877, 0.8809, 0.8345, 0.6541]
114
+ 2023-10-14 10:02:21.788211: Epoch time: 345.3 s
115
+ 2023-10-14 10:02:23.005492:
116
+ 2023-10-14 10:02:23.005673: Epoch 563
117
+ 2023-10-14 10:02:23.005828: Current learning rate: 0.00475
118
+ 2023-10-14 10:08:08.231398: train_loss -0.7442
119
+ 2023-10-14 10:08:08.231559: val_loss -0.6617
120
+ 2023-10-14 10:08:08.231754: Pseudo dice [0.9706, 0.8615, 0.9109, 0.8754, 0.8734, 0.6588]
121
+ 2023-10-14 10:08:08.231893: Epoch time: 345.23 s
122
+ 2023-10-14 10:08:09.463090:
123
+ 2023-10-14 10:08:09.463255: Epoch 564
124
+ 2023-10-14 10:08:09.463451: Current learning rate: 0.00474
125
+ 2023-10-14 10:13:54.732506: train_loss -0.7684
126
+ 2023-10-14 10:13:54.732661: val_loss -0.6995
127
+ 2023-10-14 10:13:54.732769: Pseudo dice [0.9735, 0.8821, 0.9084, 0.8772, 0.9093, 0.6494]
128
+ 2023-10-14 10:13:54.732857: Epoch time: 345.27 s
129
+ 2023-10-14 10:13:55.953084:
130
+ 2023-10-14 10:13:55.953255: Epoch 565
131
+ 2023-10-14 10:13:55.953446: Current learning rate: 0.00473
132
+ 2023-10-14 10:19:41.225027: train_loss -0.7761
133
+ 2023-10-14 10:19:41.225191: val_loss -0.683
134
+ 2023-10-14 10:19:41.225320: Pseudo dice [0.971, 0.8826, 0.91, 0.9083, 0.9002, 0.7113]
135
+ 2023-10-14 10:19:41.225426: Epoch time: 345.27 s
136
+ 2023-10-14 10:19:42.462402:
137
+ 2023-10-14 10:19:42.462528: Epoch 566
138
+ 2023-10-14 10:19:42.462646: Current learning rate: 0.00472
139
+ 2023-10-14 10:25:27.782266: train_loss -0.7355
140
+ 2023-10-14 10:25:27.782458: val_loss -0.7007
141
+ 2023-10-14 10:25:27.782598: Pseudo dice [0.9663, 0.8682, 0.9063, 0.8794, 0.8787, 0.6951]
142
+ 2023-10-14 10:25:27.782697: Epoch time: 345.32 s
143
+ 2023-10-14 10:25:28.998262:
144
+ 2023-10-14 10:25:28.998374: Epoch 567
145
+ 2023-10-14 10:25:28.998532: Current learning rate: 0.00471
146
+ 2023-10-14 10:31:14.337671: train_loss -0.7735
147
+ 2023-10-14 10:31:14.337815: val_loss -0.7058
148
+ 2023-10-14 10:31:14.337922: Pseudo dice [0.9742, 0.879, 0.9126, 0.883, 0.8763, 0.6931]
149
+ 2023-10-14 10:31:14.338011: Epoch time: 345.34 s
150
+ 2023-10-14 10:31:15.750798:
151
+ 2023-10-14 10:31:15.750981: Epoch 568
152
+ 2023-10-14 10:31:15.751223: Current learning rate: 0.0047
153
+ 2023-10-14 10:37:01.195973: train_loss -0.7652
154
+ 2023-10-14 10:37:01.196126: val_loss -0.7085
155
+ 2023-10-14 10:37:01.196294: Pseudo dice [0.9726, 0.8601, 0.9137, 0.8982, 0.8773, 0.6364]
156
+ 2023-10-14 10:37:01.196413: Epoch time: 345.45 s
157
+ 2023-10-14 10:37:02.437130:
158
+ 2023-10-14 10:37:02.437374: Epoch 569
159
+ 2023-10-14 10:37:02.437515: Current learning rate: 0.00469
160
+ 2023-10-14 10:42:47.777742: train_loss -0.7729
161
+ 2023-10-14 10:42:47.777902: val_loss -0.6988
162
+ 2023-10-14 10:42:47.778010: Pseudo dice [0.9685, 0.8696, 0.9031, 0.8781, 0.8985, 0.6406]
163
+ 2023-10-14 10:42:47.778097: Epoch time: 345.34 s
164
+ 2023-10-14 10:42:49.021302:
165
+ 2023-10-14 10:42:49.021410: Epoch 570
166
+ 2023-10-14 10:42:49.021516: Current learning rate: 0.00468
167
+ 2023-10-14 10:48:34.305914: train_loss -0.7413
168
+ 2023-10-14 10:48:34.306067: val_loss -0.7214
169
+ 2023-10-14 10:48:34.306176: Pseudo dice [0.9726, 0.8777, 0.9129, 0.9223, 0.8883, 0.6428]
170
+ 2023-10-14 10:48:34.306266: Epoch time: 345.29 s
171
+ 2023-10-14 10:48:35.529996:
172
+ 2023-10-14 10:48:35.530115: Epoch 571
173
+ 2023-10-14 10:48:35.530219: Current learning rate: 0.00467
174
+ 2023-10-14 10:54:20.924050: train_loss -0.7771
175
+ 2023-10-14 10:54:20.924205: val_loss -0.6726
176
+ 2023-10-14 10:54:20.924314: Pseudo dice [0.972, 0.871, 0.9062, 0.9067, 0.8683, 0.6494]
177
+ 2023-10-14 10:54:20.924403: Epoch time: 345.39 s
178
+ 2023-10-14 10:54:22.148553:
179
+ 2023-10-14 10:54:22.148817: Epoch 572
180
+ 2023-10-14 10:54:22.149071: Current learning rate: 0.00466
181
+ 2023-10-14 11:00:07.467053: train_loss -0.7492
182
+ 2023-10-14 11:00:07.467210: val_loss -0.6786
183
+ 2023-10-14 11:00:07.467318: Pseudo dice [0.9718, 0.8647, 0.9095, 0.8812, 0.8904, 0.6684]
184
+ 2023-10-14 11:00:07.467407: Epoch time: 345.32 s
185
+ 2023-10-14 11:00:09.043088:
186
+ 2023-10-14 11:00:09.043208: Epoch 573
187
+ 2023-10-14 11:00:09.043312: Current learning rate: 0.00465
188
+ 2023-10-14 11:05:54.443021: train_loss -0.7607
189
+ 2023-10-14 11:05:54.443180: val_loss -0.7221
190
+ 2023-10-14 11:05:54.443288: Pseudo dice [0.9718, 0.8747, 0.9108, 0.8771, 0.8841, 0.6492]
191
+ 2023-10-14 11:05:54.443378: Epoch time: 345.4 s
192
+ 2023-10-14 11:05:55.779569:
193
+ 2023-10-14 11:05:55.779737: Epoch 574
194
+ 2023-10-14 11:05:55.779896: Current learning rate: 0.00464
195
+ 2023-10-14 11:11:41.234959: train_loss -0.7702
196
+ 2023-10-14 11:11:41.235169: val_loss -0.7058
197
+ 2023-10-14 11:11:41.235277: Pseudo dice [0.9711, 0.8677, 0.9159, 0.8803, 0.8847, 0.6971]
198
+ 2023-10-14 11:11:41.235377: Epoch time: 345.46 s
199
+ 2023-10-14 11:11:42.492971:
200
+ 2023-10-14 11:11:42.493136: Epoch 575
201
+ 2023-10-14 11:11:42.493296: Current learning rate: 0.00463
202
+ 2023-10-14 11:17:27.932884: train_loss -0.7933
203
+ 2023-10-14 11:17:27.933038: val_loss -0.6719
204
+ 2023-10-14 11:17:27.933154: Pseudo dice [0.9695, 0.8583, 0.9105, 0.8701, 0.896, 0.6404]
205
+ 2023-10-14 11:17:27.933241: Epoch time: 345.44 s
206
+ 2023-10-14 11:17:29.175566:
207
+ 2023-10-14 11:17:29.175782: Epoch 576
208
+ 2023-10-14 11:17:29.175936: Current learning rate: 0.00462
209
+ 2023-10-14 11:23:14.508476: train_loss -0.7557
210
+ 2023-10-14 11:23:14.508634: val_loss -0.678
211
+ 2023-10-14 11:23:14.508741: Pseudo dice [0.9744, 0.8663, 0.9076, 0.8893, 0.9128, 0.6918]
212
+ 2023-10-14 11:23:14.508830: Epoch time: 345.33 s
213
+ 2023-10-14 11:23:15.748343:
214
+ 2023-10-14 11:23:15.750563: Epoch 577
215
+ 2023-10-14 11:23:15.750682: Current learning rate: 0.00461
216
+ 2023-10-14 11:29:01.167244: train_loss -0.7662
217
+ 2023-10-14 11:29:01.167400: val_loss -0.6888
218
+ 2023-10-14 11:29:01.167508: Pseudo dice [0.9724, 0.8683, 0.9002, 0.8689, 0.8669, 0.6766]
219
+ 2023-10-14 11:29:01.167598: Epoch time: 345.42 s
220
+ 2023-10-14 11:29:02.412012:
221
+ 2023-10-14 11:29:02.412133: Epoch 578
222
+ 2023-10-14 11:29:02.412236: Current learning rate: 0.0046
223
+ 2023-10-14 11:34:47.791409: train_loss -0.7781
224
+ 2023-10-14 11:34:47.791618: val_loss -0.6522
225
+ 2023-10-14 11:34:47.791725: Pseudo dice [0.9755, 0.8814, 0.8988, 0.8996, 0.8768, 0.6853]
226
+ 2023-10-14 11:34:47.791815: Epoch time: 345.38 s
227
+ 2023-10-14 11:34:49.055247:
228
+ 2023-10-14 11:34:49.055444: Epoch 579
229
+ 2023-10-14 11:34:49.055591: Current learning rate: 0.00459
230
+ 2023-10-14 11:40:34.502784: train_loss -0.7883
231
+ 2023-10-14 11:40:34.502934: val_loss -0.6966
232
+ 2023-10-14 11:40:34.503041: Pseudo dice [0.9716, 0.8709, 0.9169, 0.8648, 0.8968, 0.6263]
233
+ 2023-10-14 11:40:34.503128: Epoch time: 345.45 s
234
+ 2023-10-14 11:40:35.741698:
235
+ 2023-10-14 11:40:35.741818: Epoch 580
236
+ 2023-10-14 11:40:35.741922: Current learning rate: 0.00458
237
+ 2023-10-14 11:46:21.154979: train_loss -0.776
238
+ 2023-10-14 11:46:21.155136: val_loss -0.6672
239
+ 2023-10-14 11:46:21.155242: Pseudo dice [0.9714, 0.8654, 0.9134, 0.832, 0.8995, 0.6908]
240
+ 2023-10-14 11:46:21.155330: Epoch time: 345.41 s
241
+ 2023-10-14 11:46:22.391256:
242
+ 2023-10-14 11:46:22.391378: Epoch 581
243
+ 2023-10-14 11:46:22.391483: Current learning rate: 0.00457
244
+ 2023-10-14 11:52:07.812470: train_loss -0.7667
245
+ 2023-10-14 11:52:07.812626: val_loss -0.6644
246
+ 2023-10-14 11:52:07.812735: Pseudo dice [0.9703, 0.8694, 0.9021, 0.9013, 0.9243, 0.6478]
247
+ 2023-10-14 11:52:07.812823: Epoch time: 345.42 s
248
+ 2023-10-14 11:52:09.212539:
249
+ 2023-10-14 11:52:09.212670: Epoch 582
250
+ 2023-10-14 11:52:09.212773: Current learning rate: 0.00456
251
+ 2023-10-14 11:57:54.671791: train_loss -0.7335
252
+ 2023-10-14 11:57:54.671950: val_loss -0.674
253
+ 2023-10-14 11:57:54.672076: Pseudo dice [0.9707, 0.8749, 0.9071, 0.8735, 0.8597, 0.6389]
254
+ 2023-10-14 11:57:54.672175: Epoch time: 345.46 s
255
+ 2023-10-14 11:57:55.910544:
256
+ 2023-10-14 11:57:55.910660: Epoch 583
257
+ 2023-10-14 11:57:55.910774: Current learning rate: 0.00455
258
+ 2023-10-14 12:03:41.224444: train_loss -0.7809
259
+ 2023-10-14 12:03:41.224600: val_loss -0.6982
260
+ 2023-10-14 12:03:41.224707: Pseudo dice [0.9737, 0.8681, 0.91, 0.8818, 0.8758, 0.6505]
261
+ 2023-10-14 12:03:41.224799: Epoch time: 345.31 s
262
+ 2023-10-14 12:03:42.465361:
263
+ 2023-10-14 12:03:42.465484: Epoch 584
264
+ 2023-10-14 12:03:42.465588: Current learning rate: 0.00454
265
+ 2023-10-14 12:09:27.768269: train_loss -0.7659
266
+ 2023-10-14 12:09:27.768430: val_loss -0.6903
267
+ 2023-10-14 12:09:27.768552: Pseudo dice [0.9717, 0.8757, 0.9116, 0.8827, 0.8703, 0.6597]
268
+ 2023-10-14 12:09:27.768653: Epoch time: 345.3 s
269
+ 2023-10-14 12:09:29.019685:
270
+ 2023-10-14 12:09:29.019790: Epoch 585
271
+ 2023-10-14 12:09:29.019923: Current learning rate: 0.00453
272
+ 2023-10-14 12:15:14.337044: train_loss -0.78
273
+ 2023-10-14 12:15:14.337203: val_loss -0.6906
274
+ 2023-10-14 12:15:14.337310: Pseudo dice [0.9702, 0.8768, 0.8986, 0.8672, 0.8984, 0.6971]
275
+ 2023-10-14 12:15:14.337398: Epoch time: 345.32 s
276
+ 2023-10-14 12:15:15.580557:
277
+ 2023-10-14 12:15:15.580729: Epoch 586
278
+ 2023-10-14 12:15:15.580879: Current learning rate: 0.00452
279
+ 2023-10-14 12:21:00.794588: train_loss -0.7613
280
+ 2023-10-14 12:21:00.794739: val_loss -0.6561
281
+ 2023-10-14 12:21:00.794865: Pseudo dice [0.9704, 0.869, 0.8913, 0.86, 0.8707, 0.6004]
282
+ 2023-10-14 12:21:00.794974: Epoch time: 345.21 s
283
+ 2023-10-14 12:21:02.047369:
284
+ 2023-10-14 12:21:02.047487: Epoch 587
285
+ 2023-10-14 12:21:02.047603: Current learning rate: 0.00451
286
+ 2023-10-14 12:26:47.170517: train_loss -0.7592
287
+ 2023-10-14 12:26:47.170678: val_loss -0.7093
288
+ 2023-10-14 12:26:47.170804: Pseudo dice [0.9738, 0.8839, 0.908, 0.8469, 0.8882, 0.6799]
289
+ 2023-10-14 12:26:47.170903: Epoch time: 345.12 s
290
+ 2023-10-14 12:26:48.454413:
291
+ 2023-10-14 12:26:48.454673: Epoch 588
292
+ 2023-10-14 12:26:48.454905: Current learning rate: 0.0045
293
+ 2023-10-14 12:32:33.824881: train_loss -0.7433
294
+ 2023-10-14 12:32:33.825027: val_loss -0.7018
295
+ 2023-10-14 12:32:33.825155: Pseudo dice [0.9762, 0.8921, 0.9013, 0.8873, 0.8627, 0.6741]
296
+ 2023-10-14 12:32:33.825258: Epoch time: 345.37 s
297
+ 2023-10-14 12:32:35.246654:
298
+ 2023-10-14 12:32:35.246777: Epoch 589
299
+ 2023-10-14 12:32:35.246901: Current learning rate: 0.00449
300
+ 2023-10-14 12:38:20.571609: train_loss -0.7588
301
+ 2023-10-14 12:38:20.571753: val_loss -0.6777
302
+ 2023-10-14 12:38:20.571877: Pseudo dice [0.9714, 0.8796, 0.9018, 0.9064, 0.8735, 0.662]
303
+ 2023-10-14 12:38:20.571964: Epoch time: 345.33 s
304
+ 2023-10-14 12:38:21.826131:
305
+ 2023-10-14 12:38:21.826255: Epoch 590
306
+ 2023-10-14 12:38:21.826372: Current learning rate: 0.00448
307
+ 2023-10-14 12:44:07.192964: train_loss -0.7657
308
+ 2023-10-14 12:44:07.193155: val_loss -0.6732
309
+ 2023-10-14 12:44:07.193263: Pseudo dice [0.9743, 0.8746, 0.9044, 0.879, 0.9048, 0.6682]
310
+ 2023-10-14 12:44:07.193352: Epoch time: 345.37 s
311
+ 2023-10-14 12:44:08.441660:
312
+ 2023-10-14 12:44:08.441845: Epoch 591
313
+ 2023-10-14 12:44:08.441998: Current learning rate: 0.00447
314
+ 2023-10-14 12:49:53.791086: train_loss -0.7813
315
+ 2023-10-14 12:49:53.791235: val_loss -0.6501
316
+ 2023-10-14 12:49:53.791344: Pseudo dice [0.9734, 0.8784, 0.8948, 0.8317, 0.8887, 0.6691]
317
+ 2023-10-14 12:49:53.791434: Epoch time: 345.35 s
318
+ 2023-10-14 12:49:55.040131:
319
+ 2023-10-14 12:49:55.040316: Epoch 592
320
+ 2023-10-14 12:49:55.040468: Current learning rate: 0.00446
321
+ 2023-10-14 12:55:40.360149: train_loss -0.7668
322
+ 2023-10-14 12:55:40.360301: val_loss -0.71
323
+ 2023-10-14 12:55:40.360408: Pseudo dice [0.9715, 0.8831, 0.9131, 0.89, 0.9071, 0.6549]
324
+ 2023-10-14 12:55:40.360495: Epoch time: 345.32 s
325
+ 2023-10-14 12:55:41.608411:
326
+ 2023-10-14 12:55:41.608523: Epoch 593
327
+ 2023-10-14 12:55:41.608628: Current learning rate: 0.00445
328
+ 2023-10-14 13:01:27.035468: train_loss -0.7672
329
+ 2023-10-14 13:01:27.035642: val_loss -0.695
330
+ 2023-10-14 13:01:27.035750: Pseudo dice [0.9729, 0.8742, 0.9165, 0.8934, 0.8787, 0.6867]
331
+ 2023-10-14 13:01:27.035839: Epoch time: 345.43 s
332
+ 2023-10-14 13:01:28.273444:
333
+ 2023-10-14 13:01:28.273566: Epoch 594
334
+ 2023-10-14 13:01:28.273671: Current learning rate: 0.00444
335
+ 2023-10-14 13:07:13.489238: train_loss -0.7572
336
+ 2023-10-14 13:07:13.489397: val_loss -0.6996
337
+ 2023-10-14 13:07:13.489504: Pseudo dice [0.973, 0.8829, 0.8863, 0.9056, 0.8796, 0.6745]
338
+ 2023-10-14 13:07:13.489593: Epoch time: 345.22 s
339
+ 2023-10-14 13:07:14.721650:
340
+ 2023-10-14 13:07:14.721843: Epoch 595
341
+ 2023-10-14 13:07:14.722018: Current learning rate: 0.00443
342
+ 2023-10-14 13:12:59.954200: train_loss -0.77
343
+ 2023-10-14 13:12:59.954342: val_loss -0.6637
344
+ 2023-10-14 13:12:59.954451: Pseudo dice [0.9695, 0.8752, 0.9112, 0.9069, 0.8798, 0.6646]
345
+ 2023-10-14 13:12:59.954548: Epoch time: 345.23 s
346
+ 2023-10-14 13:13:01.391377:
347
+ 2023-10-14 13:13:01.391502: Epoch 596
348
+ 2023-10-14 13:13:01.391612: Current learning rate: 0.00442
349
+ 2023-10-14 13:18:46.692570: train_loss -0.7654
350
+ 2023-10-14 13:18:46.692728: val_loss -0.7086
351
+ 2023-10-14 13:18:46.692838: Pseudo dice [0.9725, 0.8777, 0.9106, 0.8923, 0.8924, 0.6852]
352
+ 2023-10-14 13:18:46.692928: Epoch time: 345.3 s
353
+ 2023-10-14 13:18:47.935062:
354
+ 2023-10-14 13:18:47.935189: Epoch 597
355
+ 2023-10-14 13:18:47.935295: Current learning rate: 0.00441
356
+ 2023-10-14 13:24:33.156263: train_loss -0.7385
357
+ 2023-10-14 13:24:33.156409: val_loss -0.6835
358
+ 2023-10-14 13:24:33.156517: Pseudo dice [0.974, 0.8899, 0.8993, 0.8955, 0.8543, 0.6984]
359
+ 2023-10-14 13:24:33.156605: Epoch time: 345.22 s
360
+ 2023-10-14 13:24:34.406686:
361
+ 2023-10-14 13:24:34.406871: Epoch 598
362
+ 2023-10-14 13:24:34.407012: Current learning rate: 0.0044
363
+ 2023-10-14 13:30:19.651484: train_loss -0.7959
364
+ 2023-10-14 13:30:19.651639: val_loss -0.6565
365
+ 2023-10-14 13:30:19.651747: Pseudo dice [0.9677, 0.8802, 0.9159, 0.8923, 0.8631, 0.689]
366
+ 2023-10-14 13:30:19.651837: Epoch time: 345.25 s
367
+ 2023-10-14 13:30:20.914225:
368
+ 2023-10-14 13:30:20.914349: Epoch 599
369
+ 2023-10-14 13:30:20.914452: Current learning rate: 0.00439
370
+ 2023-10-14 13:36:06.187173: train_loss -0.7701
371
+ 2023-10-14 13:36:06.187314: val_loss -0.7048
372
+ 2023-10-14 13:36:06.187428: Pseudo dice [0.9732, 0.8707, 0.9143, 0.8791, 0.8855, 0.6655]
373
+ 2023-10-14 13:36:06.187528: Epoch time: 345.27 s
374
+ 2023-10-14 13:36:09.221221:
375
+ 2023-10-14 13:36:09.221345: Epoch 600
376
+ 2023-10-14 13:36:09.221469: Current learning rate: 0.00438
377
+ 2023-10-14 13:41:54.536702: train_loss -0.758
378
+ 2023-10-14 13:41:54.562621: val_loss -0.6929
379
+ 2023-10-14 13:41:54.562764: Pseudo dice [0.9703, 0.8739, 0.8939, 0.904, 0.9011, 0.673]
380
+ 2023-10-14 13:41:54.562858: Epoch time: 345.32 s
381
+ 2023-10-14 13:41:55.812133:
382
+ 2023-10-14 13:41:55.812340: Epoch 601
383
+ 2023-10-14 13:41:55.812508: Current learning rate: 0.00437
384
+ 2023-10-14 13:47:41.026841: train_loss -0.7647
385
+ 2023-10-14 13:47:41.027003: val_loss -0.7232
386
+ 2023-10-14 13:47:41.027112: Pseudo dice [0.9738, 0.8782, 0.9069, 0.8824, 0.8664, 0.6583]
387
+ 2023-10-14 13:47:41.027201: Epoch time: 345.22 s
388
+ 2023-10-14 13:47:42.263849:
389
+ 2023-10-14 13:47:42.264035: Epoch 602
390
+ 2023-10-14 13:47:42.264179: Current learning rate: 0.00436
391
+ 2023-10-14 13:53:27.466546: train_loss -0.7432
392
+ 2023-10-14 13:53:27.466691: val_loss -0.7264
393
+ 2023-10-14 13:53:27.466818: Pseudo dice [0.972, 0.8614, 0.9016, 0.8538, 0.8984, 0.6688]
394
+ 2023-10-14 13:53:27.466918: Epoch time: 345.2 s
395
+ 2023-10-14 13:53:28.878375:
396
+ 2023-10-14 13:53:28.878511: Epoch 603
397
+ 2023-10-14 13:53:28.878630: Current learning rate: 0.00435
398
+ 2023-10-14 13:59:14.072809: train_loss -0.762
399
+ 2023-10-14 13:59:14.072963: val_loss -0.691
400
+ 2023-10-14 13:59:14.073070: Pseudo dice [0.9733, 0.8664, 0.911, 0.8664, 0.9055, 0.6559]
401
+ 2023-10-14 13:59:14.073159: Epoch time: 345.2 s
402
+ 2023-10-14 13:59:15.308695:
403
+ 2023-10-14 13:59:15.308889: Epoch 604
404
+ 2023-10-14 13:59:15.309061: Current learning rate: 0.00434
405
+ 2023-10-14 14:05:00.432752: train_loss -0.7551
406
+ 2023-10-14 14:05:00.432909: val_loss -0.6737
407
+ 2023-10-14 14:05:00.433038: Pseudo dice [0.9732, 0.8841, 0.8865, 0.8843, 0.8986, 0.6903]
408
+ 2023-10-14 14:05:00.433140: Epoch time: 345.12 s
409
+ 2023-10-14 14:05:01.668171:
410
+ 2023-10-14 14:05:01.668294: Epoch 605
411
+ 2023-10-14 14:05:01.668412: Current learning rate: 0.00433
412
+ 2023-10-14 14:10:46.701825: train_loss -0.7394
413
+ 2023-10-14 14:10:46.701970: val_loss -0.6639
414
+ 2023-10-14 14:10:46.702087: Pseudo dice [0.9726, 0.8663, 0.9001, 0.882, 0.81, 0.6324]
415
+ 2023-10-14 14:10:46.702175: Epoch time: 345.03 s
416
+ 2023-10-14 14:10:47.937988:
417
+ 2023-10-14 14:10:47.938114: Epoch 606
418
+ 2023-10-14 14:10:47.938219: Current learning rate: 0.00432
419
+ 2023-10-14 14:16:33.035287: train_loss -0.7721
420
+ 2023-10-14 14:16:33.035452: val_loss -0.7028
421
+ 2023-10-14 14:16:33.035562: Pseudo dice [0.9675, 0.8681, 0.9084, 0.8931, 0.8772, 0.7123]
422
+ 2023-10-14 14:16:33.035650: Epoch time: 345.1 s
423
+ 2023-10-14 14:16:34.287815:
424
+ 2023-10-14 14:16:34.287935: Epoch 607
425
+ 2023-10-14 14:16:34.288050: Current learning rate: 0.00431
426
+ 2023-10-14 14:22:19.361040: train_loss -0.7542
427
+ 2023-10-14 14:22:19.361205: val_loss -0.6814
428
+ 2023-10-14 14:22:19.361313: Pseudo dice [0.9678, 0.8706, 0.8949, 0.8421, 0.8567, 0.639]
429
+ 2023-10-14 14:22:19.361403: Epoch time: 345.07 s
430
+ 2023-10-14 14:22:20.597335:
431
+ 2023-10-14 14:22:20.597454: Epoch 608
432
+ 2023-10-14 14:22:20.597558: Current learning rate: 0.0043
433
+ 2023-10-14 14:28:05.865697: train_loss -0.7874
434
+ 2023-10-14 14:28:05.865849: val_loss -0.689
435
+ 2023-10-14 14:28:05.865958: Pseudo dice [0.9689, 0.8736, 0.8951, 0.8791, 0.9071, 0.6724]
436
+ 2023-10-14 14:28:05.866059: Epoch time: 345.27 s
437
+ 2023-10-14 14:28:07.096414:
438
+ 2023-10-14 14:28:07.096534: Epoch 609
439
+ 2023-10-14 14:28:07.096638: Current learning rate: 0.00429
440
+ 2023-10-14 14:33:52.213276: train_loss -0.7678
441
+ 2023-10-14 14:33:52.213439: val_loss -0.6511
442
+ 2023-10-14 14:33:52.213578: Pseudo dice [0.9682, 0.8618, 0.8946, 0.8588, 0.852, 0.6899]
443
+ 2023-10-14 14:33:52.213680: Epoch time: 345.12 s
444
+ 2023-10-14 14:33:53.617540:
445
+ 2023-10-14 14:33:53.617666: Epoch 610
446
+ 2023-10-14 14:33:53.617785: Current learning rate: 0.00429
447
+ 2023-10-14 14:39:38.881116: train_loss -0.7615
448
+ 2023-10-14 14:39:38.881281: val_loss -0.685
449
+ 2023-10-14 14:39:38.881410: Pseudo dice [0.9724, 0.875, 0.9019, 0.9056, 0.913, 0.686]
450
+ 2023-10-14 14:39:38.881510: Epoch time: 345.26 s
451
+ 2023-10-14 14:39:40.115401:
452
+ 2023-10-14 14:39:40.115527: Epoch 611
453
+ 2023-10-14 14:39:40.115643: Current learning rate: 0.00428
454
+ 2023-10-14 14:45:25.545999: train_loss -0.7808
455
+ 2023-10-14 14:45:25.546160: val_loss -0.7257
456
+ 2023-10-14 14:45:25.546287: Pseudo dice [0.973, 0.8702, 0.9215, 0.8727, 0.8979, 0.638]
457
+ 2023-10-14 14:45:25.546388: Epoch time: 345.43 s
458
+ 2023-10-14 14:45:26.776140:
459
+ 2023-10-14 14:45:26.776267: Epoch 612
460
+ 2023-10-14 14:45:26.776371: Current learning rate: 0.00427
461
+ 2023-10-14 14:51:12.205135: train_loss -0.779
462
+ 2023-10-14 14:51:12.205280: val_loss -0.7399
463
+ 2023-10-14 14:51:12.205399: Pseudo dice [0.9719, 0.8736, 0.9185, 0.8698, 0.8706, 0.7204]
464
+ 2023-10-14 14:51:12.205492: Epoch time: 345.43 s
465
+ 2023-10-14 14:51:13.434371:
466
+ 2023-10-14 14:51:13.434646: Epoch 613
467
+ 2023-10-14 14:51:13.434908: Current learning rate: 0.00426
468
+ 2023-10-14 14:56:58.723003: train_loss -0.755
469
+ 2023-10-14 14:56:58.723155: val_loss -0.687
470
+ 2023-10-14 14:56:58.723260: Pseudo dice [0.9701, 0.8712, 0.9066, 0.897, 0.8999, 0.6792]
471
+ 2023-10-14 14:56:58.723349: Epoch time: 345.29 s
472
+ 2023-10-14 14:56:59.948792:
473
+ 2023-10-14 14:56:59.948977: Epoch 614
474
+ 2023-10-14 14:56:59.949131: Current learning rate: 0.00425
475
+ 2023-10-14 15:02:45.222680: train_loss -0.7638
476
+ 2023-10-14 15:02:45.222840: val_loss -0.678
477
+ 2023-10-14 15:02:45.222947: Pseudo dice [0.9662, 0.8619, 0.9091, 0.878, 0.9066, 0.6529]
478
+ 2023-10-14 15:02:45.223037: Epoch time: 345.27 s
479
+ 2023-10-14 15:02:46.451826:
480
+ 2023-10-14 15:02:46.452006: Epoch 615
481
+ 2023-10-14 15:02:46.452170: Current learning rate: 0.00424
482
+ 2023-10-14 15:08:31.715137: train_loss -0.7744
483
+ 2023-10-14 15:08:31.715300: val_loss -0.649
484
+ 2023-10-14 15:08:31.715408: Pseudo dice [0.9734, 0.8757, 0.9048, 0.8251, 0.8372, 0.6787]
485
+ 2023-10-14 15:08:31.715497: Epoch time: 345.26 s
486
+ 2023-10-14 15:08:32.946714:
487
+ 2023-10-14 15:08:32.946826: Epoch 616
488
+ 2023-10-14 15:08:32.946929: Current learning rate: 0.00423
489
+ 2023-10-14 15:14:18.370008: train_loss -0.7348
490
+ 2023-10-14 15:14:18.370180: val_loss -0.7027
491
+ 2023-10-14 15:14:18.370288: Pseudo dice [0.9722, 0.8598, 0.9081, 0.9042, 0.8572, 0.6507]
492
+ 2023-10-14 15:14:18.370378: Epoch time: 345.42 s
493
+ 2023-10-14 15:14:19.774088:
494
+ 2023-10-14 15:14:19.774227: Epoch 617
495
+ 2023-10-14 15:14:19.774343: Current learning rate: 0.00422
496
+ 2023-10-14 15:20:05.151113: train_loss -0.7712
497
+ 2023-10-14 15:20:05.151275: val_loss -0.6953
498
+ 2023-10-14 15:20:05.151402: Pseudo dice [0.9739, 0.8773, 0.9011, 0.8979, 0.8697, 0.6598]
499
+ 2023-10-14 15:20:05.151501: Epoch time: 345.38 s
500
+ 2023-10-14 15:20:06.379423:
501
+ 2023-10-14 15:20:06.379615: Epoch 618
502
+ 2023-10-14 15:20:06.379761: Current learning rate: 0.00421
503
+ 2023-10-14 15:25:51.732977: train_loss -0.7603
504
+ 2023-10-14 15:25:51.733143: val_loss -0.6633
505
+ 2023-10-14 15:25:51.733252: Pseudo dice [0.973, 0.8762, 0.8955, 0.8469, 0.8547, 0.6434]
506
+ 2023-10-14 15:25:51.733340: Epoch time: 345.35 s
507
+ 2023-10-14 15:25:52.956948:
508
+ 2023-10-14 15:25:52.957223: Epoch 619
509
+ 2023-10-14 15:25:52.957383: Current learning rate: 0.0042
510
+ 2023-10-14 15:31:38.207267: train_loss -0.7639
511
+ 2023-10-14 15:31:38.207419: val_loss -0.6473
512
+ 2023-10-14 15:31:38.207534: Pseudo dice [0.9733, 0.8753, 0.9119, 0.9126, 0.8255, 0.6438]
513
+ 2023-10-14 15:31:38.207627: Epoch time: 345.25 s
514
+ 2023-10-14 15:31:39.433022:
515
+ 2023-10-14 15:31:39.433202: Epoch 620
516
+ 2023-10-14 15:31:39.433369: Current learning rate: 0.00419
517
+ 2023-10-14 15:37:24.750193: train_loss -0.7808
518
+ 2023-10-14 15:37:24.750353: val_loss -0.6758
519
+ 2023-10-14 15:37:24.750461: Pseudo dice [0.9724, 0.8676, 0.9047, 0.8751, 0.8983, 0.6504]
520
+ 2023-10-14 15:37:24.750569: Epoch time: 345.32 s
521
+ 2023-10-14 15:37:26.024541:
522
+ 2023-10-14 15:37:26.024662: Epoch 621
523
+ 2023-10-14 15:37:26.024769: Current learning rate: 0.00418
524
+ 2023-10-14 15:43:11.284546: train_loss -0.7552
525
+ 2023-10-14 15:43:11.284713: val_loss -0.6358
526
+ 2023-10-14 15:43:11.284839: Pseudo dice [0.9726, 0.8642, 0.8908, 0.8684, 0.9013, 0.6421]
527
+ 2023-10-14 15:43:11.284938: Epoch time: 345.26 s
528
+ 2023-10-14 15:43:12.513359:
529
+ 2023-10-14 15:43:12.513530: Epoch 622
530
+ 2023-10-14 15:43:12.513690: Current learning rate: 0.00417
531
+ 2023-10-14 15:48:57.758520: train_loss -0.771
532
+ 2023-10-14 15:48:57.758674: val_loss -0.6537
533
+ 2023-10-14 15:48:57.758806: Pseudo dice [0.9727, 0.8635, 0.9034, 0.8372, 0.8479, 0.6164]
534
+ 2023-10-14 15:48:57.758916: Epoch time: 345.25 s
535
+ 2023-10-14 15:48:58.997432:
536
+ 2023-10-14 15:48:58.997557: Epoch 623
537
+ 2023-10-14 15:48:58.997694: Current learning rate: 0.00416
538
+ 2023-10-14 15:54:44.139509: train_loss -0.792
539
+ 2023-10-14 15:54:44.139672: val_loss -0.6581
540
+ 2023-10-14 15:54:44.139781: Pseudo dice [0.9724, 0.8767, 0.9003, 0.8994, 0.8994, 0.6691]
541
+ 2023-10-14 15:54:44.139871: Epoch time: 345.14 s
542
+ 2023-10-14 15:54:45.546206:
543
+ 2023-10-14 15:54:45.546422: Epoch 624
544
+ 2023-10-14 15:54:45.546611: Current learning rate: 0.00415
545
+ 2023-10-14 16:00:30.779316: train_loss -0.7774
546
+ 2023-10-14 16:00:30.779478: val_loss -0.6956
547
+ 2023-10-14 16:00:30.779586: Pseudo dice [0.9714, 0.875, 0.9005, 0.8732, 0.8709, 0.6667]
548
+ 2023-10-14 16:00:30.779676: Epoch time: 345.23 s
549
+ 2023-10-14 16:00:32.006842:
550
+ 2023-10-14 16:00:32.006967: Epoch 625
551
+ 2023-10-14 16:00:32.007073: Current learning rate: 0.00414
552
+ 2023-10-14 16:06:17.287866: train_loss -0.7442
553
+ 2023-10-14 16:06:17.288028: val_loss -0.6678
554
+ 2023-10-14 16:06:17.288138: Pseudo dice [0.9715, 0.8641, 0.9098, 0.7926, 0.8992, 0.6277]
555
+ 2023-10-14 16:06:17.288226: Epoch time: 345.28 s
556
+ 2023-10-14 16:06:18.518711:
557
+ 2023-10-14 16:06:18.518934: Epoch 626
558
+ 2023-10-14 16:06:18.519116: Current learning rate: 0.00413
559
+ 2023-10-14 16:12:03.838896: train_loss -0.7532
560
+ 2023-10-14 16:12:03.839072: val_loss -0.6503
561
+ 2023-10-14 16:12:03.839226: Pseudo dice [0.9714, 0.8681, 0.9088, 0.8553, 0.8588, 0.6468]
562
+ 2023-10-14 16:12:03.839345: Epoch time: 345.32 s
563
+ 2023-10-14 16:12:05.069823:
564
+ 2023-10-14 16:12:05.069941: Epoch 627
565
+ 2023-10-14 16:12:05.070088: Current learning rate: 0.00412
566
+ 2023-10-14 16:17:50.271927: train_loss -0.7608
567
+ 2023-10-14 16:17:50.272089: val_loss -0.7213
568
+ 2023-10-14 16:17:50.272195: Pseudo dice [0.9718, 0.8799, 0.9169, 0.8719, 0.8759, 0.6543]
569
+ 2023-10-14 16:17:50.272283: Epoch time: 345.2 s
570
+ 2023-10-14 16:17:51.503624:
571
+ 2023-10-14 16:17:51.503731: Epoch 628
572
+ 2023-10-14 16:17:51.503853: Current learning rate: 0.00411
573
+ 2023-10-14 16:23:36.594076: train_loss -0.7626
574
+ 2023-10-14 16:23:36.594239: val_loss -0.6653
575
+ 2023-10-14 16:23:36.594346: Pseudo dice [0.9741, 0.8788, 0.9138, 0.8925, 0.8815, 0.6214]
576
+ 2023-10-14 16:23:36.594435: Epoch time: 345.09 s
577
+ 2023-10-14 16:23:37.819789:
578
+ 2023-10-14 16:23:37.819902: Epoch 629
579
+ 2023-10-14 16:23:37.820030: Current learning rate: 0.0041
580
+ 2023-10-14 16:29:22.992501: train_loss -0.7483
581
+ 2023-10-14 16:29:22.992672: val_loss -0.6787
582
+ 2023-10-14 16:29:22.992779: Pseudo dice [0.9755, 0.8821, 0.8981, 0.8838, 0.8583, 0.7084]
583
+ 2023-10-14 16:29:22.992866: Epoch time: 345.17 s
584
+ 2023-10-14 16:29:24.221108:
585
+ 2023-10-14 16:29:24.221229: Epoch 630
586
+ 2023-10-14 16:29:24.221332: Current learning rate: 0.00409
587
+ 2023-10-14 16:35:09.353429: train_loss -0.7734
588
+ 2023-10-14 16:35:09.353594: val_loss -0.6573
589
+ 2023-10-14 16:35:09.353700: Pseudo dice [0.9677, 0.8737, 0.8995, 0.9009, 0.9125, 0.6357]
590
+ 2023-10-14 16:35:09.353797: Epoch time: 345.13 s
591
+ 2023-10-14 16:35:10.758466:
592
+ 2023-10-14 16:35:10.758607: Epoch 631
593
+ 2023-10-14 16:35:10.758712: Current learning rate: 0.00408
594
+ 2023-10-14 16:40:55.876935: train_loss -0.7641
595
+ 2023-10-14 16:40:55.877102: val_loss -0.6507
596
+ 2023-10-14 16:40:55.877269: Pseudo dice [0.9708, 0.8651, 0.8982, 0.8435, 0.911, 0.6598]
597
+ 2023-10-14 16:40:55.877389: Epoch time: 345.12 s
598
+ 2023-10-14 16:40:57.107229:
599
+ 2023-10-14 16:40:57.107358: Epoch 632
600
+ 2023-10-14 16:40:57.107470: Current learning rate: 0.00407
601
+ 2023-10-14 16:46:42.215181: train_loss -0.7705
602
+ 2023-10-14 16:46:42.215334: val_loss -0.6972
603
+ 2023-10-14 16:46:42.215441: Pseudo dice [0.971, 0.8804, 0.8964, 0.8674, 0.9125, 0.6708]
604
+ 2023-10-14 16:46:42.215530: Epoch time: 345.11 s
605
+ 2023-10-14 16:46:43.440230:
606
+ 2023-10-14 16:46:43.440356: Epoch 633
607
+ 2023-10-14 16:46:43.440461: Current learning rate: 0.00406
608
+ 2023-10-14 16:52:28.556360: train_loss -0.7688
609
+ 2023-10-14 16:52:28.556517: val_loss -0.6371
610
+ 2023-10-14 16:52:28.556624: Pseudo dice [0.9736, 0.8787, 0.8855, 0.893, 0.8857, 0.6842]
611
+ 2023-10-14 16:52:28.556714: Epoch time: 345.12 s
612
+ 2023-10-14 16:52:29.781759:
613
+ 2023-10-14 16:52:29.781941: Epoch 634
614
+ 2023-10-14 16:52:29.782093: Current learning rate: 0.00405
615
+ 2023-10-14 16:58:14.835334: train_loss -0.7634
616
+ 2023-10-14 16:58:14.835504: val_loss -0.7116
617
+ 2023-10-14 16:58:14.835631: Pseudo dice [0.9733, 0.8843, 0.8985, 0.8945, 0.8925, 0.6721]
618
+ 2023-10-14 16:58:14.835730: Epoch time: 345.05 s
619
+ 2023-10-14 16:58:16.061664:
620
+ 2023-10-14 16:58:16.061890: Epoch 635
621
+ 2023-10-14 16:58:16.062093: Current learning rate: 0.00404
622
+ 2023-10-14 17:04:01.232498: train_loss -0.7566
623
+ 2023-10-14 17:04:01.232659: val_loss -0.6579
624
+ 2023-10-14 17:04:01.232765: Pseudo dice [0.9712, 0.8645, 0.8988, 0.9094, 0.9212, 0.6647]
625
+ 2023-10-14 17:04:01.232853: Epoch time: 345.17 s
626
+ 2023-10-14 17:04:02.460922:
627
+ 2023-10-14 17:04:02.461122: Epoch 636
628
+ 2023-10-14 17:04:02.461257: Current learning rate: 0.00403
629
+ 2023-10-14 17:09:47.632440: train_loss -0.7576
630
+ 2023-10-14 17:09:47.632618: val_loss -0.6736
631
+ 2023-10-14 17:09:47.632725: Pseudo dice [0.9737, 0.8742, 0.9068, 0.8537, 0.9097, 0.671]
632
+ 2023-10-14 17:09:47.632813: Epoch time: 345.17 s
633
+ 2023-10-14 17:09:49.027920:
634
+ 2023-10-14 17:09:49.028105: Epoch 637
635
+ 2023-10-14 17:09:49.028272: Current learning rate: 0.00402
636
+ 2023-10-14 17:15:34.278707: train_loss -0.7735
637
+ 2023-10-14 17:15:34.278879: val_loss -0.6382
638
+ 2023-10-14 17:15:34.278991: Pseudo dice [0.9737, 0.8726, 0.9154, 0.8873, 0.8475, 0.6396]
639
+ 2023-10-14 17:15:34.279081: Epoch time: 345.25 s
640
+ 2023-10-14 17:15:35.508776:
641
+ 2023-10-14 17:15:35.508887: Epoch 638
642
+ 2023-10-14 17:15:35.508996: Current learning rate: 0.00401
643
+ 2023-10-14 17:21:20.734416: train_loss -0.7648
644
+ 2023-10-14 17:21:20.734588: val_loss -0.7062
645
+ 2023-10-14 17:21:20.734701: Pseudo dice [0.9703, 0.8699, 0.9038, 0.905, 0.9083, 0.6299]
646
+ 2023-10-14 17:21:20.734789: Epoch time: 345.23 s
647
+ 2023-10-14 17:21:21.964436:
648
+ 2023-10-14 17:21:21.964605: Epoch 639
649
+ 2023-10-14 17:21:21.964769: Current learning rate: 0.004
650
+ 2023-10-14 17:27:07.221666: train_loss -0.7648
651
+ 2023-10-14 17:27:07.221833: val_loss -0.6922
652
+ 2023-10-14 17:27:07.221941: Pseudo dice [0.9724, 0.8682, 0.9092, 0.8575, 0.9191, 0.6126]
653
+ 2023-10-14 17:27:07.222028: Epoch time: 345.26 s
654
+ 2023-10-14 17:27:08.456499:
655
+ 2023-10-14 17:27:08.456623: Epoch 640
656
+ 2023-10-14 17:27:08.456729: Current learning rate: 0.00399
657
+ 2023-10-14 17:32:53.649395: train_loss -0.7576
658
+ 2023-10-14 17:32:53.649562: val_loss -0.6936
659
+ 2023-10-14 17:32:53.649688: Pseudo dice [0.9735, 0.8827, 0.9038, 0.8838, 0.8844, 0.6632]
660
+ 2023-10-14 17:32:53.649786: Epoch time: 345.19 s
661
+ 2023-10-14 17:32:54.871769:
662
+ 2023-10-14 17:32:54.871948: Epoch 641
663
+ 2023-10-14 17:32:54.872139: Current learning rate: 0.00398
664
+ 2023-10-14 17:38:39.993190: train_loss -0.752
665
+ 2023-10-14 17:38:39.993353: val_loss -0.656
666
+ 2023-10-14 17:38:39.993459: Pseudo dice [0.9706, 0.873, 0.9123, 0.9178, 0.9002, 0.678]
667
+ 2023-10-14 17:38:39.993546: Epoch time: 345.12 s
668
+ 2023-10-14 17:38:41.223131:
669
+ 2023-10-14 17:38:41.223406: Epoch 642
670
+ 2023-10-14 17:38:41.223625: Current learning rate: 0.00397
671
+ 2023-10-14 17:44:26.419851: train_loss -0.7631
672
+ 2023-10-14 17:44:26.420016: val_loss -0.6677
673
+ 2023-10-14 17:44:26.420125: Pseudo dice [0.9716, 0.8758, 0.9048, 0.8369, 0.8966, 0.5712]
674
+ 2023-10-14 17:44:26.420213: Epoch time: 345.2 s
675
+ 2023-10-14 17:44:27.645399:
676
+ 2023-10-14 17:44:27.645557: Epoch 643
677
+ 2023-10-14 17:44:27.645711: Current learning rate: 0.00396
678
+ 2023-10-14 17:50:12.877445: train_loss -0.7848
679
+ 2023-10-14 17:50:12.877609: val_loss -0.6909
680
+ 2023-10-14 17:50:12.877716: Pseudo dice [0.9734, 0.8767, 0.8875, 0.9047, 0.8963, 0.6348]
681
+ 2023-10-14 17:50:12.877804: Epoch time: 345.23 s
682
+ 2023-10-14 17:50:14.322228:
683
+ 2023-10-14 17:50:14.322353: Epoch 644
684
+ 2023-10-14 17:50:14.322461: Current learning rate: 0.00395
685
+ 2023-10-14 17:55:59.667900: train_loss -0.7964
686
+ 2023-10-14 17:55:59.668062: val_loss -0.6591
687
+ 2023-10-14 17:55:59.668168: Pseudo dice [0.9715, 0.8696, 0.9139, 0.8615, 0.8939, 0.6804]
688
+ 2023-10-14 17:55:59.668256: Epoch time: 345.35 s
689
+ 2023-10-14 17:56:00.962695:
690
+ 2023-10-14 17:56:00.963004: Epoch 645
691
+ 2023-10-14 17:56:00.963206: Current learning rate: 0.00394
692
+ 2023-10-14 18:01:46.228445: train_loss -0.7786
693
+ 2023-10-14 18:01:46.272744: val_loss -0.6726
694
+ 2023-10-14 18:01:46.273038: Pseudo dice [0.9693, 0.8762, 0.9047, 0.8573, 0.889, 0.665]
695
+ 2023-10-14 18:01:46.273135: Epoch time: 345.27 s
696
+ 2023-10-14 18:01:47.589313:
697
+ 2023-10-14 18:01:47.589441: Epoch 646
698
+ 2023-10-14 18:01:47.589548: Current learning rate: 0.00393
699
+ 2023-10-14 18:07:32.782410: train_loss -0.7808
700
+ 2023-10-14 18:07:32.806095: val_loss -0.665
701
+ 2023-10-14 18:07:32.806214: Pseudo dice [0.9722, 0.8793, 0.9051, 0.8769, 0.888, 0.6735]
702
+ 2023-10-14 18:07:32.806305: Epoch time: 345.19 s
703
+ 2023-10-14 18:07:34.099209:
704
+ 2023-10-14 18:07:34.099390: Epoch 647
705
+ 2023-10-14 18:07:34.099658: Current learning rate: 0.00392
706
+ 2023-10-14 18:13:19.320418: train_loss -0.7735
707
+ 2023-10-14 18:13:19.338535: val_loss -0.72
708
+ 2023-10-14 18:13:19.338682: Pseudo dice [0.9723, 0.8818, 0.9029, 0.9075, 0.9213, 0.6385]
709
+ 2023-10-14 18:13:19.338782: Epoch time: 345.22 s
710
+ 2023-10-14 18:13:20.618846:
711
+ 2023-10-14 18:13:20.618957: Epoch 648
712
+ 2023-10-14 18:13:20.619078: Current learning rate: 0.00391
713
+ 2023-10-14 18:19:05.833261: train_loss -0.7531
714
+ 2023-10-14 18:19:05.854315: val_loss -0.7067
715
+ 2023-10-14 18:19:05.854431: Pseudo dice [0.9717, 0.8727, 0.9134, 0.8782, 0.9091, 0.6161]
716
+ 2023-10-14 18:19:05.854533: Epoch time: 345.22 s
717
+ 2023-10-14 18:19:07.083223:
718
+ 2023-10-14 18:19:07.083346: Epoch 649
719
+ 2023-10-14 18:19:07.083539: Current learning rate: 0.0039
720
+ 2023-10-14 18:24:52.414732: train_loss -0.7412
721
+ 2023-10-14 18:24:52.414915: val_loss -0.6814
722
+ 2023-10-14 18:24:52.415040: Pseudo dice [0.9731, 0.8838, 0.9099, 0.8508, 0.9159, 0.6808]
723
+ 2023-10-14 18:24:52.415140: Epoch time: 345.33 s
724
+ 2023-10-14 18:24:55.490115:
725
+ 2023-10-14 18:24:55.490302: Epoch 650
726
+ 2023-10-14 18:24:55.490507: Current learning rate: 0.00389
727
+ 2023-10-14 18:30:40.699064: train_loss -0.7826
728
+ 2023-10-14 18:30:40.699532: val_loss -0.6758
729
+ 2023-10-14 18:30:40.699661: Pseudo dice [0.9737, 0.868, 0.8963, 0.8625, 0.8257, 0.6266]
730
+ 2023-10-14 18:30:40.699759: Epoch time: 345.21 s
731
+ 2023-10-14 18:30:42.128822:
732
+ 2023-10-14 18:30:42.129018: Epoch 651
733
+ 2023-10-14 18:30:42.129209: Current learning rate: 0.00388
734
+ 2023-10-14 18:36:27.414385: train_loss -0.7857
735
+ 2023-10-14 18:36:27.414559: val_loss -0.6537
736
+ 2023-10-14 18:36:27.414689: Pseudo dice [0.9745, 0.8709, 0.9013, 0.8435, 0.87, 0.6651]
737
+ 2023-10-14 18:36:27.414789: Epoch time: 345.29 s
738
+ 2023-10-14 18:36:28.638571:
739
+ 2023-10-14 18:36:28.638687: Epoch 652
740
+ 2023-10-14 18:36:28.638800: Current learning rate: 0.00387
741
+ 2023-10-14 18:42:13.898769: train_loss -0.7566
742
+ 2023-10-14 18:42:13.898952: val_loss -0.6672
743
+ 2023-10-14 18:42:13.899078: Pseudo dice [0.9721, 0.8805, 0.905, 0.8985, 0.8761, 0.6544]
744
+ 2023-10-14 18:42:13.899177: Epoch time: 345.26 s
745
+ 2023-10-14 18:42:15.124807:
746
+ 2023-10-14 18:42:15.125002: Epoch 653
747
+ 2023-10-14 18:42:15.125160: Current learning rate: 0.00386
748
+ 2023-10-14 18:48:00.439467: train_loss -0.7553
749
+ 2023-10-14 18:48:00.439612: val_loss -0.6673
750
+ 2023-10-14 18:48:00.439730: Pseudo dice [0.9724, 0.8704, 0.8984, 0.8762, 0.8767, 0.6696]
751
+ 2023-10-14 18:48:00.439824: Epoch time: 345.32 s
752
+ 2023-10-14 18:48:01.674665:
753
+ 2023-10-14 18:48:01.674914: Epoch 654
754
+ 2023-10-14 18:48:01.675084: Current learning rate: 0.00385
755
+ 2023-10-14 18:53:46.911062: train_loss -0.7787
756
+ 2023-10-14 18:53:46.911207: val_loss -0.6667
757
+ 2023-10-14 18:53:46.911326: Pseudo dice [0.9731, 0.8704, 0.9041, 0.8151, 0.8472, 0.6625]
758
+ 2023-10-14 18:53:46.911428: Epoch time: 345.24 s
759
+ 2023-10-14 18:53:48.138912:
760
+ 2023-10-14 18:53:48.139034: Epoch 655
761
+ 2023-10-14 18:53:48.139151: Current learning rate: 0.00384
762
+ 2023-10-14 18:59:33.442257: train_loss -0.7725
763
+ 2023-10-14 18:59:33.442420: val_loss -0.6797
764
+ 2023-10-14 18:59:33.442545: Pseudo dice [0.9744, 0.8791, 0.9072, 0.8569, 0.8972, 0.6969]
765
+ 2023-10-14 18:59:33.442636: Epoch time: 345.3 s
766
+ 2023-10-14 18:59:34.668129:
767
+ 2023-10-14 18:59:34.668350: Epoch 656
768
+ 2023-10-14 18:59:34.668544: Current learning rate: 0.00383
769
+ 2023-10-14 19:05:19.916166: train_loss -0.7891
770
+ 2023-10-14 19:05:19.916329: val_loss -0.6891
771
+ 2023-10-14 19:05:19.916438: Pseudo dice [0.9716, 0.8722, 0.8968, 0.8606, 0.8814, 0.6483]
772
+ 2023-10-14 19:05:19.916527: Epoch time: 345.25 s
773
+ 2023-10-14 19:05:21.143101:
774
+ 2023-10-14 19:05:21.143220: Epoch 657
775
+ 2023-10-14 19:05:21.143395: Current learning rate: 0.00382
776
+ 2023-10-14 19:11:06.492922: train_loss -0.748
777
+ 2023-10-14 19:11:06.493066: val_loss -0.6908
778
+ 2023-10-14 19:11:06.493173: Pseudo dice [0.9607, 0.8645, 0.9061, 0.8701, 0.8748, 0.654]
779
+ 2023-10-14 19:11:06.493262: Epoch time: 345.35 s
780
+ 2023-10-14 19:11:08.721048:
781
+ 2023-10-14 19:11:08.721277: Epoch 658
782
+ 2023-10-14 19:11:08.721462: Current learning rate: 0.00381
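The "Current learning rate" values printed in these logs (0.01 at epoch 0 in the earlier runs above, 0.00487 at epoch 550, 0.00389 at epoch 650, 0.00381 at epoch 658 here) are consistent with a polynomial decay of the form initial_lr * (1 - epoch / max_epochs) ** 0.9 over a 1000-epoch run. A minimal sketch that reproduces those numbers; poly_lr is a hypothetical helper, and the 1000-epoch budget and 0.9 exponent are assumptions inferred from the logged values rather than stated in this log:

def poly_lr(epoch: int, initial_lr: float = 0.01, max_epochs: int = 1000, exponent: float = 0.9) -> float:
    # Polynomial ("poly") learning-rate decay.
    return initial_lr * (1 - epoch / max_epochs) ** exponent

# Matches the logged values to the printed precision:
# poly_lr(0) = 0.01, poly_lr(550) ≈ 0.00487, poly_lr(650) ≈ 0.00389, poly_lr(658) ≈ 0.00381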
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/training_log_2023_10_16_11_52_25.txt ADDED
@@ -0,0 +1,887 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_fullres
10
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset720_TSPrime', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1620.0, 'mean': -38.229164123535156, 'median': -54.0, 'min': -1000.0, 'percentile_00_5': -941.0, 'percentile_99_5': 897.0, 'std': 192.37086486816406}}}
14
+
15
+ 2023-10-16 11:52:36.021841: unpacking dataset...
16
+ 2023-10-16 11:52:39.807831: unpacking done...
17
+ 2023-10-16 11:52:39.809407: do_dummy_2d_data_aug: False
18
+ 2023-10-16 11:52:39.809969: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset720_TSPrime/splits_final.json
19
+ 2023-10-16 11:52:39.826854: The split file contains 5 splits.
20
+ 2023-10-16 11:52:39.826942: Desired fold for training: 0
21
+ 2023-10-16 11:52:39.827013: This split has 48 training and 12 validation cases.
22
+ 2023-10-16 11:53:10.871201: Unable to plot network architecture:
23
+ 2023-10-16 11:53:10.871277: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-10-16 11:53:10.957982:
25
+ 2023-10-16 11:53:10.958041: Epoch 650
26
+ 2023-10-16 11:53:10.958157: Current learning rate: 0.00389
27
+ 2023-10-16 12:01:33.061845: train_loss -0.7657
28
+ 2023-10-16 12:01:33.126682: val_loss -0.6653
29
+ 2023-10-16 12:01:33.126856: Pseudo dice [0.9702, 0.8726, 0.8798, 0.9072, 0.9047, 0.6282]
30
+ 2023-10-16 12:01:33.126974: Epoch time: 502.1 s
31
+ 2023-10-16 12:01:35.984811:
32
+ 2023-10-16 12:01:35.985008: Epoch 651
33
+ 2023-10-16 12:01:35.985184: Current learning rate: 0.00388
34
+ 2023-10-16 12:07:22.139421: train_loss -0.773
35
+ 2023-10-16 12:07:22.139565: val_loss -0.6575
36
+ 2023-10-16 12:07:22.139684: Pseudo dice [0.9719, 0.8755, 0.8979, 0.8985, 0.9027, 0.6282]
37
+ 2023-10-16 12:07:22.139770: Epoch time: 346.16 s
38
+ 2023-10-16 12:07:23.367573:
39
+ 2023-10-16 12:07:23.367742: Epoch 652
40
+ 2023-10-16 12:07:23.367918: Current learning rate: 0.00387
41
+ 2023-10-16 12:13:09.660832: train_loss -0.7574
42
+ 2023-10-16 12:13:09.660987: val_loss -0.68
43
+ 2023-10-16 12:13:09.661093: Pseudo dice [0.9742, 0.8747, 0.9106, 0.8856, 0.8901, 0.6318]
44
+ 2023-10-16 12:13:09.661180: Epoch time: 346.29 s
45
+ 2023-10-16 12:13:10.920336:
46
+ 2023-10-16 12:13:10.920450: Epoch 653
47
+ 2023-10-16 12:13:10.920563: Current learning rate: 0.00386
48
+ 2023-10-16 12:18:57.220682: train_loss -0.7666
49
+ 2023-10-16 12:18:57.220850: val_loss -0.6945
50
+ 2023-10-16 12:18:57.220954: Pseudo dice [0.9703, 0.8777, 0.9185, 0.8674, 0.9026, 0.646]
51
+ 2023-10-16 12:18:57.221044: Epoch time: 346.3 s
52
+ 2023-10-16 12:18:58.643119:
53
+ 2023-10-16 12:18:58.643296: Epoch 654
54
+ 2023-10-16 12:18:58.643456: Current learning rate: 0.00385
55
+ 2023-10-16 12:24:45.117256: train_loss -0.7659
56
+ 2023-10-16 12:24:45.117415: val_loss -0.7027
57
+ 2023-10-16 12:24:45.117518: Pseudo dice [0.9738, 0.8922, 0.9142, 0.8803, 0.8772, 0.685]
58
+ 2023-10-16 12:24:45.117602: Epoch time: 346.47 s
59
+ 2023-10-16 12:24:46.332685:
60
+ 2023-10-16 12:24:46.332862: Epoch 655
61
+ 2023-10-16 12:24:46.333028: Current learning rate: 0.00384
62
+ 2023-10-16 12:30:32.836236: train_loss -0.7869
63
+ 2023-10-16 12:30:32.836398: val_loss -0.6938
64
+ 2023-10-16 12:30:32.836502: Pseudo dice [0.9713, 0.8664, 0.8991, 0.8783, 0.8994, 0.6656]
65
+ 2023-10-16 12:30:32.836591: Epoch time: 346.5 s
66
+ 2023-10-16 12:30:34.058037:
67
+ 2023-10-16 12:30:34.058140: Epoch 656
68
+ 2023-10-16 12:30:34.058258: Current learning rate: 0.00383
69
+ 2023-10-16 12:36:20.418155: train_loss -0.7692
70
+ 2023-10-16 12:36:20.418319: val_loss -0.6525
71
+ 2023-10-16 12:36:20.418422: Pseudo dice [0.9751, 0.8812, 0.9, 0.924, 0.8911, 0.6772]
72
+ 2023-10-16 12:36:20.418515: Epoch time: 346.36 s
73
+ 2023-10-16 12:36:21.640579:
74
+ 2023-10-16 12:36:21.640756: Epoch 657
75
+ 2023-10-16 12:36:21.640867: Current learning rate: 0.00382
76
+ 2023-10-16 12:42:08.193615: train_loss -0.7616
77
+ 2023-10-16 12:42:08.193762: val_loss -0.6783
78
+ 2023-10-16 12:42:08.193863: Pseudo dice [0.9695, 0.8775, 0.9076, 0.8641, 0.8654, 0.647]
79
+ 2023-10-16 12:42:08.193949: Epoch time: 346.55 s
80
+ 2023-10-16 12:42:09.417391:
81
+ 2023-10-16 12:42:09.417494: Epoch 658
82
+ 2023-10-16 12:42:09.417598: Current learning rate: 0.00381
83
+ 2023-10-16 12:47:55.878795: train_loss -0.75
84
+ 2023-10-16 12:47:55.878945: val_loss -0.7167
85
+ 2023-10-16 12:47:55.879050: Pseudo dice [0.971, 0.8696, 0.8976, 0.8648, 0.9152, 0.6271]
86
+ 2023-10-16 12:47:55.879137: Epoch time: 346.46 s
87
+ 2023-10-16 12:47:57.113813:
88
+ 2023-10-16 12:47:57.113915: Epoch 659
89
+ 2023-10-16 12:47:57.114023: Current learning rate: 0.0038
90
+ 2023-10-16 12:53:43.509501: train_loss -0.7638
91
+ 2023-10-16 12:53:43.509643: val_loss -0.6401
92
+ 2023-10-16 12:53:43.509757: Pseudo dice [0.9735, 0.8785, 0.9114, 0.8061, 0.8515, 0.6451]
93
+ 2023-10-16 12:53:43.509848: Epoch time: 346.4 s
94
+ 2023-10-16 12:53:44.733188:
95
+ 2023-10-16 12:53:44.733356: Epoch 660
96
+ 2023-10-16 12:53:44.733542: Current learning rate: 0.00379
97
+ 2023-10-16 12:59:31.136624: train_loss -0.7907
98
+ 2023-10-16 12:59:31.136807: val_loss -0.6695
99
+ 2023-10-16 12:59:31.136927: Pseudo dice [0.9751, 0.8736, 0.9003, 0.8785, 0.9003, 0.6579]
100
+ 2023-10-16 12:59:31.137013: Epoch time: 346.4 s
101
+ 2023-10-16 12:59:32.528379:
102
+ 2023-10-16 12:59:32.528518: Epoch 661
103
+ 2023-10-16 12:59:32.528620: Current learning rate: 0.00378
104
+ 2023-10-16 13:05:19.065362: train_loss -0.7425
105
+ 2023-10-16 13:05:19.065505: val_loss -0.6758
106
+ 2023-10-16 13:05:19.065624: Pseudo dice [0.9717, 0.8713, 0.8965, 0.8627, 0.9088, 0.7013]
107
+ 2023-10-16 13:05:19.065720: Epoch time: 346.54 s
108
+ 2023-10-16 13:05:20.282246:
109
+ 2023-10-16 13:05:20.282420: Epoch 662
110
+ 2023-10-16 13:05:20.282581: Current learning rate: 0.00377
111
+ 2023-10-16 13:11:06.956717: train_loss -0.7424
112
+ 2023-10-16 13:11:06.956860: val_loss -0.656
113
+ 2023-10-16 13:11:06.956975: Pseudo dice [0.9719, 0.8685, 0.8859, 0.8873, 0.9081, 0.6718]
114
+ 2023-10-16 13:11:06.957066: Epoch time: 346.68 s
115
+ 2023-10-16 13:11:08.177942:
116
+ 2023-10-16 13:11:08.178053: Epoch 663
117
+ 2023-10-16 13:11:08.178155: Current learning rate: 0.00376
118
+ 2023-10-16 13:16:55.040390: train_loss -0.7843
119
+ 2023-10-16 13:16:55.040555: val_loss -0.6718
120
+ 2023-10-16 13:16:55.040658: Pseudo dice [0.9739, 0.8831, 0.9084, 0.8481, 0.9041, 0.7072]
121
+ 2023-10-16 13:16:55.040744: Epoch time: 346.86 s
122
+ 2023-10-16 13:16:56.278773:
123
+ 2023-10-16 13:16:56.279001: Epoch 664
124
+ 2023-10-16 13:16:56.279125: Current learning rate: 0.00375
125
+ 2023-10-16 13:22:43.218038: train_loss -0.7699
126
+ 2023-10-16 13:22:43.218202: val_loss -0.6593
127
+ 2023-10-16 13:22:43.218335: Pseudo dice [0.9702, 0.8744, 0.9099, 0.8694, 0.9013, 0.6695]
128
+ 2023-10-16 13:22:43.218433: Epoch time: 346.94 s
129
+ 2023-10-16 13:22:44.439598:
130
+ 2023-10-16 13:22:44.439700: Epoch 665
131
+ 2023-10-16 13:22:44.439813: Current learning rate: 0.00374
132
+ 2023-10-16 13:28:31.328723: train_loss -0.7893
133
+ 2023-10-16 13:28:31.328860: val_loss -0.6805
134
+ 2023-10-16 13:28:31.328979: Pseudo dice [0.9727, 0.8743, 0.9098, 0.8652, 0.8763, 0.6501]
135
+ 2023-10-16 13:28:31.329065: Epoch time: 346.89 s
136
+ 2023-10-16 13:28:32.552882:
137
+ 2023-10-16 13:28:32.553001: Epoch 666
138
+ 2023-10-16 13:28:32.553102: Current learning rate: 0.00373
139
+ 2023-10-16 13:34:19.444855: train_loss -0.8055
140
+ 2023-10-16 13:34:19.444997: val_loss -0.714
141
+ 2023-10-16 13:34:19.445113: Pseudo dice [0.9722, 0.8727, 0.9127, 0.8944, 0.8632, 0.6484]
142
+ 2023-10-16 13:34:19.445198: Epoch time: 346.89 s
143
+ 2023-10-16 13:34:20.666053:
144
+ 2023-10-16 13:34:20.666156: Epoch 667
145
+ 2023-10-16 13:34:20.666275: Current learning rate: 0.00372
146
+ 2023-10-16 13:40:07.663087: train_loss -0.7773
147
+ 2023-10-16 13:40:07.663230: val_loss -0.6389
148
+ 2023-10-16 13:40:07.663347: Pseudo dice [0.9724, 0.8718, 0.8939, 0.8236, 0.8816, 0.6543]
149
+ 2023-10-16 13:40:07.663431: Epoch time: 347.0 s
150
+ 2023-10-16 13:40:09.075113:
151
+ 2023-10-16 13:40:09.075330: Epoch 668
152
+ 2023-10-16 13:40:09.075481: Current learning rate: 0.00371
153
+ 2023-10-16 13:45:55.930044: train_loss -0.7507
154
+ 2023-10-16 13:45:55.930204: val_loss -0.6692
155
+ 2023-10-16 13:45:55.930308: Pseudo dice [0.9716, 0.8709, 0.8977, 0.8937, 0.8658, 0.6703]
156
+ 2023-10-16 13:45:55.930395: Epoch time: 346.86 s
157
+ 2023-10-16 13:45:57.174367:
158
+ 2023-10-16 13:45:57.174487: Epoch 669
159
+ 2023-10-16 13:45:57.174600: Current learning rate: 0.0037
160
+ 2023-10-16 13:51:44.146317: train_loss -0.7798
161
+ 2023-10-16 13:51:44.146501: val_loss -0.6813
162
+ 2023-10-16 13:51:44.146618: Pseudo dice [0.9723, 0.8781, 0.8978, 0.9021, 0.919, 0.7104]
163
+ 2023-10-16 13:51:44.146704: Epoch time: 346.97 s
164
+ 2023-10-16 13:51:45.386658:
165
+ 2023-10-16 13:51:45.386854: Epoch 670
166
+ 2023-10-16 13:51:45.387032: Current learning rate: 0.00369
167
+ 2023-10-16 13:57:32.357284: train_loss -0.7779
168
+ 2023-10-16 13:57:32.357433: val_loss -0.6658
169
+ 2023-10-16 13:57:32.357549: Pseudo dice [0.9709, 0.8515, 0.905, 0.8648, 0.8305, 0.6363]
170
+ 2023-10-16 13:57:32.357636: Epoch time: 346.97 s
171
+ 2023-10-16 13:57:33.598728:
172
+ 2023-10-16 13:57:33.598853: Epoch 671
173
+ 2023-10-16 13:57:33.598966: Current learning rate: 0.00368
174
+ 2023-10-16 14:03:20.613798: train_loss -0.7601
175
+ 2023-10-16 14:03:20.613941: val_loss -0.6489
176
+ 2023-10-16 14:03:20.614055: Pseudo dice [0.975, 0.8824, 0.899, 0.8721, 0.8874, 0.65]
177
+ 2023-10-16 14:03:20.614146: Epoch time: 347.02 s
178
+ 2023-10-16 14:03:21.852867:
179
+ 2023-10-16 14:03:21.853044: Epoch 672
180
+ 2023-10-16 14:03:21.853190: Current learning rate: 0.00367
181
+ 2023-10-16 14:09:08.883238: train_loss -0.7634
182
+ 2023-10-16 14:09:08.883381: val_loss -0.706
183
+ 2023-10-16 14:09:08.883495: Pseudo dice [0.9726, 0.8749, 0.9215, 0.864, 0.9013, 0.6659]
184
+ 2023-10-16 14:09:08.883586: Epoch time: 347.03 s
185
+ 2023-10-16 14:09:10.117944:
186
+ 2023-10-16 14:09:10.118047: Epoch 673
187
+ 2023-10-16 14:09:10.118160: Current learning rate: 0.00366
188
+ 2023-10-16 14:14:57.067062: train_loss -0.7912
189
+ 2023-10-16 14:14:57.067231: val_loss -0.6643
190
+ 2023-10-16 14:14:57.067334: Pseudo dice [0.9719, 0.8677, 0.9038, 0.8649, 0.879, 0.6476]
191
+ 2023-10-16 14:14:57.067421: Epoch time: 346.95 s
192
+ 2023-10-16 14:14:58.304619:
193
+ 2023-10-16 14:14:58.304735: Epoch 674
194
+ 2023-10-16 14:14:58.304837: Current learning rate: 0.00365
195
+ 2023-10-16 14:20:45.279370: train_loss -0.7615
196
+ 2023-10-16 14:20:45.279514: val_loss -0.6747
197
+ 2023-10-16 14:20:45.279631: Pseudo dice [0.9713, 0.8788, 0.9164, 0.88, 0.9019, 0.6342]
198
+ 2023-10-16 14:20:45.279728: Epoch time: 346.98 s
199
+ 2023-10-16 14:20:46.698153:
200
+ 2023-10-16 14:20:46.698259: Epoch 675
201
+ 2023-10-16 14:20:46.698378: Current learning rate: 0.00364
202
+ 2023-10-16 14:26:33.759420: train_loss -0.7831
203
+ 2023-10-16 14:26:33.759569: val_loss -0.6407
204
+ 2023-10-16 14:26:33.759697: Pseudo dice [0.972, 0.8794, 0.8897, 0.905, 0.8675, 0.6662]
205
+ 2023-10-16 14:26:33.759787: Epoch time: 347.06 s
206
+ 2023-10-16 14:26:35.012318:
207
+ 2023-10-16 14:26:35.012434: Epoch 676
208
+ 2023-10-16 14:26:35.012539: Current learning rate: 0.00363
209
+ 2023-10-16 14:32:22.167936: train_loss -0.76
210
+ 2023-10-16 14:32:22.168096: val_loss -0.6501
211
+ 2023-10-16 14:32:22.168233: Pseudo dice [0.9728, 0.874, 0.8996, 0.8746, 0.8343, 0.639]
212
+ 2023-10-16 14:32:22.168343: Epoch time: 347.16 s
213
+ 2023-10-16 14:32:23.423537:
214
+ 2023-10-16 14:32:23.423660: Epoch 677
215
+ 2023-10-16 14:32:23.423763: Current learning rate: 0.00362
216
+ 2023-10-16 14:38:10.591294: train_loss -0.773
217
+ 2023-10-16 14:38:10.591457: val_loss -0.6392
218
+ 2023-10-16 14:38:10.591580: Pseudo dice [0.9732, 0.8753, 0.8913, 0.8908, 0.8786, 0.6712]
219
+ 2023-10-16 14:38:10.591680: Epoch time: 347.17 s
220
+ 2023-10-16 14:38:11.866211:
221
+ 2023-10-16 14:38:11.866316: Epoch 678
222
+ 2023-10-16 14:38:11.866436: Current learning rate: 0.00361
223
+ 2023-10-16 14:43:58.890313: train_loss -0.7868
224
+ 2023-10-16 14:43:58.890458: val_loss -0.6914
225
+ 2023-10-16 14:43:58.890584: Pseudo dice [0.9726, 0.8752, 0.9006, 0.8883, 0.8705, 0.6923]
226
+ 2023-10-16 14:43:58.890673: Epoch time: 347.02 s
227
+ 2023-10-16 14:44:00.139863:
228
+ 2023-10-16 14:44:00.139964: Epoch 679
229
+ 2023-10-16 14:44:00.140081: Current learning rate: 0.0036
230
+ 2023-10-16 14:49:47.252280: train_loss -0.7583
231
+ 2023-10-16 14:49:47.252435: val_loss -0.6891
232
+ 2023-10-16 14:49:47.252540: Pseudo dice [0.9735, 0.8717, 0.9093, 0.8589, 0.9095, 0.6843]
233
+ 2023-10-16 14:49:47.252628: Epoch time: 347.11 s
234
+ 2023-10-16 14:49:48.496274:
235
+ 2023-10-16 14:49:48.496376: Epoch 680
236
+ 2023-10-16 14:49:48.496490: Current learning rate: 0.00359
237
+ 2023-10-16 14:55:35.546312: train_loss -0.7443
238
+ 2023-10-16 14:55:35.546449: val_loss -0.6779
239
+ 2023-10-16 14:55:35.546575: Pseudo dice [0.972, 0.8794, 0.906, 0.8671, 0.8294, 0.6668]
240
+ 2023-10-16 14:55:35.546662: Epoch time: 347.05 s
241
+ 2023-10-16 14:55:36.785294:
242
+ 2023-10-16 14:55:36.785476: Epoch 681
243
+ 2023-10-16 14:55:36.785625: Current learning rate: 0.00358
244
+ 2023-10-16 15:01:23.742646: train_loss -0.7571
245
+ 2023-10-16 15:01:23.742798: val_loss -0.7238
246
+ 2023-10-16 15:01:23.742901: Pseudo dice [0.9724, 0.8781, 0.9204, 0.8934, 0.8916, 0.6764]
247
+ 2023-10-16 15:01:23.742988: Epoch time: 346.96 s
248
+ 2023-10-16 15:01:25.167395:
249
+ 2023-10-16 15:01:25.167524: Epoch 682
250
+ 2023-10-16 15:01:25.167639: Current learning rate: 0.00357
251
+ 2023-10-16 15:07:12.223466: train_loss -0.782
252
+ 2023-10-16 15:07:12.223630: val_loss -0.6878
253
+ 2023-10-16 15:07:12.223733: Pseudo dice [0.9723, 0.8679, 0.9029, 0.8468, 0.8487, 0.6375]
254
+ 2023-10-16 15:07:12.223820: Epoch time: 347.06 s
255
+ 2023-10-16 15:07:13.468253:
256
+ 2023-10-16 15:07:13.468360: Epoch 683
257
+ 2023-10-16 15:07:13.468473: Current learning rate: 0.00356
258
+ 2023-10-16 15:13:00.568304: train_loss -0.7683
259
+ 2023-10-16 15:13:00.568466: val_loss -0.7197
260
+ 2023-10-16 15:13:00.568593: Pseudo dice [0.9729, 0.888, 0.9113, 0.9009, 0.9092, 0.7284]
261
+ 2023-10-16 15:13:00.568692: Epoch time: 347.1 s
262
+ 2023-10-16 15:13:01.855891:
263
+ 2023-10-16 15:13:01.855999: Epoch 684
264
+ 2023-10-16 15:13:01.856115: Current learning rate: 0.00355
265
+ 2023-10-16 15:18:48.879060: train_loss -0.7536
266
+ 2023-10-16 15:18:48.879210: val_loss -0.6543
267
+ 2023-10-16 15:18:48.879330: Pseudo dice [0.9701, 0.8749, 0.9048, 0.851, 0.9098, 0.6551]
268
+ 2023-10-16 15:18:48.879416: Epoch time: 347.02 s
269
+ 2023-10-16 15:18:50.125212:
270
+ 2023-10-16 15:18:50.125395: Epoch 685
271
+ 2023-10-16 15:18:50.125538: Current learning rate: 0.00354
272
+ 2023-10-16 15:24:37.183900: train_loss -0.7768
273
+ 2023-10-16 15:24:37.184041: val_loss -0.7177
274
+ 2023-10-16 15:24:37.184159: Pseudo dice [0.9731, 0.8867, 0.9078, 0.8753, 0.8575, 0.6952]
275
+ 2023-10-16 15:24:37.184245: Epoch time: 347.06 s
276
+ 2023-10-16 15:24:38.425323:
277
+ 2023-10-16 15:24:38.425500: Epoch 686
278
+ 2023-10-16 15:24:38.425670: Current learning rate: 0.00353
279
+ 2023-10-16 15:30:25.450144: train_loss -0.7616
280
+ 2023-10-16 15:30:25.450284: val_loss -0.6811
281
+ 2023-10-16 15:30:25.450403: Pseudo dice [0.9726, 0.8773, 0.9077, 0.9011, 0.8914, 0.6842]
282
+ 2023-10-16 15:30:25.450490: Epoch time: 347.03 s
283
+ 2023-10-16 15:30:26.691565:
284
+ 2023-10-16 15:30:26.691684: Epoch 687
285
+ 2023-10-16 15:30:26.691785: Current learning rate: 0.00352
286
+ 2023-10-16 15:36:13.898718: train_loss -0.746
287
+ 2023-10-16 15:36:13.898895: val_loss -0.6756
288
+ 2023-10-16 15:36:13.899005: Pseudo dice [0.9733, 0.8742, 0.9142, 0.8642, 0.8651, 0.6719]
289
+ 2023-10-16 15:36:13.899095: Epoch time: 347.21 s
290
+ 2023-10-16 15:36:15.144222:
291
+ 2023-10-16 15:36:15.144325: Epoch 688
292
+ 2023-10-16 15:36:15.144439: Current learning rate: 0.00351
293
+ 2023-10-16 15:42:02.385429: train_loss -0.755
294
+ 2023-10-16 15:42:02.385591: val_loss -0.6726
295
+ 2023-10-16 15:42:02.385713: Pseudo dice [0.9694, 0.8737, 0.9169, 0.8593, 0.8885, 0.6376]
296
+ 2023-10-16 15:42:02.385812: Epoch time: 347.24 s
297
+ 2023-10-16 15:42:03.798766:
298
+ 2023-10-16 15:42:03.799072: Epoch 689
299
+ 2023-10-16 15:42:03.799268: Current learning rate: 0.0035
300
+ 2023-10-16 15:47:50.885578: train_loss -0.7973
301
+ 2023-10-16 15:47:50.885720: val_loss -0.7042
302
+ 2023-10-16 15:47:50.885838: Pseudo dice [0.9577, 0.8764, 0.9045, 0.9065, 0.9145, 0.6357]
303
+ 2023-10-16 15:47:50.885922: Epoch time: 347.09 s
304
+ 2023-10-16 15:47:52.128037:
305
+ 2023-10-16 15:47:52.128145: Epoch 690
306
+ 2023-10-16 15:47:52.128262: Current learning rate: 0.00349
307
+ 2023-10-16 15:53:39.254061: train_loss -0.7614
308
+ 2023-10-16 15:53:39.254211: val_loss -0.7025
309
+ 2023-10-16 15:53:39.254324: Pseudo dice [0.9724, 0.8791, 0.9141, 0.853, 0.8714, 0.6621]
310
+ 2023-10-16 15:53:39.254413: Epoch time: 347.13 s
311
+ 2023-10-16 15:53:40.498606:
312
+ 2023-10-16 15:53:40.498716: Epoch 691
313
+ 2023-10-16 15:53:40.498834: Current learning rate: 0.00348
314
+ 2023-10-16 15:59:27.492378: train_loss -0.8142
315
+ 2023-10-16 15:59:27.492520: val_loss -0.6976
316
+ 2023-10-16 15:59:27.492631: Pseudo dice [0.9733, 0.8819, 0.9122, 0.8859, 0.9243, 0.6735]
317
+ 2023-10-16 15:59:27.492722: Epoch time: 346.99 s
318
+ 2023-10-16 15:59:28.744697:
319
+ 2023-10-16 15:59:28.744801: Epoch 692
320
+ 2023-10-16 15:59:28.744918: Current learning rate: 0.00346
321
+ 2023-10-16 16:05:15.697318: train_loss -0.7856
322
+ 2023-10-16 16:05:15.697461: val_loss -0.695
323
+ 2023-10-16 16:05:15.697591: Pseudo dice [0.9567, 0.8851, 0.9146, 0.869, 0.8951, 0.6365]
324
+ 2023-10-16 16:05:15.697677: Epoch time: 346.95 s
325
+ 2023-10-16 16:05:16.930659:
326
+ 2023-10-16 16:05:16.930859: Epoch 693
327
+ 2023-10-16 16:05:16.930986: Current learning rate: 0.00345
328
+ 2023-10-16 16:11:03.948810: train_loss -0.7754
329
+ 2023-10-16 16:11:03.948950: val_loss -0.718
330
+ 2023-10-16 16:11:03.949069: Pseudo dice [0.9714, 0.8689, 0.8983, 0.8828, 0.8668, 0.6395]
331
+ 2023-10-16 16:11:03.949156: Epoch time: 347.02 s
332
+ 2023-10-16 16:11:05.185005:
333
+ 2023-10-16 16:11:05.185197: Epoch 694
334
+ 2023-10-16 16:11:05.185301: Current learning rate: 0.00344
335
+ 2023-10-16 16:16:52.270051: train_loss -0.7802
336
+ 2023-10-16 16:16:52.270246: val_loss -0.628
337
+ 2023-10-16 16:16:52.270350: Pseudo dice [0.97, 0.8711, 0.9118, 0.9183, 0.8963, 0.622]
338
+ 2023-10-16 16:16:52.270437: Epoch time: 347.09 s
339
+ 2023-10-16 16:16:53.686908:
340
+ 2023-10-16 16:16:53.687034: Epoch 695
341
+ 2023-10-16 16:16:53.687136: Current learning rate: 0.00343
342
+ 2023-10-16 16:22:40.835341: train_loss -0.7637
343
+ 2023-10-16 16:22:40.835490: val_loss -0.6938
344
+ 2023-10-16 16:22:40.835609: Pseudo dice [0.9748, 0.8836, 0.8923, 0.8937, 0.9091, 0.6934]
345
+ 2023-10-16 16:22:40.835694: Epoch time: 347.15 s
346
+ 2023-10-16 16:22:42.072929:
347
+ 2023-10-16 16:22:42.073036: Epoch 696
348
+ 2023-10-16 16:22:42.073146: Current learning rate: 0.00342
349
+ 2023-10-16 16:28:29.189310: train_loss -0.7534
350
+ 2023-10-16 16:28:29.189445: val_loss -0.673
351
+ 2023-10-16 16:28:29.189578: Pseudo dice [0.9704, 0.8706, 0.8972, 0.9129, 0.9055, 0.697]
352
+ 2023-10-16 16:28:29.189677: Epoch time: 347.12 s
353
+ 2023-10-16 16:28:30.433985:
354
+ 2023-10-16 16:28:30.434089: Epoch 697
355
+ 2023-10-16 16:28:30.434215: Current learning rate: 0.00341
356
+ 2023-10-16 16:34:17.422712: train_loss -0.7803
357
+ 2023-10-16 16:34:17.422876: val_loss -0.6951
358
+ 2023-10-16 16:34:17.422993: Pseudo dice [0.9715, 0.8701, 0.9001, 0.8812, 0.886, 0.6462]
359
+ 2023-10-16 16:34:17.423078: Epoch time: 346.99 s
360
+ 2023-10-16 16:34:18.685070:
361
+ 2023-10-16 16:34:18.685174: Epoch 698
362
+ 2023-10-16 16:34:18.685283: Current learning rate: 0.0034
363
+ 2023-10-16 16:40:05.730069: train_loss -0.7805
364
+ 2023-10-16 16:40:05.730230: val_loss -0.625
365
+ 2023-10-16 16:40:05.730333: Pseudo dice [0.9709, 0.8713, 0.9143, 0.803, 0.9116, 0.6199]
366
+ 2023-10-16 16:40:05.730420: Epoch time: 347.05 s
367
+ 2023-10-16 16:40:06.970120:
368
+ 2023-10-16 16:40:06.970280: Epoch 699
369
+ 2023-10-16 16:40:06.970449: Current learning rate: 0.00339
370
+ 2023-10-16 16:45:54.016073: train_loss -0.7806
371
+ 2023-10-16 16:45:54.016217: val_loss -0.7162
372
+ 2023-10-16 16:45:54.016339: Pseudo dice [0.9718, 0.8703, 0.9138, 0.8974, 0.8785, 0.6422]
373
+ 2023-10-16 16:45:54.016433: Epoch time: 347.05 s
374
+ 2023-10-16 16:45:56.997331:
375
+ 2023-10-16 16:45:56.997594: Epoch 700
376
+ 2023-10-16 16:45:56.997781: Current learning rate: 0.00338
377
+ 2023-10-16 16:51:44.045771: train_loss -0.7833
378
+ 2023-10-16 16:51:44.045931: val_loss -0.6933
379
+ 2023-10-16 16:51:44.046034: Pseudo dice [0.9723, 0.865, 0.9224, 0.8523, 0.8763, 0.6381]
380
+ 2023-10-16 16:51:44.046121: Epoch time: 347.05 s
381
+ 2023-10-16 16:51:45.448343:
382
+ 2023-10-16 16:51:45.448452: Epoch 701
383
+ 2023-10-16 16:51:45.448575: Current learning rate: 0.00337
384
+ 2023-10-16 16:57:32.561426: train_loss -0.7571
385
+ 2023-10-16 16:57:32.561591: val_loss -0.6655
386
+ 2023-10-16 16:57:32.561729: Pseudo dice [0.9711, 0.8732, 0.9, 0.8923, 0.9004, 0.6384]
387
+ 2023-10-16 16:57:32.561820: Epoch time: 347.11 s
388
+ 2023-10-16 16:57:33.801441:
389
+ 2023-10-16 16:57:33.801694: Epoch 702
390
+ 2023-10-16 16:57:33.801955: Current learning rate: 0.00336
391
+ 2023-10-16 17:03:20.859094: train_loss -0.7672
392
+ 2023-10-16 17:03:20.859270: val_loss -0.7034
393
+ 2023-10-16 17:03:20.859374: Pseudo dice [0.9721, 0.8795, 0.898, 0.9042, 0.9137, 0.6659]
394
+ 2023-10-16 17:03:20.859462: Epoch time: 347.06 s
395
+ 2023-10-16 17:03:22.101246:
396
+ 2023-10-16 17:03:22.101372: Epoch 703
397
+ 2023-10-16 17:03:22.101486: Current learning rate: 0.00335
398
+ 2023-10-16 17:09:09.126226: train_loss -0.783
399
+ 2023-10-16 17:09:09.126378: val_loss -0.6845
400
+ 2023-10-16 17:09:09.126510: Pseudo dice [0.9726, 0.8791, 0.9095, 0.8866, 0.8608, 0.6806]
401
+ 2023-10-16 17:09:09.126611: Epoch time: 347.03 s
402
+ 2023-10-16 17:09:10.386247:
403
+ 2023-10-16 17:09:10.386356: Epoch 704
404
+ 2023-10-16 17:09:10.386464: Current learning rate: 0.00334
405
+ 2023-10-16 17:14:57.403064: train_loss -0.7716
406
+ 2023-10-16 17:14:57.403202: val_loss -0.698
407
+ 2023-10-16 17:14:57.403321: Pseudo dice [0.9718, 0.8811, 0.9088, 0.9002, 0.8809, 0.6618]
408
+ 2023-10-16 17:14:57.403420: Epoch time: 347.02 s
409
+ 2023-10-16 17:14:58.645834:
410
+ 2023-10-16 17:14:58.646003: Epoch 705
411
+ 2023-10-16 17:14:58.646159: Current learning rate: 0.00333
412
+ 2023-10-16 17:20:45.704206: train_loss -0.7503
413
+ 2023-10-16 17:20:45.704348: val_loss -0.6847
414
+ 2023-10-16 17:20:45.704459: Pseudo dice [0.9698, 0.873, 0.8917, 0.8889, 0.8997, 0.6743]
415
+ 2023-10-16 17:20:45.704549: Epoch time: 347.06 s
416
+ 2023-10-16 17:20:46.957640:
417
+ 2023-10-16 17:20:46.957807: Epoch 706
418
+ 2023-10-16 17:20:46.958007: Current learning rate: 0.00332
419
+ 2023-10-16 17:26:33.958879: train_loss -0.7752
420
+ 2023-10-16 17:26:33.959020: val_loss -0.7045
421
+ 2023-10-16 17:26:33.959134: Pseudo dice [0.9722, 0.8739, 0.9183, 0.8454, 0.9018, 0.6716]
422
+ 2023-10-16 17:26:33.959222: Epoch time: 347.0 s
423
+ 2023-10-16 17:26:35.206671:
424
+ 2023-10-16 17:26:35.206788: Epoch 707
425
+ 2023-10-16 17:26:35.206922: Current learning rate: 0.00331
426
+ 2023-10-16 17:32:22.138206: train_loss -0.7669
427
+ 2023-10-16 17:32:22.138348: val_loss -0.7071
428
+ 2023-10-16 17:32:22.138467: Pseudo dice [0.9745, 0.8816, 0.8954, 0.8867, 0.8916, 0.6833]
429
+ 2023-10-16 17:32:22.138563: Epoch time: 346.93 s
430
+ 2023-10-16 17:32:23.563481:
431
+ 2023-10-16 17:32:23.563607: Epoch 708
432
+ 2023-10-16 17:32:23.563741: Current learning rate: 0.0033
433
+ 2023-10-16 17:38:10.497249: train_loss -0.7673
434
+ 2023-10-16 17:38:10.497402: val_loss -0.7156
435
+ 2023-10-16 17:38:10.497540: Pseudo dice [0.9729, 0.8705, 0.9078, 0.8799, 0.8892, 0.6529]
436
+ 2023-10-16 17:38:10.497638: Epoch time: 346.93 s
437
+ 2023-10-16 17:38:11.745688:
438
+ 2023-10-16 17:38:11.745798: Epoch 709
439
+ 2023-10-16 17:38:11.745906: Current learning rate: 0.00329
440
+ 2023-10-16 17:43:58.635768: train_loss -0.7809
441
+ 2023-10-16 17:43:58.635956: val_loss -0.6491
442
+ 2023-10-16 17:43:58.636094: Pseudo dice [0.9702, 0.8701, 0.8895, 0.8521, 0.8554, 0.6343]
443
+ 2023-10-16 17:43:58.636192: Epoch time: 346.89 s
444
+ 2023-10-16 17:43:59.878851:
445
+ 2023-10-16 17:43:59.878956: Epoch 710
446
+ 2023-10-16 17:43:59.879080: Current learning rate: 0.00328
447
+ 2023-10-16 17:49:46.728398: train_loss -0.7643
448
+ 2023-10-16 17:49:46.728543: val_loss -0.7095
449
+ 2023-10-16 17:49:46.728656: Pseudo dice [0.9692, 0.8689, 0.9171, 0.8931, 0.9119, 0.6732]
450
+ 2023-10-16 17:49:46.728746: Epoch time: 346.85 s
451
+ 2023-10-16 17:49:47.973462:
452
+ 2023-10-16 17:49:47.973571: Epoch 711
453
+ 2023-10-16 17:49:47.973685: Current learning rate: 0.00327
454
+ 2023-10-16 17:55:34.924966: train_loss -0.7672
455
+ 2023-10-16 17:55:34.925110: val_loss -0.7094
456
+ 2023-10-16 17:55:34.925245: Pseudo dice [0.9737, 0.8833, 0.9068, 0.917, 0.8682, 0.6801]
457
+ 2023-10-16 17:55:34.925343: Epoch time: 346.95 s
458
+ 2023-10-16 17:55:36.177106:
459
+ 2023-10-16 17:55:36.177264: Epoch 712
460
+ 2023-10-16 17:55:36.177436: Current learning rate: 0.00326
461
+ 2023-10-16 18:01:22.954619: train_loss -0.7593
462
+ 2023-10-16 18:01:22.954769: val_loss -0.6383
463
+ 2023-10-16 18:01:22.954875: Pseudo dice [0.9728, 0.8743, 0.9015, 0.8736, 0.8608, 0.6368]
464
+ 2023-10-16 18:01:22.954967: Epoch time: 346.78 s
465
+ 2023-10-16 18:01:24.252601:
466
+ 2023-10-16 18:01:24.252719: Epoch 713
467
+ 2023-10-16 18:01:24.252821: Current learning rate: 0.00325
468
+ 2023-10-16 18:07:11.209710: train_loss -0.7669
469
+ 2023-10-16 18:07:11.209862: val_loss -0.6773
470
+ 2023-10-16 18:07:11.209980: Pseudo dice [0.9705, 0.876, 0.9022, 0.9134, 0.895, 0.6792]
471
+ 2023-10-16 18:07:11.210068: Epoch time: 346.96 s
472
+ 2023-10-16 18:07:12.472027:
473
+ 2023-10-16 18:07:12.472130: Epoch 714
474
+ 2023-10-16 18:07:12.472245: Current learning rate: 0.00324
475
+ 2023-10-16 18:12:59.421642: train_loss -0.7703
476
+ 2023-10-16 18:12:59.421783: val_loss -0.6806
477
+ 2023-10-16 18:12:59.421922: Pseudo dice [0.9727, 0.875, 0.9039, 0.8918, 0.8685, 0.6262]
478
+ 2023-10-16 18:12:59.422021: Epoch time: 346.95 s
479
+ 2023-10-16 18:13:00.847863:
480
+ 2023-10-16 18:13:00.847979: Epoch 715
481
+ 2023-10-16 18:13:00.848104: Current learning rate: 0.00323
482
+ 2023-10-16 18:18:47.766829: train_loss -0.7554
483
+ 2023-10-16 18:18:47.766988: val_loss -0.629
484
+ 2023-10-16 18:18:47.767092: Pseudo dice [0.9728, 0.8666, 0.8996, 0.8588, 0.8688, 0.6596]
485
+ 2023-10-16 18:18:47.767176: Epoch time: 346.92 s
486
+ 2023-10-16 18:18:49.017198:
487
+ 2023-10-16 18:18:49.017307: Epoch 716
488
+ 2023-10-16 18:18:49.017418: Current learning rate: 0.00322
489
+ 2023-10-16 18:24:35.870587: train_loss -0.7733
490
+ 2023-10-16 18:24:35.870733: val_loss -0.6543
491
+ 2023-10-16 18:24:35.870835: Pseudo dice [0.9709, 0.8719, 0.9111, 0.8689, 0.9133, 0.6523]
492
+ 2023-10-16 18:24:35.870922: Epoch time: 346.85 s
493
+ 2023-10-16 18:24:37.124043:
494
+ 2023-10-16 18:24:37.124146: Epoch 717
495
+ 2023-10-16 18:24:37.124261: Current learning rate: 0.00321
496
+ 2023-10-16 18:30:23.957323: train_loss -0.77
497
+ 2023-10-16 18:30:23.957465: val_loss -0.6474
498
+ 2023-10-16 18:30:23.957584: Pseudo dice [0.9735, 0.8619, 0.9143, 0.8353, 0.8954, 0.6592]
499
+ 2023-10-16 18:30:23.957669: Epoch time: 346.83 s
500
+ 2023-10-16 18:30:25.211150:
501
+ 2023-10-16 18:30:25.211277: Epoch 718
502
+ 2023-10-16 18:30:25.211446: Current learning rate: 0.0032
503
+ 2023-10-16 18:36:12.018801: train_loss -0.7782
504
+ 2023-10-16 18:36:12.018966: val_loss -0.6482
505
+ 2023-10-16 18:36:12.019079: Pseudo dice [0.9726, 0.8791, 0.8996, 0.884, 0.9021, 0.7036]
506
+ 2023-10-16 18:36:12.019172: Epoch time: 346.81 s
507
+ 2023-10-16 18:36:13.263700:
508
+ 2023-10-16 18:36:13.263804: Epoch 719
509
+ 2023-10-16 18:36:13.263930: Current learning rate: 0.00319
510
+ 2023-10-16 18:42:00.188163: train_loss -0.7888
511
+ 2023-10-16 18:42:00.188304: val_loss -0.7018
512
+ 2023-10-16 18:42:00.188424: Pseudo dice [0.9712, 0.87, 0.904, 0.8874, 0.8894, 0.638]
513
+ 2023-10-16 18:42:00.188511: Epoch time: 346.93 s
514
+ 2023-10-16 18:42:01.446100:
515
+ 2023-10-16 18:42:01.446201: Epoch 720
516
+ 2023-10-16 18:42:01.446325: Current learning rate: 0.00318
517
+ 2023-10-16 18:47:48.306199: train_loss -0.7908
518
+ 2023-10-16 18:47:48.306342: val_loss -0.715
519
+ 2023-10-16 18:47:48.306471: Pseudo dice [0.9722, 0.8798, 0.9101, 0.8619, 0.8633, 0.6799]
520
+ 2023-10-16 18:47:48.306579: Epoch time: 346.86 s
521
+ 2023-10-16 18:47:49.570007:
522
+ 2023-10-16 18:47:49.570175: Epoch 721
523
+ 2023-10-16 18:47:49.570375: Current learning rate: 0.00317
524
+ 2023-10-16 18:53:36.487244: train_loss -0.7834
525
+ 2023-10-16 18:53:36.487411: val_loss -0.7024
526
+ 2023-10-16 18:53:36.487536: Pseudo dice [0.9739, 0.8812, 0.9163, 0.8823, 0.9016, 0.6657]
527
+ 2023-10-16 18:53:36.487633: Epoch time: 346.92 s
528
+ 2023-10-16 18:53:37.910301:
529
+ 2023-10-16 18:53:37.910411: Epoch 722
530
+ 2023-10-16 18:53:37.910543: Current learning rate: 0.00316
531
+ 2023-10-16 18:59:24.792783: train_loss -0.7423
532
+ 2023-10-16 18:59:24.792937: val_loss -0.5908
533
+ 2023-10-16 18:59:24.793062: Pseudo dice [0.9718, 0.8649, 0.9003, 0.8871, 0.9164, 0.6585]
534
+ 2023-10-16 18:59:24.793158: Epoch time: 346.88 s
535
+ 2023-10-16 18:59:26.048174:
536
+ 2023-10-16 18:59:26.048283: Epoch 723
537
+ 2023-10-16 18:59:26.048398: Current learning rate: 0.00315
538
+ 2023-10-16 19:05:12.939356: train_loss -0.7433
539
+ 2023-10-16 19:05:12.939518: val_loss -0.6831
540
+ 2023-10-16 19:05:12.939631: Pseudo dice [0.9692, 0.8628, 0.9104, 0.8846, 0.901, 0.6258]
541
+ 2023-10-16 19:05:12.939718: Epoch time: 346.89 s
542
+ 2023-10-16 19:05:14.202779:
543
+ 2023-10-16 19:05:14.202989: Epoch 724
544
+ 2023-10-16 19:05:14.203168: Current learning rate: 0.00314
545
+ 2023-10-16 19:11:01.255048: train_loss -0.7776
546
+ 2023-10-16 19:11:01.255198: val_loss -0.6905
547
+ 2023-10-16 19:11:01.255312: Pseudo dice [0.9727, 0.8823, 0.9072, 0.9037, 0.8895, 0.6804]
548
+ 2023-10-16 19:11:01.255404: Epoch time: 347.05 s
549
+ 2023-10-16 19:11:02.504785:
550
+ 2023-10-16 19:11:02.504904: Epoch 725
551
+ 2023-10-16 19:11:02.505020: Current learning rate: 0.00313
552
+ 2023-10-16 19:16:49.462324: train_loss -0.7808
553
+ 2023-10-16 19:16:49.462468: val_loss -0.6713
554
+ 2023-10-16 19:16:49.462594: Pseudo dice [0.971, 0.8769, 0.9221, 0.8643, 0.8723, 0.6858]
555
+ 2023-10-16 19:16:49.462681: Epoch time: 346.96 s
556
+ 2023-10-16 19:16:50.711224:
557
+ 2023-10-16 19:16:50.711414: Epoch 726
558
+ 2023-10-16 19:16:50.711580: Current learning rate: 0.00312
559
+ 2023-10-16 19:22:37.729152: train_loss -0.7868
560
+ 2023-10-16 19:22:37.729310: val_loss -0.6498
561
+ 2023-10-16 19:22:37.729414: Pseudo dice [0.9713, 0.8733, 0.8978, 0.8517, 0.8767, 0.6395]
562
+ 2023-10-16 19:22:37.729499: Epoch time: 347.02 s
563
+ 2023-10-16 19:22:38.988612:
564
+ 2023-10-16 19:22:38.988800: Epoch 727
565
+ 2023-10-16 19:22:38.988944: Current learning rate: 0.00311
566
+ 2023-10-16 19:28:25.938247: train_loss -0.7688
567
+ 2023-10-16 19:28:25.938400: val_loss -0.6864
568
+ 2023-10-16 19:28:25.938513: Pseudo dice [0.967, 0.8752, 0.8958, 0.8807, 0.8955, 0.6826]
569
+ 2023-10-16 19:28:25.938601: Epoch time: 346.95 s
570
+ 2023-10-16 19:28:27.357462:
571
+ 2023-10-16 19:28:27.357578: Epoch 728
572
+ 2023-10-16 19:28:27.357693: Current learning rate: 0.0031
573
+ 2023-10-16 19:34:14.377090: train_loss -0.7517
574
+ 2023-10-16 19:34:14.377239: val_loss -0.6426
575
+ 2023-10-16 19:34:14.377358: Pseudo dice [0.9716, 0.8628, 0.8938, 0.875, 0.8832, 0.6524]
576
+ 2023-10-16 19:34:14.377446: Epoch time: 347.02 s
577
+ 2023-10-16 19:34:15.624686:
578
+ 2023-10-16 19:34:15.624795: Epoch 729
579
+ 2023-10-16 19:34:15.624912: Current learning rate: 0.00309
580
+ 2023-10-16 19:40:02.607368: train_loss -0.7796
581
+ 2023-10-16 19:40:02.607537: val_loss -0.6619
582
+ 2023-10-16 19:40:02.607661: Pseudo dice [0.9724, 0.8814, 0.8943, 0.8721, 0.8429, 0.678]
583
+ 2023-10-16 19:40:02.607759: Epoch time: 346.98 s
584
+ 2023-10-16 19:40:03.855425:
585
+ 2023-10-16 19:40:03.855531: Epoch 730
586
+ 2023-10-16 19:40:03.855642: Current learning rate: 0.00308
587
+ 2023-10-16 19:45:50.807545: train_loss -0.782
588
+ 2023-10-16 19:45:50.807684: val_loss -0.6797
589
+ 2023-10-16 19:45:50.807798: Pseudo dice [0.9722, 0.8782, 0.8965, 0.8921, 0.8947, 0.6896]
590
+ 2023-10-16 19:45:50.807889: Epoch time: 346.95 s
591
+ 2023-10-16 19:45:52.052246:
592
+ 2023-10-16 19:45:52.052373: Epoch 731
593
+ 2023-10-16 19:45:52.052481: Current learning rate: 0.00307
594
+ 2023-10-16 19:51:39.053820: train_loss -0.7904
595
+ 2023-10-16 19:51:39.053964: val_loss -0.6774
596
+ 2023-10-16 19:51:39.054094: Pseudo dice [0.9718, 0.8854, 0.9172, 0.8836, 0.8633, 0.6705]
597
+ 2023-10-16 19:51:39.054178: Epoch time: 347.0 s
598
+ 2023-10-16 19:51:40.560286:
599
+ 2023-10-16 19:51:40.560477: Epoch 732
600
+ 2023-10-16 19:51:40.560627: Current learning rate: 0.00306
601
+ 2023-10-16 19:57:27.542472: train_loss -0.7769
602
+ 2023-10-16 19:57:27.542644: val_loss -0.7191
603
+ 2023-10-16 19:57:27.542748: Pseudo dice [0.9723, 0.8758, 0.9074, 0.8964, 0.8692, 0.6413]
604
+ 2023-10-16 19:57:27.542835: Epoch time: 346.98 s
605
+ 2023-10-16 19:57:28.820989:
606
+ 2023-10-16 19:57:28.821165: Epoch 733
607
+ 2023-10-16 19:57:28.821347: Current learning rate: 0.00305
608
+ 2023-10-16 20:03:15.776599: train_loss -0.7644
609
+ 2023-10-16 20:03:15.776749: val_loss -0.6748
610
+ 2023-10-16 20:03:15.776865: Pseudo dice [0.9678, 0.8599, 0.9078, 0.8708, 0.9094, 0.6484]
611
+ 2023-10-16 20:03:15.776958: Epoch time: 346.96 s
612
+ 2023-10-16 20:03:17.026584:
613
+ 2023-10-16 20:03:17.026691: Epoch 734
614
+ 2023-10-16 20:03:17.026812: Current learning rate: 0.00304
615
+ 2023-10-16 20:09:03.869975: train_loss -0.7608
616
+ 2023-10-16 20:09:03.870114: val_loss -0.6832
617
+ 2023-10-16 20:09:03.870232: Pseudo dice [0.9709, 0.8705, 0.9131, 0.9043, 0.9058, 0.6642]
618
+ 2023-10-16 20:09:03.870320: Epoch time: 346.84 s
619
+ 2023-10-16 20:09:05.310963:
620
+ 2023-10-16 20:09:05.311097: Epoch 735
621
+ 2023-10-16 20:09:05.311204: Current learning rate: 0.00303
622
+ 2023-10-16 20:14:52.073126: train_loss -0.7843
623
+ 2023-10-16 20:14:52.073279: val_loss -0.6834
624
+ 2023-10-16 20:14:52.073405: Pseudo dice [0.971, 0.8622, 0.9047, 0.905, 0.8859, 0.6482]
625
+ 2023-10-16 20:14:52.073491: Epoch time: 346.76 s
626
+ 2023-10-16 20:14:53.319446:
627
+ 2023-10-16 20:14:53.319551: Epoch 736
628
+ 2023-10-16 20:14:53.319667: Current learning rate: 0.00302
629
+ 2023-10-16 20:20:40.196349: train_loss -0.7921
630
+ 2023-10-16 20:20:40.196598: val_loss -0.6307
631
+ 2023-10-16 20:20:40.196746: Pseudo dice [0.971, 0.8672, 0.9097, 0.8688, 0.9309, 0.6475]
632
+ 2023-10-16 20:20:40.196844: Epoch time: 346.88 s
633
+ 2023-10-16 20:20:41.443801:
634
+ 2023-10-16 20:20:41.443908: Epoch 737
635
+ 2023-10-16 20:20:41.444032: Current learning rate: 0.00301
636
+ 2023-10-16 20:26:28.367692: train_loss -0.7729
637
+ 2023-10-16 20:26:28.367858: val_loss -0.6829
638
+ 2023-10-16 20:26:28.367961: Pseudo dice [0.9629, 0.881, 0.9185, 0.8668, 0.8715, 0.6689]
639
+ 2023-10-16 20:26:28.368046: Epoch time: 346.92 s
640
+ 2023-10-16 20:26:29.612815:
641
+ 2023-10-16 20:26:29.612922: Epoch 738
642
+ 2023-10-16 20:26:29.613036: Current learning rate: 0.003
643
+ 2023-10-16 20:32:16.500695: train_loss -0.8012
644
+ 2023-10-16 20:32:16.500844: val_loss -0.6784
645
+ 2023-10-16 20:32:16.500956: Pseudo dice [0.9708, 0.8696, 0.9136, 0.8691, 0.8652, 0.6666]
646
+ 2023-10-16 20:32:16.501048: Epoch time: 346.89 s
647
+ 2023-10-16 20:32:17.760465:
648
+ 2023-10-16 20:32:17.760568: Epoch 739
649
+ 2023-10-16 20:32:17.760695: Current learning rate: 0.00299
650
+ 2023-10-16 20:38:04.592379: train_loss -0.7674
651
+ 2023-10-16 20:38:04.592530: val_loss -0.6908
652
+ 2023-10-16 20:38:04.592652: Pseudo dice [0.9724, 0.8714, 0.902, 0.8697, 0.8764, 0.6592]
653
+ 2023-10-16 20:38:04.592737: Epoch time: 346.83 s
654
+ 2023-10-16 20:38:05.837263:
655
+ 2023-10-16 20:38:05.837362: Epoch 740
656
+ 2023-10-16 20:38:05.837475: Current learning rate: 0.00297
657
+ 2023-10-16 20:43:52.660144: train_loss -0.7516
658
+ 2023-10-16 20:43:52.660285: val_loss -0.6461
659
+ 2023-10-16 20:43:52.660398: Pseudo dice [0.9706, 0.87, 0.8958, 0.8801, 0.8641, 0.7011]
660
+ 2023-10-16 20:43:52.660489: Epoch time: 346.82 s
661
+ 2023-10-16 20:43:53.907556:
662
+ 2023-10-16 20:43:53.907661: Epoch 741
663
+ 2023-10-16 20:43:53.907781: Current learning rate: 0.00296
664
+ 2023-10-16 20:49:40.771184: train_loss -0.7764
665
+ 2023-10-16 20:49:40.771330: val_loss -0.6637
666
+ 2023-10-16 20:49:40.771452: Pseudo dice [0.9724, 0.8785, 0.9024, 0.8671, 0.8626, 0.6238]
667
+ 2023-10-16 20:49:40.771538: Epoch time: 346.86 s
668
+ 2023-10-16 20:49:42.199558:
669
+ 2023-10-16 20:49:42.199669: Epoch 742
670
+ 2023-10-16 20:49:42.199785: Current learning rate: 0.00295
671
+ 2023-10-16 20:55:29.083561: train_loss -0.7812
672
+ 2023-10-16 20:55:29.083701: val_loss -0.6515
673
+ 2023-10-16 20:55:29.083814: Pseudo dice [0.9702, 0.8539, 0.9124, 0.8706, 0.8343, 0.6397]
674
+ 2023-10-16 20:55:29.083905: Epoch time: 346.88 s
675
+ 2023-10-16 20:55:30.326373:
676
+ 2023-10-16 20:55:30.326590: Epoch 743
677
+ 2023-10-16 20:55:30.326766: Current learning rate: 0.00294
678
+ 2023-10-16 21:01:17.272231: train_loss -0.7812
679
+ 2023-10-16 21:01:17.272433: val_loss -0.6915
680
+ 2023-10-16 21:01:17.272554: Pseudo dice [0.9715, 0.8707, 0.8996, 0.8269, 0.8827, 0.6523]
681
+ 2023-10-16 21:01:17.272651: Epoch time: 346.95 s
682
+ 2023-10-16 21:01:18.527537:
683
+ 2023-10-16 21:01:18.527642: Epoch 744
684
+ 2023-10-16 21:01:18.527767: Current learning rate: 0.00293
685
+ 2023-10-16 21:07:05.379056: train_loss -0.7603
686
+ 2023-10-16 21:07:05.379226: val_loss -0.6876
687
+ 2023-10-16 21:07:05.379349: Pseudo dice [0.9726, 0.8639, 0.9083, 0.8809, 0.8898, 0.6921]
688
+ 2023-10-16 21:07:05.379447: Epoch time: 346.85 s
689
+ 2023-10-16 21:07:06.626625:
690
+ 2023-10-16 21:07:06.626747: Epoch 745
691
+ 2023-10-16 21:07:06.626876: Current learning rate: 0.00292
692
+ 2023-10-16 21:12:53.451590: train_loss -0.7823
693
+ 2023-10-16 21:12:53.451732: val_loss -0.6655
694
+ 2023-10-16 21:12:53.451844: Pseudo dice [0.9683, 0.8804, 0.899, 0.8178, 0.8839, 0.6644]
695
+ 2023-10-16 21:12:53.451934: Epoch time: 346.83 s
696
+ 2023-10-16 21:12:54.696416:
697
+ 2023-10-16 21:12:54.696591: Epoch 746
698
+ 2023-10-16 21:12:54.696789: Current learning rate: 0.00291
699
+ 2023-10-16 21:18:41.674343: train_loss -0.7672
700
+ 2023-10-16 21:18:41.674484: val_loss -0.678
701
+ 2023-10-16 21:18:41.674613: Pseudo dice [0.9738, 0.8722, 0.909, 0.9068, 0.9027, 0.6562]
702
+ 2023-10-16 21:18:41.674699: Epoch time: 346.98 s
703
+ 2023-10-16 21:18:42.962177:
704
+ 2023-10-16 21:18:42.962278: Epoch 747
705
+ 2023-10-16 21:18:42.962391: Current learning rate: 0.0029
706
+ 2023-10-16 21:24:29.754190: train_loss -0.7823
707
+ 2023-10-16 21:24:29.754351: val_loss -0.6516
708
+ 2023-10-16 21:24:29.754453: Pseudo dice [0.9742, 0.8777, 0.9024, 0.8644, 0.8886, 0.684]
709
+ 2023-10-16 21:24:29.754547: Epoch time: 346.79 s
710
+ 2023-10-16 21:24:31.000975:
711
+ 2023-10-16 21:24:31.001076: Epoch 748
712
+ 2023-10-16 21:24:31.001190: Current learning rate: 0.00289
713
+ 2023-10-16 21:30:18.081588: train_loss -0.7763
714
+ 2023-10-16 21:30:18.081762: val_loss -0.6687
715
+ 2023-10-16 21:30:18.081884: Pseudo dice [0.9736, 0.8657, 0.9113, 0.8781, 0.8645, 0.6676]
716
+ 2023-10-16 21:30:18.081988: Epoch time: 347.08 s
717
+ 2023-10-16 21:30:19.327024:
718
+ 2023-10-16 21:30:19.327136: Epoch 749
719
+ 2023-10-16 21:30:19.327245: Current learning rate: 0.00288
720
+ 2023-10-16 21:36:06.313214: train_loss -0.769
721
+ 2023-10-16 21:36:06.313378: val_loss -0.6887
722
+ 2023-10-16 21:36:06.313504: Pseudo dice [0.9696, 0.8651, 0.9186, 0.8477, 0.8635, 0.6643]
723
+ 2023-10-16 21:36:06.313602: Epoch time: 346.99 s
724
+ 2023-10-16 21:36:09.322969:
725
+ 2023-10-16 21:36:09.323163: Epoch 750
726
+ 2023-10-16 21:36:09.323311: Current learning rate: 0.00287
727
+ 2023-10-16 21:41:56.237420: train_loss -0.7798
728
+ 2023-10-16 21:41:56.237570: val_loss -0.6466
729
+ 2023-10-16 21:41:56.237676: Pseudo dice [0.9738, 0.8774, 0.8983, 0.8788, 0.8519, 0.6585]
730
+ 2023-10-16 21:41:56.237762: Epoch time: 346.92 s
731
+ 2023-10-16 21:41:57.492184:
732
+ 2023-10-16 21:41:57.492290: Epoch 751
733
+ 2023-10-16 21:41:57.492398: Current learning rate: 0.00286
734
+ 2023-10-16 21:47:44.454530: train_loss -0.7814
735
+ 2023-10-16 21:47:44.454681: val_loss -0.6659
736
+ 2023-10-16 21:47:44.454795: Pseudo dice [0.9697, 0.8644, 0.9237, 0.9063, 0.8612, 0.6375]
737
+ 2023-10-16 21:47:44.454881: Epoch time: 346.96 s
738
+ 2023-10-16 21:47:45.699300:
739
+ 2023-10-16 21:47:45.699405: Epoch 752
740
+ 2023-10-16 21:47:45.699520: Current learning rate: 0.00285
741
+ 2023-10-16 21:53:32.651409: train_loss -0.805
742
+ 2023-10-16 21:53:32.651662: val_loss -0.6821
743
+ 2023-10-16 21:53:32.651787: Pseudo dice [0.9717, 0.8596, 0.9134, 0.8739, 0.8912, 0.6436]
744
+ 2023-10-16 21:53:32.651883: Epoch time: 346.95 s
745
+ 2023-10-16 21:53:33.894313:
746
+ 2023-10-16 21:53:33.894414: Epoch 753
747
+ 2023-10-16 21:53:33.894536: Current learning rate: 0.00284
748
+ 2023-10-16 21:59:20.734253: train_loss -0.7614
749
+ 2023-10-16 21:59:20.734413: val_loss -0.7219
750
+ 2023-10-16 21:59:20.734549: Pseudo dice [0.9706, 0.8697, 0.9083, 0.8744, 0.8853, 0.6663]
751
+ 2023-10-16 21:59:20.734730: Epoch time: 346.84 s
752
+ 2023-10-16 21:59:21.983121:
753
+ 2023-10-16 21:59:21.983289: Epoch 754
754
+ 2023-10-16 21:59:21.983458: Current learning rate: 0.00283
755
+ 2023-10-16 22:05:08.729062: train_loss -0.7599
756
+ 2023-10-16 22:05:08.729207: val_loss -0.6608
757
+ 2023-10-16 22:05:08.729323: Pseudo dice [0.9738, 0.8761, 0.9012, 0.9026, 0.9033, 0.6759]
758
+ 2023-10-16 22:05:08.729420: Epoch time: 346.75 s
759
+ 2023-10-16 22:05:10.152111:
760
+ 2023-10-16 22:05:10.152219: Epoch 755
761
+ 2023-10-16 22:05:10.152333: Current learning rate: 0.00282
762
+ 2023-10-16 22:10:56.905413: train_loss -0.7884
763
+ 2023-10-16 22:10:56.905557: val_loss -0.7104
764
+ 2023-10-16 22:10:56.905695: Pseudo dice [0.9683, 0.8676, 0.9096, 0.8374, 0.8668, 0.6558]
765
+ 2023-10-16 22:10:56.905791: Epoch time: 346.75 s
766
+ 2023-10-16 22:10:58.149326:
767
+ 2023-10-16 22:10:58.149428: Epoch 756
768
+ 2023-10-16 22:10:58.149547: Current learning rate: 0.00281
769
+ 2023-10-16 22:16:44.896919: train_loss -0.7781
770
+ 2023-10-16 22:16:44.897083: val_loss -0.7192
771
+ 2023-10-16 22:16:44.897204: Pseudo dice [0.9706, 0.8774, 0.9037, 0.8773, 0.9042, 0.677]
772
+ 2023-10-16 22:16:44.897301: Epoch time: 346.75 s
773
+ 2023-10-16 22:16:46.143886:
774
+ 2023-10-16 22:16:46.144010: Epoch 757
775
+ 2023-10-16 22:16:46.144125: Current learning rate: 0.0028
776
+ 2023-10-16 22:22:33.012759: train_loss -0.7888
777
+ 2023-10-16 22:22:33.012903: val_loss -0.6814
778
+ 2023-10-16 22:22:33.013015: Pseudo dice [0.9729, 0.8683, 0.9104, 0.8744, 0.8768, 0.7198]
779
+ 2023-10-16 22:22:33.013106: Epoch time: 346.87 s
780
+ 2023-10-16 22:22:34.259762:
781
+ 2023-10-16 22:22:34.259865: Epoch 758
782
+ 2023-10-16 22:22:34.259990: Current learning rate: 0.00279
783
+ 2023-10-16 22:28:21.064841: train_loss -0.7361
784
+ 2023-10-16 22:28:21.064984: val_loss -0.6864
785
+ 2023-10-16 22:28:21.065100: Pseudo dice [0.9648, 0.8821, 0.9176, 0.8619, 0.8589, 0.6633]
786
+ 2023-10-16 22:28:21.065188: Epoch time: 346.81 s
787
+ 2023-10-16 22:28:22.319604:
788
+ 2023-10-16 22:28:22.319710: Epoch 759
789
+ 2023-10-16 22:28:22.319818: Current learning rate: 0.00278
790
+ 2023-10-16 22:34:09.133929: train_loss -0.7883
791
+ 2023-10-16 22:34:09.134073: val_loss -0.6856
792
+ 2023-10-16 22:34:09.134192: Pseudo dice [0.9726, 0.8688, 0.9055, 0.8427, 0.8812, 0.6554]
793
+ 2023-10-16 22:34:09.134277: Epoch time: 346.82 s
794
+ 2023-10-16 22:34:10.381863:
795
+ 2023-10-16 22:34:10.382043: Epoch 760
796
+ 2023-10-16 22:34:10.382210: Current learning rate: 0.00277
797
+ 2023-10-16 22:39:57.259468: train_loss -0.7825
798
+ 2023-10-16 22:39:57.259617: val_loss -0.6793
799
+ 2023-10-16 22:39:57.259732: Pseudo dice [0.9727, 0.8735, 0.9076, 0.8929, 0.8783, 0.6945]
800
+ 2023-10-16 22:39:57.259821: Epoch time: 346.88 s
801
+ 2023-10-16 22:39:58.531965:
802
+ 2023-10-16 22:39:58.532070: Epoch 761
803
+ 2023-10-16 22:39:58.532183: Current learning rate: 0.00276
804
+ 2023-10-16 22:45:45.485026: train_loss -0.7712
805
+ 2023-10-16 22:45:45.485170: val_loss -0.6874
806
+ 2023-10-16 22:45:45.485283: Pseudo dice [0.9723, 0.8793, 0.9192, 0.8844, 0.9144, 0.6516]
807
+ 2023-10-16 22:45:45.485373: Epoch time: 346.95 s
808
+ 2023-10-16 22:45:46.926359:
809
+ 2023-10-16 22:45:46.926492: Epoch 762
810
+ 2023-10-16 22:45:46.926620: Current learning rate: 0.00275
811
+ 2023-10-16 22:51:33.977682: train_loss -0.7857
812
+ 2023-10-16 22:51:33.977825: val_loss -0.6443
813
+ 2023-10-16 22:51:33.977939: Pseudo dice [0.9727, 0.8767, 0.8949, 0.8151, 0.8862, 0.6678]
814
+ 2023-10-16 22:51:33.978030: Epoch time: 347.05 s
815
+ 2023-10-16 22:51:35.253114:
816
+ 2023-10-16 22:51:35.253222: Epoch 763
817
+ 2023-10-16 22:51:35.253348: Current learning rate: 0.00274
818
+ 2023-10-16 22:57:22.227417: train_loss -0.7944
819
+ 2023-10-16 22:57:22.227562: val_loss -0.6669
820
+ 2023-10-16 22:57:22.227674: Pseudo dice [0.9687, 0.8757, 0.9031, 0.8526, 0.863, 0.6839]
821
+ 2023-10-16 22:57:22.227766: Epoch time: 346.98 s
822
+ 2023-10-16 22:57:23.495120:
823
+ 2023-10-16 22:57:23.495229: Epoch 764
824
+ 2023-10-16 22:57:23.495349: Current learning rate: 0.00273
825
+ 2023-10-16 23:03:10.324488: train_loss -0.7739
826
+ 2023-10-16 23:03:10.324644: val_loss -0.6643
827
+ 2023-10-16 23:03:10.324748: Pseudo dice [0.9693, 0.8623, 0.9022, 0.8465, 0.868, 0.6316]
828
+ 2023-10-16 23:03:10.324842: Epoch time: 346.83 s
829
+ 2023-10-16 23:03:11.590431:
830
+ 2023-10-16 23:03:11.590558: Epoch 765
831
+ 2023-10-16 23:03:11.590662: Current learning rate: 0.00272
832
+ 2023-10-16 23:08:58.534127: train_loss -0.7627
833
+ 2023-10-16 23:08:58.534281: val_loss -0.6885
834
+ 2023-10-16 23:08:58.534384: Pseudo dice [0.9727, 0.8742, 0.8991, 0.8921, 0.8761, 0.721]
835
+ 2023-10-16 23:08:58.534471: Epoch time: 346.94 s
836
+ 2023-10-16 23:08:59.799573:
837
+ 2023-10-16 23:08:59.799681: Epoch 766
838
+ 2023-10-16 23:08:59.799800: Current learning rate: 0.00271
839
+ 2023-10-16 23:14:46.783532: train_loss -0.7838
840
+ 2023-10-16 23:14:46.783687: val_loss -0.6636
841
+ 2023-10-16 23:14:46.783791: Pseudo dice [0.9722, 0.8678, 0.9032, 0.8832, 0.8708, 0.6741]
842
+ 2023-10-16 23:14:46.783879: Epoch time: 346.98 s
843
+ 2023-10-16 23:14:48.045738:
844
+ 2023-10-16 23:14:48.045854: Epoch 767
845
+ 2023-10-16 23:14:48.045959: Current learning rate: 0.0027
846
+ 2023-10-16 23:20:34.960293: train_loss -0.7738
847
+ 2023-10-16 23:20:34.960457: val_loss -0.6693
848
+ 2023-10-16 23:20:34.960583: Pseudo dice [0.9721, 0.8671, 0.9128, 0.8691, 0.88, 0.6513]
849
+ 2023-10-16 23:20:34.960680: Epoch time: 346.92 s
850
+ 2023-10-16 23:20:36.431780:
851
+ 2023-10-16 23:20:36.431914: Epoch 768
852
+ 2023-10-16 23:20:36.432017: Current learning rate: 0.00268
853
+ 2023-10-16 23:26:23.355504: train_loss -0.7621
854
+ 2023-10-16 23:26:23.355654: val_loss -0.6347
855
+ 2023-10-16 23:26:23.355771: Pseudo dice [0.9736, 0.8705, 0.8865, 0.8463, 0.8879, 0.6394]
856
+ 2023-10-16 23:26:23.355858: Epoch time: 346.92 s
857
+ 2023-10-16 23:26:24.643907:
858
+ 2023-10-16 23:26:24.644099: Epoch 769
859
+ 2023-10-16 23:26:24.644284: Current learning rate: 0.00267
860
+ 2023-10-16 23:32:11.696220: train_loss -0.772
861
+ 2023-10-16 23:32:11.696359: val_loss -0.7176
862
+ 2023-10-16 23:32:11.696473: Pseudo dice [0.9737, 0.8809, 0.9153, 0.8991, 0.8772, 0.673]
863
+ 2023-10-16 23:32:11.696564: Epoch time: 347.05 s
864
+ 2023-10-16 23:32:12.978057:
865
+ 2023-10-16 23:32:12.978164: Epoch 770
866
+ 2023-10-16 23:32:12.978276: Current learning rate: 0.00266
867
+ 2023-10-16 23:37:59.855270: train_loss -0.7751
868
+ 2023-10-16 23:37:59.855424: val_loss -0.6703
869
+ 2023-10-16 23:37:59.855527: Pseudo dice [0.9715, 0.8624, 0.904, 0.8745, 0.8474, 0.6303]
870
+ 2023-10-16 23:37:59.855613: Epoch time: 346.88 s
871
+ 2023-10-16 23:38:01.119642:
872
+ 2023-10-16 23:38:01.119751: Epoch 771
873
+ 2023-10-16 23:38:01.119865: Current learning rate: 0.00265
874
+ 2023-10-16 23:43:48.086070: train_loss -0.7857
875
+ 2023-10-16 23:43:48.086206: val_loss -0.7146
876
+ 2023-10-16 23:43:48.086318: Pseudo dice [0.973, 0.8639, 0.899, 0.8673, 0.8598, 0.6267]
877
+ 2023-10-16 23:43:48.086406: Epoch time: 346.97 s
878
+ 2023-10-16 23:43:49.371821:
879
+ 2023-10-16 23:43:49.372040: Epoch 772
880
+ 2023-10-16 23:43:49.372244: Current learning rate: 0.00264
881
+ 2023-10-16 23:49:36.241262: train_loss -0.7812
882
+ 2023-10-16 23:49:36.241413: val_loss -0.6925
883
+ 2023-10-16 23:49:36.241530: Pseudo dice [0.9729, 0.8709, 0.9125, 0.8367, 0.8972, 0.6604]
884
+ 2023-10-16 23:49:36.241616: Epoch time: 346.87 s
885
+ 2023-10-16 23:49:37.501805:
886
+ 2023-10-16 23:49:37.501961: Epoch 773
887
+ 2023-10-16 23:49:37.502124: Current learning rate: 0.00263
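The training log above repeats one fixed block per epoch: the epoch number, the current learning rate, train_loss, val_loss, a per-label "Pseudo dice" list, and the epoch time in seconds. A minimal parsing sketch follows, assuming exactly the field labels printed by this trainer; the log file name in the usage comment is illustrative, not a reference to a specific path in this repository.

    import re
    import ast

    # Minimal sketch: pull per-epoch metrics out of an nnU-Net training log.
    # Assumes the field labels seen above ("Epoch", "train_loss", "val_loss",
    # "Pseudo dice", "Epoch time"); the log path below is illustrative.
    EPOCH_RE = re.compile(r": Epoch (\d+)$")
    FIELD_RE = re.compile(r": (train_loss|val_loss) (-?\d+\.?\d*)$")
    DICE_RE = re.compile(r": Pseudo dice (\[.*\])$")
    TIME_RE = re.compile(r": Epoch time: ([\d.]+) s$")

    def parse_log(path):
        epochs, current = [], None
        with open(path) as f:
            for line in f:
                line = line.rstrip()
                if (m := EPOCH_RE.search(line)):
                    current = {"epoch": int(m.group(1))}
                    epochs.append(current)
                elif current is not None:
                    if (m := FIELD_RE.search(line)):
                        current[m.group(1)] = float(m.group(2))
                    elif (m := DICE_RE.search(line)):
                        current["pseudo_dice"] = ast.literal_eval(m.group(1))
                    elif (m := TIME_RE.search(line)):
                        current["epoch_time_s"] = float(m.group(1))
        return epochs

    # Example: mean foreground pseudo dice of the last logged epoch.
    # log = parse_log("training_log_2023_10_16_11_52_25.txt")
    # print(sum(log[-1]["pseudo_dice"]) / len(log[-1]["pseudo_dice"]))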
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/plans.json ADDED
@@ -0,0 +1,454 @@
1
+ {
2
+ "dataset_name": "Dataset720_TSPrime",
3
+ "plans_name": "nnUNetPlans",
4
+ "original_median_spacing_after_transp": [
5
+ 2.5,
6
+ 1.269531011581421,
7
+ 1.269531011581421
8
+ ],
9
+ "original_median_shape_after_transp": [
10
+ 241,
11
+ 512,
12
+ 512
13
+ ],
14
+ "image_reader_writer": "SimpleITKIO",
15
+ "transpose_forward": [
16
+ 0,
17
+ 1,
18
+ 2
19
+ ],
20
+ "transpose_backward": [
21
+ 0,
22
+ 1,
23
+ 2
24
+ ],
25
+ "configurations": {
26
+ "2d": {
27
+ "data_identifier": "nnUNetPlans_2d",
28
+ "preprocessor_name": "DefaultPreprocessor",
29
+ "batch_size": 12,
30
+ "patch_size": [
31
+ 512,
32
+ 512
33
+ ],
34
+ "median_image_size_in_voxels": [
35
+ 512.0,
36
+ 512.0
37
+ ],
38
+ "spacing": [
39
+ 1.269531011581421,
40
+ 1.269531011581421
41
+ ],
42
+ "normalization_schemes": [
43
+ "CTNormalization"
44
+ ],
45
+ "use_mask_for_norm": [
46
+ false
47
+ ],
48
+ "UNet_class_name": "PlainConvUNet",
49
+ "UNet_base_num_features": 32,
50
+ "n_conv_per_stage_encoder": [
51
+ 2,
52
+ 2,
53
+ 2,
54
+ 2,
55
+ 2,
56
+ 2,
57
+ 2,
58
+ 2
59
+ ],
60
+ "n_conv_per_stage_decoder": [
61
+ 2,
62
+ 2,
63
+ 2,
64
+ 2,
65
+ 2,
66
+ 2,
67
+ 2
68
+ ],
69
+ "num_pool_per_axis": [
70
+ 7,
71
+ 7
72
+ ],
73
+ "pool_op_kernel_sizes": [
74
+ [
75
+ 1,
76
+ 1
77
+ ],
78
+ [
79
+ 2,
80
+ 2
81
+ ],
82
+ [
83
+ 2,
84
+ 2
85
+ ],
86
+ [
87
+ 2,
88
+ 2
89
+ ],
90
+ [
91
+ 2,
92
+ 2
93
+ ],
94
+ [
95
+ 2,
96
+ 2
97
+ ],
98
+ [
99
+ 2,
100
+ 2
101
+ ],
102
+ [
103
+ 2,
104
+ 2
105
+ ]
106
+ ],
107
+ "conv_kernel_sizes": [
108
+ [
109
+ 3,
110
+ 3
111
+ ],
112
+ [
113
+ 3,
114
+ 3
115
+ ],
116
+ [
117
+ 3,
118
+ 3
119
+ ],
120
+ [
121
+ 3,
122
+ 3
123
+ ],
124
+ [
125
+ 3,
126
+ 3
127
+ ],
128
+ [
129
+ 3,
130
+ 3
131
+ ],
132
+ [
133
+ 3,
134
+ 3
135
+ ],
136
+ [
137
+ 3,
138
+ 3
139
+ ]
140
+ ],
141
+ "unet_max_num_features": 512,
142
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
143
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
144
+ "resampling_fn_data_kwargs": {
145
+ "is_seg": false,
146
+ "order": 3,
147
+ "order_z": 0,
148
+ "force_separate_z": null
149
+ },
150
+ "resampling_fn_seg_kwargs": {
151
+ "is_seg": true,
152
+ "order": 1,
153
+ "order_z": 0,
154
+ "force_separate_z": null
155
+ },
156
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
157
+ "resampling_fn_probabilities_kwargs": {
158
+ "is_seg": false,
159
+ "order": 1,
160
+ "order_z": 0,
161
+ "force_separate_z": null
162
+ },
163
+ "batch_dice": true
164
+ },
165
+ "3d_lowres": {
166
+ "data_identifier": "nnUNetPlans_3d_lowres",
167
+ "preprocessor_name": "DefaultPreprocessor",
168
+ "batch_size": 2,
169
+ "patch_size": [
170
+ 80,
171
+ 192,
172
+ 160
173
+ ],
174
+ "median_image_size_in_voxels": [
175
+ 130,
176
+ 275,
177
+ 275
178
+ ],
179
+ "spacing": [
180
+ 4.650736429273743,
181
+ 2.361701649461784,
182
+ 2.361701649461784
183
+ ],
184
+ "normalization_schemes": [
185
+ "CTNormalization"
186
+ ],
187
+ "use_mask_for_norm": [
188
+ false
189
+ ],
190
+ "UNet_class_name": "PlainConvUNet",
191
+ "UNet_base_num_features": 32,
192
+ "n_conv_per_stage_encoder": [
193
+ 2,
194
+ 2,
195
+ 2,
196
+ 2,
197
+ 2,
198
+ 2
199
+ ],
200
+ "n_conv_per_stage_decoder": [
201
+ 2,
202
+ 2,
203
+ 2,
204
+ 2,
205
+ 2
206
+ ],
207
+ "num_pool_per_axis": [
208
+ 4,
209
+ 5,
210
+ 5
211
+ ],
212
+ "pool_op_kernel_sizes": [
213
+ [
214
+ 1,
215
+ 1,
216
+ 1
217
+ ],
218
+ [
219
+ 2,
220
+ 2,
221
+ 2
222
+ ],
223
+ [
224
+ 2,
225
+ 2,
226
+ 2
227
+ ],
228
+ [
229
+ 2,
230
+ 2,
231
+ 2
232
+ ],
233
+ [
234
+ 2,
235
+ 2,
236
+ 2
237
+ ],
238
+ [
239
+ 1,
240
+ 2,
241
+ 2
242
+ ]
243
+ ],
244
+ "conv_kernel_sizes": [
245
+ [
246
+ 3,
247
+ 3,
248
+ 3
249
+ ],
250
+ [
251
+ 3,
252
+ 3,
253
+ 3
254
+ ],
255
+ [
256
+ 3,
257
+ 3,
258
+ 3
259
+ ],
260
+ [
261
+ 3,
262
+ 3,
263
+ 3
264
+ ],
265
+ [
266
+ 3,
267
+ 3,
268
+ 3
269
+ ],
270
+ [
271
+ 3,
272
+ 3,
273
+ 3
274
+ ]
275
+ ],
276
+ "unet_max_num_features": 320,
277
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
278
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
279
+ "resampling_fn_data_kwargs": {
280
+ "is_seg": false,
281
+ "order": 3,
282
+ "order_z": 0,
283
+ "force_separate_z": null
284
+ },
285
+ "resampling_fn_seg_kwargs": {
286
+ "is_seg": true,
287
+ "order": 1,
288
+ "order_z": 0,
289
+ "force_separate_z": null
290
+ },
291
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
292
+ "resampling_fn_probabilities_kwargs": {
293
+ "is_seg": false,
294
+ "order": 1,
295
+ "order_z": 0,
296
+ "force_separate_z": null
297
+ },
298
+ "batch_dice": false,
299
+ "next_stage": "3d_cascade_fullres"
300
+ },
301
+ "3d_fullres": {
302
+ "data_identifier": "nnUNetPlans_3d_fullres",
303
+ "preprocessor_name": "DefaultPreprocessor",
304
+ "batch_size": 2,
305
+ "patch_size": [
306
+ 80,
307
+ 192,
308
+ 160
309
+ ],
310
+ "median_image_size_in_voxels": [
311
+ 241.0,
312
+ 512.0,
313
+ 512.0
314
+ ],
315
+ "spacing": [
316
+ 2.5,
317
+ 1.269531011581421,
318
+ 1.269531011581421
319
+ ],
320
+ "normalization_schemes": [
321
+ "CTNormalization"
322
+ ],
323
+ "use_mask_for_norm": [
324
+ false
325
+ ],
326
+ "UNet_class_name": "PlainConvUNet",
327
+ "UNet_base_num_features": 32,
328
+ "n_conv_per_stage_encoder": [
329
+ 2,
330
+ 2,
331
+ 2,
332
+ 2,
333
+ 2,
334
+ 2
335
+ ],
336
+ "n_conv_per_stage_decoder": [
337
+ 2,
338
+ 2,
339
+ 2,
340
+ 2,
341
+ 2
342
+ ],
343
+ "num_pool_per_axis": [
344
+ 4,
345
+ 5,
346
+ 5
347
+ ],
348
+ "pool_op_kernel_sizes": [
349
+ [
350
+ 1,
351
+ 1,
352
+ 1
353
+ ],
354
+ [
355
+ 2,
356
+ 2,
357
+ 2
358
+ ],
359
+ [
360
+ 2,
361
+ 2,
362
+ 2
363
+ ],
364
+ [
365
+ 2,
366
+ 2,
367
+ 2
368
+ ],
369
+ [
370
+ 2,
371
+ 2,
372
+ 2
373
+ ],
374
+ [
375
+ 1,
376
+ 2,
377
+ 2
378
+ ]
379
+ ],
380
+ "conv_kernel_sizes": [
381
+ [
382
+ 3,
383
+ 3,
384
+ 3
385
+ ],
386
+ [
387
+ 3,
388
+ 3,
389
+ 3
390
+ ],
391
+ [
392
+ 3,
393
+ 3,
394
+ 3
395
+ ],
396
+ [
397
+ 3,
398
+ 3,
399
+ 3
400
+ ],
401
+ [
402
+ 3,
403
+ 3,
404
+ 3
405
+ ],
406
+ [
407
+ 3,
408
+ 3,
409
+ 3
410
+ ]
411
+ ],
412
+ "unet_max_num_features": 320,
413
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
414
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
415
+ "resampling_fn_data_kwargs": {
416
+ "is_seg": false,
417
+ "order": 3,
418
+ "order_z": 0,
419
+ "force_separate_z": null
420
+ },
421
+ "resampling_fn_seg_kwargs": {
422
+ "is_seg": true,
423
+ "order": 1,
424
+ "order_z": 0,
425
+ "force_separate_z": null
426
+ },
427
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
428
+ "resampling_fn_probabilities_kwargs": {
429
+ "is_seg": false,
430
+ "order": 1,
431
+ "order_z": 0,
432
+ "force_separate_z": null
433
+ },
434
+ "batch_dice": true
435
+ },
436
+ "3d_cascade_fullres": {
437
+ "inherits_from": "3d_fullres",
438
+ "previous_stage": "3d_lowres"
439
+ }
440
+ },
441
+ "experiment_planner_used": "ExperimentPlanner",
442
+ "label_manager": "LabelManager",
443
+ "foreground_intensity_properties_per_channel": {
444
+ "0": {
445
+ "max": 1620.0,
446
+ "mean": -38.229164123535156,
447
+ "median": -54.0,
448
+ "min": -1000.0,
449
+ "percentile_00_5": -941.0,
450
+ "percentile_99_5": 897.0,
451
+ "std": 192.37086486816406
452
+ }
453
+ }
454
+ }
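plans.json selects "CTNormalization" for every configuration and records the foreground intensity statistics for channel 0 at the end of the file. The sketch below shows what that scheme amounts to, assuming the usual clip-to-percentiles followed by z-scoring; the statistics are copied from foreground_intensity_properties_per_channel["0"] above, and the function name is illustrative.

    import numpy as np

    # Sketch of the "CTNormalization" step referenced in plans.json, assuming
    # clip-to-foreground-percentiles followed by z-scoring. Values are taken
    # from foreground_intensity_properties_per_channel["0"] above.
    CT_STATS = {
        "mean": -38.229164123535156,
        "std": 192.37086486816406,
        "percentile_00_5": -941.0,
        "percentile_99_5": 897.0,
    }

    def normalize_ct(image: np.ndarray, stats: dict = CT_STATS) -> np.ndarray:
        """Clip HU values to the dataset's 0.5/99.5 foreground percentiles, then z-score."""
        image = image.astype(np.float32)
        image = np.clip(image, stats["percentile_00_5"], stats["percentile_99_5"])
        return (image - stats["mean"]) / max(stats["std"], 1e-8)

    # Example on a dummy volume with the dataset's median shape (241, 512, 512):
    # vol = np.random.uniform(-1000, 1620, size=(241, 512, 512)).astype(np.float32)
    # print(normalize_ct(vol).mean())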
Dataset720_TSPrime/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/postprocessing.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97b914ebfa1828ffde547b0262473ade65698112999ae10d07f85d2586cf6e9b
+ size 388
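postprocessing.pkl is stored through Git LFS, so the diff shows only the pointer file (spec version, sha256 oid, and size), not the pickled postprocessing configuration itself. A small sketch, assuming you want to check that a checked-out copy is the real file rather than an un-fetched pointer; the helper name is illustrative.

    # Sketch: detect whether a local file is still a Git LFS pointer (as shown
    # in the diff above) instead of the actual binary content.
    LFS_MAGIC = b"version https://git-lfs.github.com/spec/v1"

    def is_lfs_pointer(path: str) -> bool:
        with open(path, "rb") as f:
            return f.read(len(LFS_MAGIC)) == LFS_MAGIC

    # Example:
    # if is_lfs_pointer("postprocessing.pkl"):
    #     print("Run `git lfs pull` before trying to unpickle this file.")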
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/dataset.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "channel_names": {
+ "0": "CT"
+ },
+ "labels": {
+ "background": 0,
+ "Ctvp": 1
+ },
+ "numTraining": 60,
+ "file_ending": ".nii.gz",
+ "numTest": 0
+ }
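This dataset.json declares a single CT channel and one foreground label ("Ctvp": 1) over 60 training cases. A minimal sketch of reading it and turning a predicted label map into a binary CTVp mask; SimpleITK matches the SimpleITKIO reader named in plans.json, and the input/output paths are illustrative.

    import json
    import SimpleITK as sitk

    # Sketch: read dataset.json (structure as shown above) and extract the
    # binary mask for the "Ctvp" label from a predicted segmentation.
    # File paths are examples only.
    with open("dataset.json") as f:
        dataset = json.load(f)

    ctvp_value = dataset["labels"]["Ctvp"]      # 1 in this dataset
    seg = sitk.ReadImage("prediction.nii.gz")   # label map written by nnU-Net
    ctvp_mask = sitk.BinaryThreshold(seg, lowerThreshold=ctvp_value,
                                     upperThreshold=ctvp_value,
                                     insideValue=1, outsideValue=0)
    sitk.WriteImage(ctvp_mask, "prediction_ctvp_mask.nii.gz")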
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/dataset_fingerprint.json ADDED
@@ -0,0 +1,618 @@
1
+ {
2
+ "foreground_intensity_properties_per_channel": {
3
+ "0": {
4
+ "max": 882.0,
5
+ "mean": 45.35713577270508,
6
+ "median": 48.0,
7
+ "min": -118.0,
8
+ "percentile_00_5": -48.0,
9
+ "percentile_99_5": 103.0,
10
+ "std": 26.203161239624023
11
+ }
12
+ },
13
+ "median_relative_size_after_cropping": 1.0,
14
+ "shapes_after_crop": [
15
+ [
16
+ 230,
17
+ 512,
18
+ 512
19
+ ],
20
+ [
21
+ 240,
22
+ 512,
23
+ 512
24
+ ],
25
+ [
26
+ 260,
27
+ 512,
28
+ 512
29
+ ],
30
+ [
31
+ 215,
32
+ 512,
33
+ 512
34
+ ],
35
+ [
36
+ 260,
37
+ 512,
38
+ 512
39
+ ],
40
+ [
41
+ 220,
42
+ 512,
43
+ 512
44
+ ],
45
+ [
46
+ 210,
47
+ 512,
48
+ 512
49
+ ],
50
+ [
51
+ 240,
52
+ 512,
53
+ 512
54
+ ],
55
+ [
56
+ 265,
57
+ 512,
58
+ 512
59
+ ],
60
+ [
61
+ 229,
62
+ 512,
63
+ 512
64
+ ],
65
+ [
66
+ 230,
67
+ 512,
68
+ 512
69
+ ],
70
+ [
71
+ 243,
72
+ 512,
73
+ 512
74
+ ],
75
+ [
76
+ 230,
77
+ 512,
78
+ 512
79
+ ],
80
+ [
81
+ 250,
82
+ 512,
83
+ 512
84
+ ],
85
+ [
86
+ 250,
87
+ 512,
88
+ 512
89
+ ],
90
+ [
91
+ 245,
92
+ 512,
93
+ 512
94
+ ],
95
+ [
96
+ 235,
97
+ 512,
98
+ 512
99
+ ],
100
+ [
101
+ 250,
102
+ 512,
103
+ 512
104
+ ],
105
+ [
106
+ 242,
107
+ 512,
108
+ 512
109
+ ],
110
+ [
111
+ 241,
112
+ 512,
113
+ 512
114
+ ],
115
+ [
116
+ 210,
117
+ 512,
118
+ 512
119
+ ],
120
+ [
121
+ 255,
122
+ 512,
123
+ 512
124
+ ],
125
+ [
126
+ 246,
127
+ 512,
128
+ 512
129
+ ],
130
+ [
131
+ 240,
132
+ 512,
133
+ 512
134
+ ],
135
+ [
136
+ 245,
137
+ 512,
138
+ 512
139
+ ],
140
+ [
141
+ 250,
142
+ 512,
143
+ 512
144
+ ],
145
+ [
146
+ 249,
147
+ 512,
148
+ 512
149
+ ],
150
+ [
151
+ 210,
152
+ 512,
153
+ 512
154
+ ],
155
+ [
156
+ 210,
157
+ 512,
158
+ 512
159
+ ],
160
+ [
161
+ 244,
162
+ 512,
163
+ 512
164
+ ],
165
+ [
166
+ 230,
167
+ 512,
168
+ 512
169
+ ],
170
+ [
171
+ 235,
172
+ 512,
173
+ 512
174
+ ],
175
+ [
176
+ 260,
177
+ 512,
178
+ 512
179
+ ],
180
+ [
181
+ 241,
182
+ 512,
183
+ 512
184
+ ],
185
+ [
186
+ 220,
187
+ 512,
188
+ 512
189
+ ],
190
+ [
191
+ 240,
192
+ 512,
193
+ 512
194
+ ],
195
+ [
196
+ 190,
197
+ 512,
198
+ 512
199
+ ],
200
+ [
201
+ 255,
202
+ 512,
203
+ 512
204
+ ],
205
+ [
206
+ 230,
207
+ 512,
208
+ 512
209
+ ],
210
+ [
211
+ 255,
212
+ 512,
213
+ 512
214
+ ],
215
+ [
216
+ 236,
217
+ 512,
218
+ 512
219
+ ],
220
+ [
221
+ 241,
222
+ 512,
223
+ 512
224
+ ],
225
+ [
226
+ 220,
227
+ 512,
228
+ 512
229
+ ],
230
+ [
231
+ 241,
232
+ 512,
233
+ 512
234
+ ],
235
+ [
236
+ 245,
237
+ 512,
238
+ 512
239
+ ],
240
+ [
241
+ 241,
242
+ 512,
243
+ 512
244
+ ],
245
+ [
246
+ 250,
247
+ 512,
248
+ 512
249
+ ],
250
+ [
251
+ 210,
252
+ 512,
253
+ 512
254
+ ],
255
+ [
256
+ 250,
257
+ 512,
258
+ 512
259
+ ],
260
+ [
261
+ 266,
262
+ 512,
263
+ 512
264
+ ],
265
+ [
266
+ 220,
267
+ 512,
268
+ 512
269
+ ],
270
+ [
271
+ 230,
272
+ 512,
273
+ 512
274
+ ],
275
+ [
276
+ 280,
277
+ 512,
278
+ 512
279
+ ],
280
+ [
281
+ 260,
282
+ 512,
283
+ 512
284
+ ],
285
+ [
286
+ 245,
287
+ 512,
288
+ 512
289
+ ],
290
+ [
291
+ 220,
292
+ 512,
293
+ 512
294
+ ],
295
+ [
296
+ 240,
297
+ 512,
298
+ 512
299
+ ],
300
+ [
301
+ 250,
302
+ 512,
303
+ 512
304
+ ],
305
+ [
306
+ 226,
307
+ 512,
308
+ 512
309
+ ],
310
+ [
311
+ 240,
312
+ 512,
313
+ 512
314
+ ]
315
+ ],
316
+ "spacings": [
317
+ [
318
+ 2.5,
319
+ 1.269531011581421,
320
+ 1.269531011581421
321
+ ],
322
+ [
323
+ 2.5,
324
+ 1.269531011581421,
325
+ 1.269531011581421
326
+ ],
327
+ [
328
+ 2.5,
329
+ 1.269531011581421,
330
+ 1.269531011581421
331
+ ],
332
+ [
333
+ 2.5,
334
+ 1.269531011581421,
335
+ 1.269531011581421
336
+ ],
337
+ [
338
+ 2.5,
339
+ 1.269531011581421,
340
+ 1.269531011581421
341
+ ],
342
+ [
343
+ 2.5,
344
+ 1.269531011581421,
345
+ 1.269531011581421
346
+ ],
347
+ [
348
+ 2.5,
349
+ 1.269531011581421,
350
+ 1.269531011581421
351
+ ],
352
+ [
353
+ 2.5,
354
+ 1.269531011581421,
355
+ 1.269531011581421
356
+ ],
357
+ [
358
+ 2.5,
359
+ 1.269531011581421,
360
+ 1.269531011581421
361
+ ],
362
+ [
363
+ 2.5,
364
+ 1.269531011581421,
365
+ 1.269531011581421
366
+ ],
367
+ [
368
+ 2.5,
369
+ 1.269531011581421,
370
+ 1.269531011581421
371
+ ],
372
+ [
373
+ 2.5,
374
+ 1.269531011581421,
375
+ 1.269531011581421
376
+ ],
377
+ [
378
+ 2.5,
379
+ 1.269531011581421,
380
+ 1.269531011581421
381
+ ],
382
+ [
383
+ 2.5,
384
+ 1.269531011581421,
385
+ 1.269531011581421
386
+ ],
387
+ [
388
+ 2.5,
389
+ 1.269531011581421,
390
+ 1.269531011581421
391
+ ],
392
+ [
393
+ 2.5,
394
+ 1.269531011581421,
395
+ 1.269531011581421
396
+ ],
397
+ [
398
+ 2.5,
399
+ 1.269531011581421,
400
+ 1.269531011581421
401
+ ],
402
+ [
403
+ 2.5,
404
+ 1.269531011581421,
405
+ 1.269531011581421
406
+ ],
407
+ [
408
+ 2.5,
409
+ 1.269531011581421,
410
+ 1.269531011581421
411
+ ],
412
+ [
413
+ 2.5,
414
+ 1.269531011581421,
415
+ 1.269531011581421
416
+ ],
417
+ [
418
+ 2.5,
419
+ 1.269531011581421,
420
+ 1.269531011581421
421
+ ],
422
+ [
423
+ 2.5,
424
+ 1.269531011581421,
425
+ 1.269531011581421
426
+ ],
427
+ [
428
+ 2.5,
429
+ 1.269531011581421,
430
+ 1.269531011581421
431
+ ],
432
+ [
433
+ 2.5,
434
+ 1.269531011581421,
435
+ 1.269531011581421
436
+ ],
437
+ [
438
+ 2.5,
439
+ 1.269531011581421,
440
+ 1.269531011581421
441
+ ],
442
+ [
443
+ 2.5,
444
+ 1.269531011581421,
445
+ 1.269531011581421
446
+ ],
447
+ [
448
+ 2.5,
449
+ 1.269531011581421,
450
+ 1.269531011581421
451
+ ],
452
+ [
453
+ 2.5,
454
+ 1.269531011581421,
455
+ 1.269531011581421
456
+ ],
457
+ [
458
+ 2.5,
459
+ 1.269531011581421,
460
+ 1.269531011581421
461
+ ],
462
+ [
463
+ 2.5,
464
+ 1.269531011581421,
465
+ 1.269531011581421
466
+ ],
467
+ [
468
+ 2.5,
469
+ 1.269531011581421,
470
+ 1.269531011581421
471
+ ],
472
+ [
473
+ 2.5,
474
+ 1.269531011581421,
475
+ 1.269531011581421
476
+ ],
477
+ [
478
+ 2.5,
479
+ 1.269531011581421,
480
+ 1.269531011581421
481
+ ],
482
+ [
483
+ 2.5,
484
+ 1.269531011581421,
485
+ 1.269531011581421
486
+ ],
487
+ [
488
+ 2.5,
489
+ 1.269531011581421,
490
+ 1.269531011581421
491
+ ],
492
+ [
493
+ 2.5,
494
+ 1.269531011581421,
495
+ 1.269531011581421
496
+ ],
497
+ [
498
+ 2.5,
499
+ 1.269531011581421,
500
+ 1.269531011581421
501
+ ],
502
+ [
503
+ 2.5,
504
+ 1.269531011581421,
505
+ 1.269531011581421
506
+ ],
507
+ [
508
+ 2.5,
509
+ 1.269531011581421,
510
+ 1.269531011581421
511
+ ],
512
+ [
513
+ 2.5,
514
+ 1.269531011581421,
515
+ 1.269531011581421
516
+ ],
517
+ [
518
+ 2.5,
519
+ 1.269531011581421,
520
+ 1.269531011581421
521
+ ],
522
+ [
523
+ 2.5,
524
+ 1.269531011581421,
525
+ 1.269531011581421
526
+ ],
527
+ [
528
+ 2.5,
529
+ 1.269531011581421,
530
+ 1.269531011581421
531
+ ],
532
+ [
533
+ 2.5,
534
+ 1.269531011581421,
535
+ 1.269531011581421
536
+ ],
537
+ [
538
+ 2.5,
539
+ 1.269531011581421,
540
+ 1.269531011581421
541
+ ],
542
+ [
543
+ 2.5,
544
+ 1.269531011581421,
545
+ 1.269531011581421
546
+ ],
547
+ [
548
+ 2.5,
549
+ 1.269531011581421,
550
+ 1.269531011581421
551
+ ],
552
+ [
553
+ 2.5,
554
+ 1.269531011581421,
555
+ 1.269531011581421
556
+ ],
557
+ [
558
+ 2.5,
559
+ 1.269531011581421,
560
+ 1.269531011581421
561
+ ],
562
+ [
563
+ 2.5,
564
+ 1.269531011581421,
565
+ 1.269531011581421
566
+ ],
567
+ [
568
+ 2.5,
569
+ 1.269531011581421,
570
+ 1.269531011581421
571
+ ],
572
+ [
573
+ 2.5,
574
+ 1.269531011581421,
575
+ 1.269531011581421
576
+ ],
577
+ [
578
+ 2.5,
579
+ 1.269531011581421,
580
+ 1.269531011581421
581
+ ],
582
+ [
583
+ 2.5,
584
+ 1.269531011581421,
585
+ 1.269531011581421
586
+ ],
587
+ [
588
+ 2.5,
589
+ 1.269531011581421,
590
+ 1.269531011581421
591
+ ],
592
+ [
593
+ 2.5,
594
+ 1.269531011581421,
595
+ 1.269531011581421
596
+ ],
597
+ [
598
+ 2.5,
599
+ 1.269531011581421,
600
+ 1.269531011581421
601
+ ],
602
+ [
603
+ 2.5,
604
+ 1.269531011581421,
605
+ 1.269531011581421
606
+ ],
607
+ [
608
+ 2.5,
609
+ 1.269531011581421,
610
+ 1.269531011581421
611
+ ],
612
+ [
613
+ 2.5,
614
+ 1.269531011581421,
615
+ 1.269531011581421
616
+ ]
617
+ ]
618
+ }
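The fingerprint above lists, per training case, the cropped shape and voxel spacing, plus the foreground CT intensity statistics used for CTNormalization. A minimal sketch of recomputing the dataset-wide medians from it; the result should agree with the median shape [241, 512, 512] and spacing [2.5, 1.2695..., 1.2695...] quoted in the plans/debug.json below (the path is an assumption about the checkout location):

import json
import numpy as np

# Sketch only: derive the dataset-wide medians from the fingerprint above.
fp_path = "Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/dataset_fingerprint.json"
with open(fp_path) as f:
    fp = json.load(f)

median_shape = np.median(np.array(fp["shapes_after_crop"]), axis=0)
median_spacing = np.median(np.array(fp["spacings"]), axis=0)
print(median_shape)    # expected ~[241. 512. 512.] over the 60 cases listed above
print(median_spacing)  # expected [2.5, 1.269531..., 1.269531...]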
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/checkpoint_best.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bc5306307d22716b1c4a7919251fd422ddc5e45602caf05182086147d67273d
3
+ size 370792577
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/checkpoint_latest.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c7717f77f55e94cba15d52c3bf1f91d6a3c7b43892ee0e79055e60f9ba77d90
3
+ size 370793173
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/debug.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "_best_ema": "None",
3
+ "batch_size": "12",
4
+ "configuration_manager": "{'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}",
5
+ "configuration_name": "2d",
6
+ "cudnn_version": 8500,
7
+ "current_epoch": "0",
8
+ "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f1c0e365190>",
9
+ "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_2d.nnUNetDataLoader2D object at 0x7f1c0e365d90>",
10
+ "dataloader_train.num_processes": "12",
11
+ "dataloader_train.transform": "Compose ( [SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [512, 512], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-3.141592653589793, 3.141592653589793), angle_y = (0, 0), angle_z = (0, 0), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = None ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0], [0.5, 0.5], [0.25, 0.25], [0.125, 0.125], [0.0625, 0.0625], [0.03125, 0.03125], [0.015625, 0.015625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
+ "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f1c143c6e50>",
13
+ "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_2d.nnUNetDataLoader2D object at 0x7f1c0e9d5b50>",
14
+ "dataloader_val.num_processes": "6",
15
+ "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0], [0.5, 0.5], [0.25, 0.25], [0.125, 0.125], [0.0625, 0.0625], [0.03125, 0.03125], [0.015625, 0.015625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
+ "dataset_json": "{'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvp': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}",
17
+ "device": "cuda:0",
18
+ "disable_checkpointing": "False",
19
+ "fold": "0",
20
+ "folder_with_segs_from_previous_stage": "None",
21
+ "gpu_name": "NVIDIA GeForce GTX 1080 Ti",
22
+ "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7f1c0e4f3490>",
23
+ "hostname": "vipadmin-Z10PE-D16-WS",
24
+ "inference_allowed_mirroring_axes": "(0, 1)",
25
+ "initial_lr": "0.01",
26
+ "is_cascaded": "False",
27
+ "is_ddp": "False",
28
+ "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7f1c0e4f3650>",
29
+ "local_rank": "0",
30
+ "log_file": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/training_log_2023_11_6_13_13_08.txt",
31
+ "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7f1c0ebbc390>",
32
+ "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
+ "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7f1c0ea7b6d0>",
34
+ "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}, 'configuration': '2d', 'fold': 0, 'dataset_json': {'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvp': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
+ "network": "PlainConvUNet",
36
+ "num_epochs": "1000",
37
+ "num_input_channels": "1",
38
+ "num_iterations_per_epoch": "250",
39
+ "num_val_iterations_per_epoch": "50",
40
+ "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
+ "output_folder": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0",
42
+ "output_folder_base": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d",
43
+ "oversample_foreground_percent": "0.33",
44
+ "plans_manager": "{'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}",
45
+ "preprocessed_dataset_folder": "./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/nnUNetPlans_2d",
46
+ "preprocessed_dataset_folder_base": "./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP",
47
+ "save_every": "50",
48
+ "torch_version": "2.0.1+cu117",
49
+ "unpack_dataset": "True",
50
+ "was_initialized": "True",
51
+ "weight_decay": "3e-05"
52
+ }
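debug.json above pins down the 2d training setup: batch size 12, 512x512 patches, SGD with Nesterov momentum 0.99 and weight decay 3e-05, initial_lr 0.01 under a PolyLRScheduler, and 1000 epochs. A minimal sketch of the polynomial decay that reproduces the per-epoch learning rates printed in the training log below; the 0.9 exponent is an assumption (the usual nnU-Net default) and is not stored in this file:

# Sketch only: polynomial LR decay consistent with the 'Current learning rate' lines below.
initial_lr, num_epochs, exponent = 0.01, 1000, 0.9  # exponent assumed, see note above

def poly_lr(epoch: int) -> float:
    return initial_lr * (1 - epoch / num_epochs) ** exponent

for epoch in (0, 1, 2, 12):
    print(epoch, round(poly_lr(epoch), 5))
# prints 0.01, 0.00999, 0.00998, 0.00989 for epochs 0, 1, 2, 12 -- matching the log below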
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/network_architecture ADDED
@@ -0,0 +1,233 @@
1
+ digraph {
2
+ graph [bgcolor="#FFFFFF" color="#000000" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" pad="1.0,0.5" rankdir=LR]
3
+ node [color="#000000" fillcolor="#E8E8E8" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" shape=box style=filled]
4
+ edge [color="#000000" fontcolor="#000000" fontname=Times fontsize=10 style=solid]
5
+ "/outputs/149" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
6
+ "/outputs/150" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
7
+ "/outputs/151" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
8
+ "/outputs/152" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
9
+ "/outputs/153" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
10
+ "/outputs/154" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
11
+ "/outputs/155" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
12
+ "/outputs/156" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
13
+ "/outputs/157" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
14
+ "/outputs/158" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
15
+ "/outputs/159" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
16
+ "/outputs/160" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
17
+ "/outputs/161" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
18
+ "/outputs/162" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
19
+ "/outputs/163" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
20
+ "/outputs/164" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
21
+ "/outputs/165" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
22
+ "/outputs/166" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
23
+ "/outputs/167" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
24
+ "/outputs/168" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
25
+ "/outputs/169" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
26
+ "/outputs/170" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
27
+ "/outputs/171" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
28
+ "/outputs/172" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
29
+ "/outputs/173" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
30
+ "/outputs/174" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
31
+ "/outputs/175" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
32
+ "/outputs/176" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
33
+ "/outputs/177" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
34
+ "/outputs/178" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
35
+ "/outputs/179" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
36
+ "/outputs/180" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
37
+ "/outputs/181" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
38
+ "/outputs/182" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
39
+ "/outputs/183" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
40
+ "/outputs/184" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
41
+ "/outputs/185" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
42
+ "/outputs/186" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
43
+ "/outputs/187" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
44
+ "/outputs/188" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
45
+ "/outputs/189" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
46
+ "/outputs/190" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
47
+ "/outputs/191" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [2, 2]</td></tr></table>>]
48
+ "/outputs/192" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
49
+ "/outputs/193" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
50
+ "/outputs/194" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
51
+ "/outputs/195" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
52
+ "/outputs/196" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
53
+ "/outputs/197" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
54
+ "/outputs/198" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
55
+ "/outputs/199" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
56
+ "/outputs/200" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
57
+ "/outputs/201" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
58
+ "/outputs/202" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
59
+ "/outputs/203" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
60
+ "/outputs/204" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
61
+ "/outputs/205" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
62
+ "/outputs/206" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
63
+ "/outputs/207" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
64
+ "/outputs/208" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
65
+ "/outputs/209" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
66
+ "/outputs/210" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
67
+ "/outputs/211" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
68
+ "/outputs/212" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
69
+ "/outputs/213" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
70
+ "/outputs/214" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
71
+ "/outputs/215" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
72
+ "/outputs/216" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
73
+ "/outputs/217" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
74
+ "/outputs/218" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
75
+ "/outputs/219" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
76
+ "/outputs/220" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
77
+ "/outputs/221" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
78
+ "/outputs/222" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
79
+ "/outputs/223" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
80
+ "/outputs/224" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
81
+ "/outputs/225" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
82
+ "/outputs/226" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
83
+ "/outputs/227" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
84
+ "/outputs/228" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
85
+ "/outputs/229" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
86
+ "/outputs/230" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
87
+ "/outputs/231" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
88
+ "/outputs/232" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
89
+ "/outputs/233" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
90
+ "/outputs/234" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
91
+ "/outputs/235" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
92
+ "/outputs/236" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
93
+ "/outputs/237" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
94
+ "/outputs/238" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
95
+ "/outputs/239" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
96
+ "/outputs/240" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
97
+ "/outputs/241" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
98
+ "/outputs/242" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
99
+ "/outputs/243" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
100
+ "/outputs/244" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
101
+ "/outputs/245" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
102
+ "/outputs/246" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
103
+ "/outputs/247" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
104
+ "/outputs/248" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
105
+ "/outputs/249" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
106
+ "/outputs/250" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
107
+ "/outputs/251" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2], stride: [2, 2]</td></tr></table>>]
108
+ "/outputs/252" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
109
+ "/outputs/253" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
110
+ "/outputs/254" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
111
+ "/outputs/255" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
112
+ "/outputs/256" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3], stride: [1, 1]</td></tr></table>>]
113
+ "/outputs/257" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
114
+ "/outputs/258" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
115
+ "/outputs/259" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1], stride: [1, 1]</td></tr></table>>]
116
+ "/outputs/149" -> "/outputs/150" [label="1x32x512x512"]
117
+ "/outputs/150" -> "/outputs/151" [label="1x32x512x512"]
118
+ "/outputs/151" -> "/outputs/152" [label="1x32x512x512"]
119
+ "/outputs/152" -> "/outputs/153" [label="1x32x512x512"]
120
+ "/outputs/153" -> "/outputs/154" [label="1x32x512x512"]
121
+ "/outputs/154" -> "/outputs/155" [label="1x32x512x512"]
122
+ "/outputs/154" -> "/outputs/252" [label="1x32x512x512"]
123
+ "/outputs/155" -> "/outputs/156" [label="1x64x256x256"]
124
+ "/outputs/156" -> "/outputs/157" [label="1x64x256x256"]
125
+ "/outputs/157" -> "/outputs/158" [label="1x64x256x256"]
126
+ "/outputs/158" -> "/outputs/159" [label="1x64x256x256"]
127
+ "/outputs/159" -> "/outputs/160" [label="1x64x256x256"]
128
+ "/outputs/160" -> "/outputs/161" [label="1x64x256x256"]
129
+ "/outputs/160" -> "/outputs/243" [label="1x64x256x256"]
130
+ "/outputs/161" -> "/outputs/162" [label="1x128x128x128"]
131
+ "/outputs/162" -> "/outputs/163" [label="1x128x128x128"]
132
+ "/outputs/163" -> "/outputs/164" [label="1x128x128x128"]
133
+ "/outputs/164" -> "/outputs/165" [label="1x128x128x128"]
134
+ "/outputs/165" -> "/outputs/166" [label="1x128x128x128"]
135
+ "/outputs/166" -> "/outputs/167" [label="1x128x128x128"]
136
+ "/outputs/166" -> "/outputs/234" [label="1x128x128x128"]
137
+ "/outputs/167" -> "/outputs/168" [label="1x256x64x64"]
138
+ "/outputs/168" -> "/outputs/169" [label="1x256x64x64"]
139
+ "/outputs/169" -> "/outputs/170" [label="1x256x64x64"]
140
+ "/outputs/170" -> "/outputs/171" [label="1x256x64x64"]
141
+ "/outputs/171" -> "/outputs/172" [label="1x256x64x64"]
142
+ "/outputs/172" -> "/outputs/173" [label="1x256x64x64"]
143
+ "/outputs/172" -> "/outputs/225" [label="1x256x64x64"]
144
+ "/outputs/173" -> "/outputs/174" [label="1x512x32x32"]
145
+ "/outputs/174" -> "/outputs/175" [label="1x512x32x32"]
146
+ "/outputs/175" -> "/outputs/176" [label="1x512x32x32"]
147
+ "/outputs/176" -> "/outputs/177" [label="1x512x32x32"]
148
+ "/outputs/177" -> "/outputs/178" [label="1x512x32x32"]
149
+ "/outputs/178" -> "/outputs/179" [label="1x512x32x32"]
150
+ "/outputs/178" -> "/outputs/216" [label="1x512x32x32"]
151
+ "/outputs/179" -> "/outputs/180" [label="1x512x16x16"]
152
+ "/outputs/180" -> "/outputs/181" [label="1x512x16x16"]
153
+ "/outputs/181" -> "/outputs/182" [label="1x512x16x16"]
154
+ "/outputs/182" -> "/outputs/183" [label="1x512x16x16"]
155
+ "/outputs/183" -> "/outputs/184" [label="1x512x16x16"]
156
+ "/outputs/184" -> "/outputs/185" [label="1x512x16x16"]
157
+ "/outputs/184" -> "/outputs/207" [label="1x512x16x16"]
158
+ "/outputs/185" -> "/outputs/186" [label="1x512x8x8"]
159
+ "/outputs/186" -> "/outputs/187" [label="1x512x8x8"]
160
+ "/outputs/187" -> "/outputs/188" [label="1x512x8x8"]
161
+ "/outputs/188" -> "/outputs/189" [label="1x512x8x8"]
162
+ "/outputs/189" -> "/outputs/190" [label="1x512x8x8"]
163
+ "/outputs/190" -> "/outputs/191" [label="1x512x8x8"]
164
+ "/outputs/190" -> "/outputs/198" [label="1x512x8x8"]
165
+ "/outputs/191" -> "/outputs/192" [label="1x512x4x4"]
166
+ "/outputs/192" -> "/outputs/193" [label="1x512x4x4"]
167
+ "/outputs/193" -> "/outputs/194" [label="1x512x4x4"]
168
+ "/outputs/194" -> "/outputs/195" [label="1x512x4x4"]
169
+ "/outputs/195" -> "/outputs/196" [label="1x512x4x4"]
170
+ "/outputs/196" -> "/outputs/197" [label="1x512x4x4"]
171
+ "/outputs/197" -> "/outputs/198" [label="1x512x8x8"]
172
+ "/outputs/198" -> "/outputs/199" [label="1x1024x8x8"]
173
+ "/outputs/199" -> "/outputs/200" [label="1x512x8x8"]
174
+ "/outputs/200" -> "/outputs/201" [label="1x512x8x8"]
175
+ "/outputs/201" -> "/outputs/202" [label="1x512x8x8"]
176
+ "/outputs/202" -> "/outputs/203" [label="1x512x8x8"]
177
+ "/outputs/203" -> "/outputs/204" [label="1x512x8x8"]
178
+ "/outputs/204" -> "/outputs/205" [label="1x512x8x8"]
179
+ "/outputs/204" -> "/outputs/206" [label="1x512x8x8"]
180
+ "/outputs/206" -> "/outputs/207" [label="1x512x16x16"]
181
+ "/outputs/207" -> "/outputs/208" [label="1x1024x16x16"]
182
+ "/outputs/208" -> "/outputs/209" [label="1x512x16x16"]
183
+ "/outputs/209" -> "/outputs/210" [label="1x512x16x16"]
184
+ "/outputs/210" -> "/outputs/211" [label="1x512x16x16"]
185
+ "/outputs/211" -> "/outputs/212" [label="1x512x16x16"]
186
+ "/outputs/212" -> "/outputs/213" [label="1x512x16x16"]
187
+ "/outputs/213" -> "/outputs/214" [label="1x512x16x16"]
188
+ "/outputs/213" -> "/outputs/215" [label="1x512x16x16"]
189
+ "/outputs/215" -> "/outputs/216" [label="1x512x32x32"]
190
+ "/outputs/216" -> "/outputs/217" [label="1x1024x32x32"]
191
+ "/outputs/217" -> "/outputs/218" [label="1x512x32x32"]
192
+ "/outputs/218" -> "/outputs/219" [label="1x512x32x32"]
193
+ "/outputs/219" -> "/outputs/220" [label="1x512x32x32"]
194
+ "/outputs/220" -> "/outputs/221" [label="1x512x32x32"]
195
+ "/outputs/221" -> "/outputs/222" [label="1x512x32x32"]
196
+ "/outputs/222" -> "/outputs/223" [label="1x512x32x32"]
197
+ "/outputs/222" -> "/outputs/224" [label="1x512x32x32"]
198
+ "/outputs/224" -> "/outputs/225" [label="1x256x64x64"]
199
+ "/outputs/225" -> "/outputs/226" [label="1x512x64x64"]
200
+ "/outputs/226" -> "/outputs/227" [label="1x256x64x64"]
201
+ "/outputs/227" -> "/outputs/228" [label="1x256x64x64"]
202
+ "/outputs/228" -> "/outputs/229" [label="1x256x64x64"]
203
+ "/outputs/229" -> "/outputs/230" [label="1x256x64x64"]
204
+ "/outputs/230" -> "/outputs/231" [label="1x256x64x64"]
205
+ "/outputs/231" -> "/outputs/232" [label="1x256x64x64"]
206
+ "/outputs/231" -> "/outputs/233" [label="1x256x64x64"]
207
+ "/outputs/233" -> "/outputs/234" [label="1x128x128x128"]
208
+ "/outputs/234" -> "/outputs/235" [label="1x256x128x128"]
209
+ "/outputs/235" -> "/outputs/236" [label="1x128x128x128"]
210
+ "/outputs/236" -> "/outputs/237" [label="1x128x128x128"]
211
+ "/outputs/237" -> "/outputs/238" [label="1x128x128x128"]
212
+ "/outputs/238" -> "/outputs/239" [label="1x128x128x128"]
213
+ "/outputs/239" -> "/outputs/240" [label="1x128x128x128"]
214
+ "/outputs/240" -> "/outputs/241" [label="1x128x128x128"]
215
+ "/outputs/240" -> "/outputs/242" [label="1x128x128x128"]
216
+ "/outputs/242" -> "/outputs/243" [label="1x64x256x256"]
217
+ "/outputs/243" -> "/outputs/244" [label="1x128x256x256"]
218
+ "/outputs/244" -> "/outputs/245" [label="1x64x256x256"]
219
+ "/outputs/245" -> "/outputs/246" [label="1x64x256x256"]
220
+ "/outputs/246" -> "/outputs/247" [label="1x64x256x256"]
221
+ "/outputs/247" -> "/outputs/248" [label="1x64x256x256"]
222
+ "/outputs/248" -> "/outputs/249" [label="1x64x256x256"]
223
+ "/outputs/249" -> "/outputs/250" [label="1x64x256x256"]
224
+ "/outputs/249" -> "/outputs/251" [label="1x64x256x256"]
225
+ "/outputs/251" -> "/outputs/252" [label="1x32x512x512"]
226
+ "/outputs/252" -> "/outputs/253" [label="1x64x512x512"]
227
+ "/outputs/253" -> "/outputs/254" [label="1x32x512x512"]
228
+ "/outputs/254" -> "/outputs/255" [label="1x32x512x512"]
229
+ "/outputs/255" -> "/outputs/256" [label="1x32x512x512"]
230
+ "/outputs/256" -> "/outputs/257" [label="1x32x512x512"]
231
+ "/outputs/257" -> "/outputs/258" [label="1x32x512x512"]
232
+ "/outputs/258" -> "/outputs/259" [label="1x32x512x512"]
233
+ }
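The DOT graph above spells out the 2d PlainConvUNet: seven 2x2 strided downsamplings take the 512x512 input features to 4x4, the decoder mirrors them with transposed convolutions and skip concatenations, and the 1x1 convolutions at each decoder resolution are the deep-supervision heads. The training log below shows the trainer could not render this graph because the Graphviz 'dot' executable was missing; a minimal sketch of rendering it offline with the graphviz Python package (assumes the Graphviz binaries are installed; the output name is arbitrary):

from graphviz import Source

# Sketch only: render the DOT source above to a PNG.
dot_path = "Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/network_architecture"
with open(dot_path) as f:
    Source(f.read()).render("network_architecture_2d", format="png", cleanup=True)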
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/progress.png ADDED
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/fold_0/training_log_2023_11_6_13_13_08.txt ADDED
@@ -0,0 +1,1066 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 2d
10
+ {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}
14
+
15
+ 2023-11-06 13:13:28.802509: unpacking dataset...
16
+ 2023-11-06 13:15:00.613914: unpacking done...
17
+ 2023-11-06 13:15:00.614366: do_dummy_2d_data_aug: False
18
+ 2023-11-06 13:15:00.614915: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/splits_final.json
19
+ 2023-11-06 13:15:00.832952: The split file contains 5 splits.
20
+ 2023-11-06 13:15:00.833127: Desired fold for training: 0
21
+ 2023-11-06 13:15:00.833248: This split has 48 training and 12 validation cases.
22
+ 2023-11-06 13:15:10.900282: Unable to plot network architecture:
23
+ 2023-11-06 13:15:10.900499: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-11-06 13:15:10.940661:
25
+ 2023-11-06 13:15:10.940779: Epoch 0
26
+ 2023-11-06 13:15:10.940948: Current learning rate: 0.01
27
+ 2023-11-06 13:21:03.870656: train_loss 0.0348
28
+ 2023-11-06 13:21:03.870874: val_loss -0.0304
29
+ 2023-11-06 13:21:03.870951: Pseudo dice [0.0]
30
+ 2023-11-06 13:21:03.871047: Epoch time: 352.93 s
31
+ 2023-11-06 13:21:03.871126: Yayy! New best EMA pseudo Dice: 0.0
32
+ 2023-11-06 13:21:05.607379:
33
+ 2023-11-06 13:21:05.607551: Epoch 1
34
+ 2023-11-06 13:21:05.607653: Current learning rate: 0.00999
35
+ 2023-11-06 13:26:52.415418: train_loss -0.1505
36
+ 2023-11-06 13:26:52.415561: val_loss -0.5408
37
+ 2023-11-06 13:26:52.415637: Pseudo dice [0.5562]
38
+ 2023-11-06 13:26:52.415717: Epoch time: 346.81 s
39
+ 2023-11-06 13:26:52.415787: Yayy! New best EMA pseudo Dice: 0.0556
40
+ 2023-11-06 13:26:56.768546:
41
+ 2023-11-06 13:26:56.768665: Epoch 2
42
+ 2023-11-06 13:26:56.768766: Current learning rate: 0.00998
43
+ 2023-11-06 13:32:45.617573: train_loss -0.6659
44
+ 2023-11-06 13:32:45.617728: val_loss -0.7335
45
+ 2023-11-06 13:32:45.617804: Pseudo dice [0.7798]
46
+ 2023-11-06 13:32:45.617886: Epoch time: 348.85 s
47
+ 2023-11-06 13:32:45.617976: Yayy! New best EMA pseudo Dice: 0.128
48
+ 2023-11-06 13:32:49.959194:
49
+ 2023-11-06 13:32:49.959360: Epoch 3
50
+ 2023-11-06 13:32:49.959532: Current learning rate: 0.00997
51
+ 2023-11-06 13:38:38.253758: train_loss -0.7329
52
+ 2023-11-06 13:38:38.253912: val_loss -0.7511
53
+ 2023-11-06 13:38:38.253989: Pseudo dice [0.7852]
54
+ 2023-11-06 13:38:38.254071: Epoch time: 348.3 s
55
+ 2023-11-06 13:38:38.254141: Yayy! New best EMA pseudo Dice: 0.1938
56
+ 2023-11-06 13:38:42.641262:
57
+ 2023-11-06 13:38:42.641549: Epoch 4
58
+ 2023-11-06 13:38:42.641718: Current learning rate: 0.00996
59
+ 2023-11-06 13:44:30.913458: train_loss -0.7596
60
+ 2023-11-06 13:44:30.913604: val_loss -0.7813
61
+ 2023-11-06 13:44:30.913681: Pseudo dice [0.8119]
62
+ 2023-11-06 13:44:30.913763: Epoch time: 348.27 s
63
+ 2023-11-06 13:44:30.913832: Yayy! New best EMA pseudo Dice: 0.2556
64
+ 2023-11-06 13:44:35.153714:
65
+ 2023-11-06 13:44:35.153823: Epoch 5
66
+ 2023-11-06 13:44:35.153926: Current learning rate: 0.00995
67
+ 2023-11-06 13:50:23.506645: train_loss -0.7621
68
+ 2023-11-06 13:50:23.506818: val_loss -0.7807
69
+ 2023-11-06 13:50:23.506895: Pseudo dice [0.8153]
70
+ 2023-11-06 13:50:23.506977: Epoch time: 348.35 s
71
+ 2023-11-06 13:50:23.507046: Yayy! New best EMA pseudo Dice: 0.3115
72
+ 2023-11-06 13:50:27.673306:
73
+ 2023-11-06 13:50:27.673416: Epoch 6
74
+ 2023-11-06 13:50:27.673517: Current learning rate: 0.00995
75
+ 2023-11-06 13:56:16.041261: train_loss -0.791
76
+ 2023-11-06 13:56:16.041406: val_loss -0.79
77
+ 2023-11-06 13:56:16.041487: Pseudo dice [0.8185]
78
+ 2023-11-06 13:56:16.041569: Epoch time: 348.37 s
79
+ 2023-11-06 13:56:16.041637: Yayy! New best EMA pseudo Dice: 0.3622
80
+ 2023-11-06 13:56:20.389979:
81
+ 2023-11-06 13:56:20.390171: Epoch 7
82
+ 2023-11-06 13:56:20.390300: Current learning rate: 0.00994
83
+ 2023-11-06 14:02:08.574395: train_loss -0.7891
84
+ 2023-11-06 14:02:08.574591: val_loss -0.8361
85
+ 2023-11-06 14:02:08.574669: Pseudo dice [0.8644]
86
+ 2023-11-06 14:02:08.574764: Epoch time: 348.19 s
87
+ 2023-11-06 14:02:08.574836: Yayy! New best EMA pseudo Dice: 0.4125
88
+ 2023-11-06 14:02:12.834462:
89
+ 2023-11-06 14:02:12.834615: Epoch 8
90
+ 2023-11-06 14:02:12.834726: Current learning rate: 0.00993
91
+ 2023-11-06 14:08:01.192198: train_loss -0.8022
92
+ 2023-11-06 14:08:01.192355: val_loss -0.8143
93
+ 2023-11-06 14:08:01.192430: Pseudo dice [0.8384]
94
+ 2023-11-06 14:08:01.192512: Epoch time: 348.36 s
95
+ 2023-11-06 14:08:01.192580: Yayy! New best EMA pseudo Dice: 0.455
96
+ 2023-11-06 14:08:05.507637:
97
+ 2023-11-06 14:08:05.507843: Epoch 9
98
+ 2023-11-06 14:08:05.507995: Current learning rate: 0.00992
99
+ 2023-11-06 14:13:53.792487: train_loss -0.8095
100
+ 2023-11-06 14:13:53.792642: val_loss -0.8327
101
+ 2023-11-06 14:13:53.792728: Pseudo dice [0.8647]
102
+ 2023-11-06 14:13:53.792809: Epoch time: 348.29 s
103
+ 2023-11-06 14:13:53.792878: Yayy! New best EMA pseudo Dice: 0.496
104
+ 2023-11-06 14:13:58.139182:
105
+ 2023-11-06 14:13:58.139301: Epoch 10
106
+ 2023-11-06 14:13:58.139415: Current learning rate: 0.00991
107
+ 2023-11-06 14:19:46.407692: train_loss -0.8151
108
+ 2023-11-06 14:19:46.407889: val_loss -0.8305
109
+ 2023-11-06 14:19:46.407965: Pseudo dice [0.859]
110
+ 2023-11-06 14:19:46.408045: Epoch time: 348.27 s
111
+ 2023-11-06 14:19:46.408113: Yayy! New best EMA pseudo Dice: 0.5323
112
+ 2023-11-06 14:19:50.563425:
113
+ 2023-11-06 14:19:50.563558: Epoch 11
114
+ 2023-11-06 14:19:50.563658: Current learning rate: 0.0099
115
+ 2023-11-06 14:25:39.356401: train_loss -0.824
116
+ 2023-11-06 14:25:39.356591: val_loss -0.8076
117
+ 2023-11-06 14:25:39.356667: Pseudo dice [0.8393]
118
+ 2023-11-06 14:25:39.356749: Epoch time: 348.79 s
119
+ 2023-11-06 14:25:39.356818: Yayy! New best EMA pseudo Dice: 0.563
120
+ 2023-11-06 14:25:43.497167:
121
+ 2023-11-06 14:25:43.497332: Epoch 12
122
+ 2023-11-06 14:25:43.497450: Current learning rate: 0.00989
123
+ 2023-11-06 14:31:31.793675: train_loss -0.8206
124
+ 2023-11-06 14:31:31.793839: val_loss -0.8211
125
+ 2023-11-06 14:31:31.793914: Pseudo dice [0.8538]
126
+ 2023-11-06 14:31:31.793994: Epoch time: 348.3 s
127
+ 2023-11-06 14:31:31.794062: Yayy! New best EMA pseudo Dice: 0.5921
128
+ 2023-11-06 14:31:36.150037:
129
+ 2023-11-06 14:31:36.150145: Epoch 13
130
+ 2023-11-06 14:31:36.150244: Current learning rate: 0.00988
131
+ 2023-11-06 14:37:24.746708: train_loss -0.8277
132
+ 2023-11-06 14:37:24.746860: val_loss -0.8242
133
+ 2023-11-06 14:37:24.746934: Pseudo dice [0.8557]
134
+ 2023-11-06 14:37:24.747014: Epoch time: 348.6 s
135
+ 2023-11-06 14:37:24.747082: Yayy! New best EMA pseudo Dice: 0.6184
136
+ 2023-11-06 14:37:29.126144:
137
+ 2023-11-06 14:37:29.126324: Epoch 14
138
+ 2023-11-06 14:37:29.126425: Current learning rate: 0.00987
139
+ 2023-11-06 14:43:17.103191: train_loss -0.8218
140
+ 2023-11-06 14:43:17.103347: val_loss -0.8095
141
+ 2023-11-06 14:43:17.103426: Pseudo dice [0.8336]
142
+ 2023-11-06 14:43:17.103511: Epoch time: 347.98 s
143
+ 2023-11-06 14:43:17.103581: Yayy! New best EMA pseudo Dice: 0.64
144
+ 2023-11-06 14:43:21.522768:
145
+ 2023-11-06 14:43:21.522882: Epoch 15
146
+ 2023-11-06 14:43:21.522983: Current learning rate: 0.00986
147
+ 2023-11-06 14:49:09.740595: train_loss -0.8367
148
+ 2023-11-06 14:49:09.740735: val_loss -0.8429
149
+ 2023-11-06 14:49:09.740824: Pseudo dice [0.8705]
150
+ 2023-11-06 14:49:09.740904: Epoch time: 348.22 s
151
+ 2023-11-06 14:49:09.740974: Yayy! New best EMA pseudo Dice: 0.663
152
+ 2023-11-06 14:49:13.915126:
153
+ 2023-11-06 14:49:13.915297: Epoch 16
154
+ 2023-11-06 14:49:13.915401: Current learning rate: 0.00986
155
+ 2023-11-06 14:55:02.656214: train_loss -0.8303
156
+ 2023-11-06 14:55:02.656378: val_loss -0.8373
157
+ 2023-11-06 14:55:02.656460: Pseudo dice [0.8633]
158
+ 2023-11-06 14:55:02.656540: Epoch time: 348.74 s
159
+ 2023-11-06 14:55:02.656608: Yayy! New best EMA pseudo Dice: 0.683
160
+ 2023-11-06 14:55:06.917603:
161
+ 2023-11-06 14:55:06.917858: Epoch 17
162
+ 2023-11-06 14:55:06.918054: Current learning rate: 0.00985
163
+ 2023-11-06 15:00:55.067600: train_loss -0.8344
164
+ 2023-11-06 15:00:55.067766: val_loss -0.8468
165
+ 2023-11-06 15:00:55.067847: Pseudo dice [0.8721]
166
+ 2023-11-06 15:00:55.067933: Epoch time: 348.15 s
167
+ 2023-11-06 15:00:55.068005: Yayy! New best EMA pseudo Dice: 0.7019
168
+ 2023-11-06 15:00:59.470479:
169
+ 2023-11-06 15:00:59.470586: Epoch 18
170
+ 2023-11-06 15:00:59.470715: Current learning rate: 0.00984
171
+ 2023-11-06 15:06:47.194258: train_loss -0.8332
172
+ 2023-11-06 15:06:47.194415: val_loss -0.8075
173
+ 2023-11-06 15:06:47.194510: Pseudo dice [0.8373]
174
+ 2023-11-06 15:06:47.194605: Epoch time: 347.72 s
175
+ 2023-11-06 15:06:47.194696: Yayy! New best EMA pseudo Dice: 0.7155
176
+ 2023-11-06 15:06:51.755661:
177
+ 2023-11-06 15:06:51.755768: Epoch 19
178
+ 2023-11-06 15:06:51.755866: Current learning rate: 0.00983
179
+ 2023-11-06 15:12:40.349596: train_loss -0.8241
180
+ 2023-11-06 15:12:40.349748: val_loss -0.8207
181
+ 2023-11-06 15:12:40.349823: Pseudo dice [0.8472]
182
+ 2023-11-06 15:12:40.349904: Epoch time: 348.6 s
183
+ 2023-11-06 15:12:40.349972: Yayy! New best EMA pseudo Dice: 0.7287
184
+ 2023-11-06 15:12:44.525051:
185
+ 2023-11-06 15:12:44.525233: Epoch 20
186
+ 2023-11-06 15:12:44.525389: Current learning rate: 0.00982
187
+ 2023-11-06 15:18:32.910087: train_loss -0.8386
188
+ 2023-11-06 15:18:32.910239: val_loss -0.837
189
+ 2023-11-06 15:18:32.910345: Pseudo dice [0.8637]
190
+ 2023-11-06 15:18:32.910425: Epoch time: 348.39 s
191
+ 2023-11-06 15:18:32.910493: Yayy! New best EMA pseudo Dice: 0.7422
192
+ 2023-11-06 15:18:37.299002:
193
+ 2023-11-06 15:18:37.299156: Epoch 21
194
+ 2023-11-06 15:18:37.299279: Current learning rate: 0.00981
195
+ 2023-11-06 15:24:24.749255: train_loss -0.8419
196
+ 2023-11-06 15:24:24.749415: val_loss -0.8357
197
+ 2023-11-06 15:24:24.749510: Pseudo dice [0.8649]
198
+ 2023-11-06 15:24:24.749609: Epoch time: 347.45 s
199
+ 2023-11-06 15:24:24.749693: Yayy! New best EMA pseudo Dice: 0.7544
200
+ 2023-11-06 15:24:28.937052:
201
+ 2023-11-06 15:24:28.937247: Epoch 22
202
+ 2023-11-06 15:24:28.937376: Current learning rate: 0.0098
203
+ 2023-11-06 15:30:16.978192: train_loss -0.8486
204
+ 2023-11-06 15:30:16.978347: val_loss -0.8397
205
+ 2023-11-06 15:30:16.978422: Pseudo dice [0.8646]
206
+ 2023-11-06 15:30:16.978504: Epoch time: 348.04 s
207
+ 2023-11-06 15:30:16.978570: Yayy! New best EMA pseudo Dice: 0.7654
208
+ 2023-11-06 15:30:22.319774:
209
+ 2023-11-06 15:30:22.319953: Epoch 23
210
+ 2023-11-06 15:30:22.320115: Current learning rate: 0.00979
211
+ 2023-11-06 15:36:09.800687: train_loss -0.8519
212
+ 2023-11-06 15:36:09.800877: val_loss -0.8302
213
+ 2023-11-06 15:36:09.800952: Pseudo dice [0.8561]
214
+ 2023-11-06 15:36:09.801033: Epoch time: 347.48 s
215
+ 2023-11-06 15:36:09.801103: Yayy! New best EMA pseudo Dice: 0.7745
216
+ 2023-11-06 15:36:13.928569:
217
+ 2023-11-06 15:36:13.928675: Epoch 24
218
+ 2023-11-06 15:36:13.928787: Current learning rate: 0.00978
219
+ 2023-11-06 15:42:00.977297: train_loss -0.8476
220
+ 2023-11-06 15:42:00.977465: val_loss -0.8434
221
+ 2023-11-06 15:42:00.977544: Pseudo dice [0.8638]
222
+ 2023-11-06 15:42:00.977631: Epoch time: 347.05 s
223
+ 2023-11-06 15:42:00.977699: Yayy! New best EMA pseudo Dice: 0.7834
224
+ 2023-11-06 15:42:05.148325:
225
+ 2023-11-06 15:42:05.148434: Epoch 25
226
+ 2023-11-06 15:42:05.148533: Current learning rate: 0.00977
227
+ 2023-11-06 15:47:52.996786: train_loss -0.8506
228
+ 2023-11-06 15:47:52.996922: val_loss -0.825
229
+ 2023-11-06 15:47:52.997010: Pseudo dice [0.8549]
230
+ 2023-11-06 15:47:52.997092: Epoch time: 347.85 s
231
+ 2023-11-06 15:47:52.997161: Yayy! New best EMA pseudo Dice: 0.7906
232
+ 2023-11-06 15:47:57.325193:
233
+ 2023-11-06 15:47:57.325376: Epoch 26
234
+ 2023-11-06 15:47:57.325481: Current learning rate: 0.00977
235
+ 2023-11-06 15:53:44.927989: train_loss -0.8543
236
+ 2023-11-06 15:53:44.928154: val_loss -0.8316
237
+ 2023-11-06 15:53:44.928229: Pseudo dice [0.8515]
238
+ 2023-11-06 15:53:44.928311: Epoch time: 347.6 s
239
+ 2023-11-06 15:53:44.928380: Yayy! New best EMA pseudo Dice: 0.7967
240
+ 2023-11-06 15:53:49.272385:
241
+ 2023-11-06 15:53:49.272493: Epoch 27
242
+ 2023-11-06 15:53:49.272592: Current learning rate: 0.00976
243
+ 2023-11-06 15:59:36.309584: train_loss -0.8546
244
+ 2023-11-06 15:59:36.309735: val_loss -0.842
245
+ 2023-11-06 15:59:36.309810: Pseudo dice [0.8696]
246
+ 2023-11-06 15:59:36.309892: Epoch time: 347.04 s
247
+ 2023-11-06 15:59:36.309958: Yayy! New best EMA pseudo Dice: 0.804
248
+ 2023-11-06 15:59:40.570262:
249
+ 2023-11-06 15:59:40.570367: Epoch 28
250
+ 2023-11-06 15:59:40.570468: Current learning rate: 0.00975
251
+ 2023-11-06 16:05:26.928417: train_loss -0.8592
252
+ 2023-11-06 16:05:26.928568: val_loss -0.8382
253
+ 2023-11-06 16:05:26.928642: Pseudo dice [0.8677]
254
+ 2023-11-06 16:05:26.928722: Epoch time: 346.36 s
255
+ 2023-11-06 16:05:26.928801: Yayy! New best EMA pseudo Dice: 0.8103
256
+ 2023-11-06 16:05:31.233210:
257
+ 2023-11-06 16:05:31.233335: Epoch 29
258
+ 2023-11-06 16:05:31.233435: Current learning rate: 0.00974
259
+ 2023-11-06 16:11:18.284093: train_loss -0.8565
260
+ 2023-11-06 16:11:18.284294: val_loss -0.8399
261
+ 2023-11-06 16:11:18.284371: Pseudo dice [0.8618]
262
+ 2023-11-06 16:11:18.284455: Epoch time: 347.05 s
263
+ 2023-11-06 16:11:18.284523: Yayy! New best EMA pseudo Dice: 0.8155
264
+ 2023-11-06 16:11:22.544678:
265
+ 2023-11-06 16:11:22.544789: Epoch 30
266
+ 2023-11-06 16:11:22.544901: Current learning rate: 0.00973
267
+ 2023-11-06 16:17:09.310493: train_loss -0.8574
268
+ 2023-11-06 16:17:09.310651: val_loss -0.8474
269
+ 2023-11-06 16:17:09.310743: Pseudo dice [0.8724]
270
+ 2023-11-06 16:17:09.310829: Epoch time: 346.77 s
271
+ 2023-11-06 16:17:09.310901: Yayy! New best EMA pseudo Dice: 0.8212
272
+ 2023-11-06 16:17:13.927480:
273
+ 2023-11-06 16:17:13.927665: Epoch 31
274
+ 2023-11-06 16:17:13.927770: Current learning rate: 0.00972
275
+ 2023-11-06 16:23:00.794860: train_loss -0.8609
276
+ 2023-11-06 16:23:00.795005: val_loss -0.8461
277
+ 2023-11-06 16:23:00.795079: Pseudo dice [0.8779]
278
+ 2023-11-06 16:23:00.795160: Epoch time: 346.87 s
279
+ 2023-11-06 16:23:00.795227: Yayy! New best EMA pseudo Dice: 0.8268
280
+ 2023-11-06 16:23:05.627348:
281
+ 2023-11-06 16:23:05.627468: Epoch 32
282
+ 2023-11-06 16:23:05.627596: Current learning rate: 0.00971
283
+ 2023-11-06 16:28:52.193335: train_loss -0.8619
284
+ 2023-11-06 16:28:52.193494: val_loss -0.84
285
+ 2023-11-06 16:28:52.193573: Pseudo dice [0.8628]
286
+ 2023-11-06 16:28:52.193657: Epoch time: 346.57 s
287
+ 2023-11-06 16:28:52.193730: Yayy! New best EMA pseudo Dice: 0.8304
288
+ 2023-11-06 16:28:56.576761:
289
+ 2023-11-06 16:28:56.576892: Epoch 33
290
+ 2023-11-06 16:28:56.577016: Current learning rate: 0.0097
291
+ 2023-11-06 16:34:44.099342: train_loss -0.8612
292
+ 2023-11-06 16:34:44.099506: val_loss -0.8527
293
+ 2023-11-06 16:34:44.099581: Pseudo dice [0.8824]
294
+ 2023-11-06 16:34:44.099661: Epoch time: 347.52 s
295
+ 2023-11-06 16:34:44.099729: Yayy! New best EMA pseudo Dice: 0.8356
296
+ 2023-11-06 16:34:48.511588:
297
+ 2023-11-06 16:34:48.511715: Epoch 34
298
+ 2023-11-06 16:34:48.511816: Current learning rate: 0.00969
299
+ 2023-11-06 16:40:35.622980: train_loss -0.8604
300
+ 2023-11-06 16:40:35.623144: val_loss -0.8235
301
+ 2023-11-06 16:40:35.623220: Pseudo dice [0.8498]
302
+ 2023-11-06 16:40:35.623301: Epoch time: 347.11 s
303
+ 2023-11-06 16:40:35.623369: Yayy! New best EMA pseudo Dice: 0.8371
304
+ 2023-11-06 16:40:39.667731:
305
+ 2023-11-06 16:40:39.667855: Epoch 35
306
+ 2023-11-06 16:40:39.667958: Current learning rate: 0.00968
307
+ 2023-11-06 16:46:26.757342: train_loss -0.8451
308
+ 2023-11-06 16:46:26.757509: val_loss -0.8442
309
+ 2023-11-06 16:46:26.757584: Pseudo dice [0.872]
310
+ 2023-11-06 16:46:26.757663: Epoch time: 347.09 s
311
+ 2023-11-06 16:46:26.757730: Yayy! New best EMA pseudo Dice: 0.8405
312
+ 2023-11-06 16:46:31.185264:
313
+ 2023-11-06 16:46:31.185380: Epoch 36
314
+ 2023-11-06 16:46:31.185481: Current learning rate: 0.00968
315
+ 2023-11-06 16:52:18.064841: train_loss -0.8565
316
+ 2023-11-06 16:52:18.065006: val_loss -0.8599
317
+ 2023-11-06 16:52:18.065141: Pseudo dice [0.884]
318
+ 2023-11-06 16:52:18.065224: Epoch time: 346.88 s
319
+ 2023-11-06 16:52:18.065291: Yayy! New best EMA pseudo Dice: 0.8449
320
+ 2023-11-06 16:52:22.148149:
321
+ 2023-11-06 16:52:22.148263: Epoch 37
322
+ 2023-11-06 16:52:22.148363: Current learning rate: 0.00967
323
+ 2023-11-06 16:58:08.059065: train_loss -0.8607
324
+ 2023-11-06 16:58:08.059224: val_loss -0.8207
325
+ 2023-11-06 16:58:08.059306: Pseudo dice [0.8391]
326
+ 2023-11-06 16:58:08.059393: Epoch time: 345.91 s
327
+ 2023-11-06 16:58:09.289394:
328
+ 2023-11-06 16:58:09.289495: Epoch 38
329
+ 2023-11-06 16:58:09.289607: Current learning rate: 0.00966
330
+ 2023-11-06 17:03:56.389370: train_loss -0.8644
331
+ 2023-11-06 17:03:56.389533: val_loss -0.8446
332
+ 2023-11-06 17:03:56.389608: Pseudo dice [0.8697]
333
+ 2023-11-06 17:03:56.389689: Epoch time: 347.1 s
334
+ 2023-11-06 17:03:56.389758: Yayy! New best EMA pseudo Dice: 0.8468
335
+ 2023-11-06 17:04:00.624535:
336
+ 2023-11-06 17:04:00.624654: Epoch 39
337
+ 2023-11-06 17:04:00.624757: Current learning rate: 0.00965
338
+ 2023-11-06 17:09:47.187483: train_loss -0.8661
339
+ 2023-11-06 17:09:47.187651: val_loss -0.8558
340
+ 2023-11-06 17:09:47.187726: Pseudo dice [0.8843]
341
+ 2023-11-06 17:09:47.187806: Epoch time: 346.56 s
342
+ 2023-11-06 17:09:47.187879: Yayy! New best EMA pseudo Dice: 0.8506
343
+ 2023-11-06 17:09:51.400835:
344
+ 2023-11-06 17:09:51.400993: Epoch 40
345
+ 2023-11-06 17:09:51.401096: Current learning rate: 0.00964
346
+ 2023-11-06 17:15:37.917669: train_loss -0.8625
347
+ 2023-11-06 17:15:37.917822: val_loss -0.8535
348
+ 2023-11-06 17:15:37.917898: Pseudo dice [0.8735]
349
+ 2023-11-06 17:15:37.917987: Epoch time: 346.52 s
350
+ 2023-11-06 17:15:37.918060: Yayy! New best EMA pseudo Dice: 0.8529
351
+ 2023-11-06 17:15:42.126110:
352
+ 2023-11-06 17:15:42.126215: Epoch 41
353
+ 2023-11-06 17:15:42.126312: Current learning rate: 0.00963
354
+ 2023-11-06 17:21:27.873040: train_loss -0.8673
355
+ 2023-11-06 17:21:27.873201: val_loss -0.8652
356
+ 2023-11-06 17:21:27.873275: Pseudo dice [0.8924]
357
+ 2023-11-06 17:21:27.873356: Epoch time: 345.75 s
358
+ 2023-11-06 17:21:27.873423: Yayy! New best EMA pseudo Dice: 0.8568
359
+ 2023-11-06 17:21:32.022800:
360
+ 2023-11-06 17:21:32.022915: Epoch 42
361
+ 2023-11-06 17:21:32.023012: Current learning rate: 0.00962
362
+ 2023-11-06 17:27:18.094613: train_loss -0.8626
363
+ 2023-11-06 17:27:18.094796: val_loss -0.8518
364
+ 2023-11-06 17:27:18.094873: Pseudo dice [0.8829]
365
+ 2023-11-06 17:27:18.094956: Epoch time: 346.07 s
366
+ 2023-11-06 17:27:18.095026: Yayy! New best EMA pseudo Dice: 0.8594
367
+ 2023-11-06 17:27:23.461594:
368
+ 2023-11-06 17:27:23.461825: Epoch 43
369
+ 2023-11-06 17:27:23.461993: Current learning rate: 0.00961
370
+ 2023-11-06 17:33:08.676088: train_loss -0.8681
371
+ 2023-11-06 17:33:08.676230: val_loss -0.8466
372
+ 2023-11-06 17:33:08.676320: Pseudo dice [0.8722]
373
+ 2023-11-06 17:33:08.676508: Epoch time: 345.22 s
374
+ 2023-11-06 17:33:08.676633: Yayy! New best EMA pseudo Dice: 0.8607
375
+ 2023-11-06 17:33:12.694206:
376
+ 2023-11-06 17:33:12.694390: Epoch 44
377
+ 2023-11-06 17:33:12.694529: Current learning rate: 0.0096
378
+ 2023-11-06 17:38:58.956172: train_loss -0.8624
379
+ 2023-11-06 17:38:58.956326: val_loss -0.8241
380
+ 2023-11-06 17:38:58.956401: Pseudo dice [0.8488]
381
+ 2023-11-06 17:38:58.956481: Epoch time: 346.26 s
382
+ 2023-11-06 17:39:00.148017:
383
+ 2023-11-06 17:39:00.148174: Epoch 45
384
+ 2023-11-06 17:39:00.148306: Current learning rate: 0.00959
385
+ 2023-11-06 17:44:44.935666: train_loss -0.8629
386
+ 2023-11-06 17:44:44.935838: val_loss -0.8435
387
+ 2023-11-06 17:44:44.935911: Pseudo dice [0.8718]
388
+ 2023-11-06 17:44:44.935992: Epoch time: 344.79 s
389
+ 2023-11-06 17:44:44.936060: Yayy! New best EMA pseudo Dice: 0.8607
390
+ 2023-11-06 17:44:49.101461:
391
+ 2023-11-06 17:44:49.101594: Epoch 46
392
+ 2023-11-06 17:44:49.101709: Current learning rate: 0.00959
393
+ 2023-11-06 17:50:34.429782: train_loss -0.862
394
+ 2023-11-06 17:50:34.429919: val_loss -0.8481
395
+ 2023-11-06 17:50:34.430018: Pseudo dice [0.8672]
396
+ 2023-11-06 17:50:34.430097: Epoch time: 345.33 s
397
+ 2023-11-06 17:50:34.430166: Yayy! New best EMA pseudo Dice: 0.8614
398
+ 2023-11-06 17:50:38.462873:
399
+ 2023-11-06 17:50:38.463033: Epoch 47
400
+ 2023-11-06 17:50:38.463133: Current learning rate: 0.00958
401
+ 2023-11-06 17:56:23.844974: train_loss -0.8634
402
+ 2023-11-06 17:56:23.845133: val_loss -0.8459
403
+ 2023-11-06 17:56:23.845207: Pseudo dice [0.8729]
404
+ 2023-11-06 17:56:23.845287: Epoch time: 345.38 s
405
+ 2023-11-06 17:56:23.845353: Yayy! New best EMA pseudo Dice: 0.8625
406
+ 2023-11-06 17:56:29.381148:
407
+ 2023-11-06 17:56:29.381268: Epoch 48
408
+ 2023-11-06 17:56:29.381370: Current learning rate: 0.00957
409
+ 2023-11-06 18:02:16.288126: train_loss -0.8642
410
+ 2023-11-06 18:02:16.288275: val_loss -0.8535
411
+ 2023-11-06 18:02:16.288348: Pseudo dice [0.881]
412
+ 2023-11-06 18:02:16.288428: Epoch time: 346.91 s
413
+ 2023-11-06 18:02:16.288494: Yayy! New best EMA pseudo Dice: 0.8644
414
+ 2023-11-06 18:02:21.580729:
415
+ 2023-11-06 18:02:21.580965: Epoch 49
416
+ 2023-11-06 18:02:21.581068: Current learning rate: 0.00956
417
+ 2023-11-06 18:07:21.275004: train_loss -0.869
418
+ 2023-11-06 18:07:21.275177: val_loss -0.8512
419
+ 2023-11-06 18:07:21.275255: Pseudo dice [0.8767]
420
+ 2023-11-06 18:07:21.275334: Epoch time: 299.7 s
421
+ 2023-11-06 18:07:21.674096: Yayy! New best EMA pseudo Dice: 0.8656
422
+ 2023-11-06 18:07:25.821005:
423
+ 2023-11-06 18:07:25.821121: Epoch 50
424
+ 2023-11-06 18:07:25.821221: Current learning rate: 0.00955
425
+ 2023-11-06 18:11:28.166133: train_loss -0.872
426
+ 2023-11-06 18:11:28.166332: val_loss -0.8576
427
+ 2023-11-06 18:11:28.166409: Pseudo dice [0.8841]
428
+ 2023-11-06 18:11:28.166490: Epoch time: 242.35 s
429
+ 2023-11-06 18:11:28.166558: Yayy! New best EMA pseudo Dice: 0.8675
430
+ 2023-11-06 18:11:32.545946:
431
+ 2023-11-06 18:11:32.546069: Epoch 51
432
+ 2023-11-06 18:11:32.546170: Current learning rate: 0.00954
433
+ 2023-11-06 18:15:33.250956: train_loss -0.8678
434
+ 2023-11-06 18:15:33.251112: val_loss -0.8343
435
+ 2023-11-06 18:15:33.251188: Pseudo dice [0.8556]
436
+ 2023-11-06 18:15:33.251268: Epoch time: 240.71 s
437
+ 2023-11-06 18:15:34.465760:
438
+ 2023-11-06 18:15:34.465860: Epoch 52
439
+ 2023-11-06 18:15:34.465982: Current learning rate: 0.00953
440
+ 2023-11-06 18:19:35.028157: train_loss -0.8703
441
+ 2023-11-06 18:19:35.028330: val_loss -0.8351
442
+ 2023-11-06 18:19:35.028405: Pseudo dice [0.8524]
443
+ 2023-11-06 18:19:35.028486: Epoch time: 240.56 s
444
+ 2023-11-06 18:19:36.251864:
445
+ 2023-11-06 18:19:36.252039: Epoch 53
446
+ 2023-11-06 18:19:36.252138: Current learning rate: 0.00952
447
+ 2023-11-06 18:23:39.061217: train_loss -0.868
448
+ 2023-11-06 18:23:39.061371: val_loss -0.8437
449
+ 2023-11-06 18:23:39.061447: Pseudo dice [0.8694]
450
+ 2023-11-06 18:23:39.061527: Epoch time: 242.81 s
451
+ 2023-11-06 18:23:40.296390:
452
+ 2023-11-06 18:23:40.296501: Epoch 54
453
+ 2023-11-06 18:23:40.296600: Current learning rate: 0.00951
454
+ 2023-11-06 18:27:42.341584: train_loss -0.877
455
+ 2023-11-06 18:27:42.341748: val_loss -0.846
456
+ 2023-11-06 18:27:42.341822: Pseudo dice [0.8711]
457
+ 2023-11-06 18:27:42.341950: Epoch time: 242.05 s
458
+ 2023-11-06 18:27:43.571760:
459
+ 2023-11-06 18:27:43.571880: Epoch 55
460
+ 2023-11-06 18:27:43.571980: Current learning rate: 0.0095
461
+ 2023-11-06 18:31:00.481953: train_loss -0.8726
462
+ 2023-11-06 18:31:00.482113: val_loss -0.8625
463
+ 2023-11-06 18:31:00.482187: Pseudo dice [0.8882]
464
+ 2023-11-06 18:31:00.482269: Epoch time: 196.91 s
465
+ 2023-11-06 18:31:00.482337: Yayy! New best EMA pseudo Dice: 0.8681
466
+ 2023-11-06 18:31:04.215248:
467
+ 2023-11-06 18:31:04.215453: Epoch 56
468
+ 2023-11-06 18:31:04.215628: Current learning rate: 0.00949
469
+ 2023-11-06 18:34:00.599237: train_loss -0.8669
470
+ 2023-11-06 18:34:00.599401: val_loss -0.8475
471
+ 2023-11-06 18:34:00.599487: Pseudo dice [0.8697]
472
+ 2023-11-06 18:34:00.599569: Epoch time: 176.39 s
473
+ 2023-11-06 18:34:00.599637: Yayy! New best EMA pseudo Dice: 0.8683
474
+ 2023-11-06 18:34:04.322606:
475
+ 2023-11-06 18:34:04.322811: Epoch 57
476
+ 2023-11-06 18:34:04.322976: Current learning rate: 0.00949
477
+ 2023-11-06 18:37:00.717587: train_loss -0.87
478
+ 2023-11-06 18:37:00.717740: val_loss -0.8538
479
+ 2023-11-06 18:37:00.717831: Pseudo dice [0.8798]
480
+ 2023-11-06 18:37:00.717914: Epoch time: 176.4 s
481
+ 2023-11-06 18:37:00.717981: Yayy! New best EMA pseudo Dice: 0.8694
482
+ 2023-11-06 18:37:04.392341:
483
+ 2023-11-06 18:37:04.392533: Epoch 58
484
+ 2023-11-06 18:37:04.392726: Current learning rate: 0.00948
485
+ 2023-11-06 18:40:00.863341: train_loss -0.8766
486
+ 2023-11-06 18:40:00.863513: val_loss -0.8498
487
+ 2023-11-06 18:40:00.863588: Pseudo dice [0.8772]
488
+ 2023-11-06 18:40:00.863670: Epoch time: 176.47 s
489
+ 2023-11-06 18:40:00.863739: Yayy! New best EMA pseudo Dice: 0.8702
490
+ 2023-11-06 18:40:04.738446:
491
+ 2023-11-06 18:40:04.738628: Epoch 59
492
+ 2023-11-06 18:40:04.738788: Current learning rate: 0.00947
493
+ 2023-11-06 18:43:01.163184: train_loss -0.8755
494
+ 2023-11-06 18:43:01.163331: val_loss -0.8659
495
+ 2023-11-06 18:43:01.163406: Pseudo dice [0.8897]
496
+ 2023-11-06 18:43:01.163486: Epoch time: 176.43 s
497
+ 2023-11-06 18:43:01.163554: Yayy! New best EMA pseudo Dice: 0.8722
498
+ 2023-11-06 18:43:04.925010:
499
+ 2023-11-06 18:43:04.925187: Epoch 60
500
+ 2023-11-06 18:43:04.925321: Current learning rate: 0.00946
501
+ 2023-11-06 18:46:01.373083: train_loss -0.8753
502
+ 2023-11-06 18:46:01.373247: val_loss -0.8454
503
+ 2023-11-06 18:46:01.373333: Pseudo dice [0.8672]
504
+ 2023-11-06 18:46:01.373414: Epoch time: 176.45 s
505
+ 2023-11-06 18:46:02.581595:
506
+ 2023-11-06 18:46:02.581858: Epoch 61
507
+ 2023-11-06 18:46:02.582068: Current learning rate: 0.00945
508
+ 2023-11-06 18:48:59.052297: train_loss -0.8785
509
+ 2023-11-06 18:48:59.052459: val_loss -0.8629
510
+ 2023-11-06 18:48:59.052534: Pseudo dice [0.8868]
511
+ 2023-11-06 18:48:59.052615: Epoch time: 176.47 s
512
+ 2023-11-06 18:48:59.052684: Yayy! New best EMA pseudo Dice: 0.8732
513
+ 2023-11-06 18:49:02.888391:
514
+ 2023-11-06 18:49:02.888576: Epoch 62
515
+ 2023-11-06 18:49:02.888711: Current learning rate: 0.00944
516
+ 2023-11-06 18:51:59.286039: train_loss -0.8778
517
+ 2023-11-06 18:51:59.286191: val_loss -0.8577
518
+ 2023-11-06 18:51:59.286281: Pseudo dice [0.8829]
519
+ 2023-11-06 18:51:59.286364: Epoch time: 176.4 s
520
+ 2023-11-06 18:51:59.286432: Yayy! New best EMA pseudo Dice: 0.8742
521
+ 2023-11-06 18:52:03.100503:
522
+ 2023-11-06 18:52:03.100714: Epoch 63
523
+ 2023-11-06 18:52:03.100839: Current learning rate: 0.00943
524
+ 2023-11-06 18:54:59.529938: train_loss -0.8721
525
+ 2023-11-06 18:54:59.530098: val_loss -0.8551
526
+ 2023-11-06 18:54:59.530187: Pseudo dice [0.8758]
527
+ 2023-11-06 18:54:59.530271: Epoch time: 176.43 s
528
+ 2023-11-06 18:54:59.530339: Yayy! New best EMA pseudo Dice: 0.8743
529
+ 2023-11-06 18:55:03.201378:
530
+ 2023-11-06 18:55:03.201489: Epoch 64
531
+ 2023-11-06 18:55:03.201600: Current learning rate: 0.00942
532
+ 2023-11-06 18:57:59.642185: train_loss -0.874
533
+ 2023-11-06 18:57:59.642334: val_loss -0.8494
534
+ 2023-11-06 18:57:59.642408: Pseudo dice [0.8698]
535
+ 2023-11-06 18:57:59.642489: Epoch time: 176.44 s
536
+ 2023-11-06 18:58:00.855833:
537
+ 2023-11-06 18:58:00.855936: Epoch 65
538
+ 2023-11-06 18:58:00.856044: Current learning rate: 0.00941
539
+ 2023-11-06 19:00:57.294059: train_loss -0.8704
540
+ 2023-11-06 19:00:57.294223: val_loss -0.8465
541
+ 2023-11-06 19:00:57.294298: Pseudo dice [0.8712]
542
+ 2023-11-06 19:00:57.294379: Epoch time: 176.44 s
543
+ 2023-11-06 19:00:58.507846:
544
+ 2023-11-06 19:00:58.507949: Epoch 66
545
+ 2023-11-06 19:00:58.508061: Current learning rate: 0.0094
546
+ 2023-11-06 19:03:54.948499: train_loss -0.8728
547
+ 2023-11-06 19:03:54.948651: val_loss -0.8577
548
+ 2023-11-06 19:03:54.948726: Pseudo dice [0.8856]
549
+ 2023-11-06 19:03:54.948806: Epoch time: 176.44 s
550
+ 2023-11-06 19:03:54.948883: Yayy! New best EMA pseudo Dice: 0.8748
551
+ 2023-11-06 19:03:58.644292:
552
+ 2023-11-06 19:03:58.644467: Epoch 67
553
+ 2023-11-06 19:03:58.644624: Current learning rate: 0.00939
554
+ 2023-11-06 19:06:55.070551: train_loss -0.8793
555
+ 2023-11-06 19:06:55.070726: val_loss -0.8365
556
+ 2023-11-06 19:06:55.070812: Pseudo dice [0.8573]
557
+ 2023-11-06 19:06:55.070893: Epoch time: 176.43 s
558
+ 2023-11-06 19:06:56.467451:
559
+ 2023-11-06 19:06:56.467644: Epoch 68
560
+ 2023-11-06 19:06:56.467778: Current learning rate: 0.00939
561
+ 2023-11-06 19:09:52.921178: train_loss -0.8789
562
+ 2023-11-06 19:09:52.921346: val_loss -0.8588
563
+ 2023-11-06 19:09:52.921423: Pseudo dice [0.8809]
564
+ 2023-11-06 19:09:52.921506: Epoch time: 176.45 s
565
+ 2023-11-06 19:09:54.201082:
566
+ 2023-11-06 19:09:54.201194: Epoch 69
567
+ 2023-11-06 19:09:54.201310: Current learning rate: 0.00938
568
+ 2023-11-06 19:12:50.658302: train_loss -0.8837
569
+ 2023-11-06 19:12:50.658458: val_loss -0.8573
570
+ 2023-11-06 19:12:50.658547: Pseudo dice [0.8799]
571
+ 2023-11-06 19:12:50.658629: Epoch time: 176.46 s
572
+ 2023-11-06 19:12:51.892091:
573
+ 2023-11-06 19:12:51.892203: Epoch 70
574
+ 2023-11-06 19:12:51.892322: Current learning rate: 0.00937
575
+ 2023-11-06 19:15:48.326275: train_loss -0.8812
576
+ 2023-11-06 19:15:48.326422: val_loss -0.8346
577
+ 2023-11-06 19:15:48.326498: Pseudo dice [0.8563]
578
+ 2023-11-06 19:15:48.326578: Epoch time: 176.44 s
579
+ 2023-11-06 19:15:49.567336:
580
+ 2023-11-06 19:15:49.567445: Epoch 71
581
+ 2023-11-06 19:15:49.567556: Current learning rate: 0.00936
582
+ 2023-11-06 19:18:46.021291: train_loss -0.8699
583
+ 2023-11-06 19:18:46.021448: val_loss -0.8494
584
+ 2023-11-06 19:18:46.021525: Pseudo dice [0.8725]
585
+ 2023-11-06 19:18:46.021603: Epoch time: 176.45 s
586
+ 2023-11-06 19:18:47.250256:
587
+ 2023-11-06 19:18:47.250425: Epoch 72
588
+ 2023-11-06 19:18:47.250582: Current learning rate: 0.00935
589
+ 2023-11-06 19:21:43.663433: train_loss -0.8813
590
+ 2023-11-06 19:21:43.663581: val_loss -0.8628
591
+ 2023-11-06 19:21:43.663656: Pseudo dice [0.8877]
592
+ 2023-11-06 19:21:43.663738: Epoch time: 176.41 s
593
+ 2023-11-06 19:21:44.899969:
594
+ 2023-11-06 19:21:44.900087: Epoch 73
595
+ 2023-11-06 19:21:44.900187: Current learning rate: 0.00934
596
+ 2023-11-06 19:24:41.355096: train_loss -0.8848
597
+ 2023-11-06 19:24:41.355253: val_loss -0.8729
598
+ 2023-11-06 19:24:41.355342: Pseudo dice [0.8959]
599
+ 2023-11-06 19:24:41.355534: Epoch time: 176.46 s
600
+ 2023-11-06 19:24:41.355706: Yayy! New best EMA pseudo Dice: 0.8763
601
+ 2023-11-06 19:24:45.283551:
602
+ 2023-11-06 19:24:45.283751: Epoch 74
603
+ 2023-11-06 19:24:45.283895: Current learning rate: 0.00933
604
+ 2023-11-06 19:27:41.765228: train_loss -0.8846
605
+ 2023-11-06 19:27:41.765392: val_loss -0.872
606
+ 2023-11-06 19:27:41.765470: Pseudo dice [0.8978]
607
+ 2023-11-06 19:27:41.765549: Epoch time: 176.48 s
608
+ 2023-11-06 19:27:41.765617: Yayy! New best EMA pseudo Dice: 0.8784
609
+ 2023-11-06 19:27:45.539730:
610
+ 2023-11-06 19:27:45.539862: Epoch 75
611
+ 2023-11-06 19:27:45.539963: Current learning rate: 0.00932
612
+ 2023-11-06 19:30:42.034406: train_loss -0.8815
613
+ 2023-11-06 19:30:42.034564: val_loss -0.8291
614
+ 2023-11-06 19:30:42.034640: Pseudo dice [0.8516]
615
+ 2023-11-06 19:30:42.034730: Epoch time: 176.5 s
616
+ 2023-11-06 19:30:43.273652:
617
+ 2023-11-06 19:30:43.273767: Epoch 76
618
+ 2023-11-06 19:30:43.273881: Current learning rate: 0.00931
619
+ 2023-11-06 19:33:39.741374: train_loss -0.88
620
+ 2023-11-06 19:33:39.741546: val_loss -0.8499
621
+ 2023-11-06 19:33:39.741620: Pseudo dice [0.8731]
622
+ 2023-11-06 19:33:39.741700: Epoch time: 176.47 s
623
+ 2023-11-06 19:33:40.987435:
624
+ 2023-11-06 19:33:40.987546: Epoch 77
625
+ 2023-11-06 19:33:40.987658: Current learning rate: 0.0093
626
+ 2023-11-06 19:36:37.429662: train_loss -0.8696
627
+ 2023-11-06 19:36:37.429826: val_loss -0.8525
628
+ 2023-11-06 19:36:37.429902: Pseudo dice [0.8799]
629
+ 2023-11-06 19:36:37.429986: Epoch time: 176.44 s
630
+ 2023-11-06 19:36:38.686904:
631
+ 2023-11-06 19:36:38.687114: Epoch 78
632
+ 2023-11-06 19:36:38.687254: Current learning rate: 0.0093
633
+ 2023-11-06 19:39:35.086695: train_loss -0.8767
634
+ 2023-11-06 19:39:35.086842: val_loss -0.8465
635
+ 2023-11-06 19:39:35.086917: Pseudo dice [0.8672]
636
+ 2023-11-06 19:39:35.086996: Epoch time: 176.4 s
637
+ 2023-11-06 19:39:36.354577:
638
+ 2023-11-06 19:39:36.354711: Epoch 79
639
+ 2023-11-06 19:39:36.354815: Current learning rate: 0.00929
640
+ 2023-11-06 19:42:32.753545: train_loss -0.8829
641
+ 2023-11-06 19:42:32.753708: val_loss -0.8652
642
+ 2023-11-06 19:42:32.753783: Pseudo dice [0.8881]
643
+ 2023-11-06 19:42:32.753864: Epoch time: 176.4 s
644
+ 2023-11-06 19:42:34.011358:
645
+ 2023-11-06 19:42:34.011475: Epoch 80
646
+ 2023-11-06 19:42:34.011607: Current learning rate: 0.00928
647
+ 2023-11-06 19:45:30.363870: train_loss -0.8756
648
+ 2023-11-06 19:45:30.364022: val_loss -0.861
649
+ 2023-11-06 19:45:30.364097: Pseudo dice [0.8838]
650
+ 2023-11-06 19:45:30.364178: Epoch time: 176.35 s
651
+ 2023-11-06 19:45:31.820354:
652
+ 2023-11-06 19:45:31.820674: Epoch 81
653
+ 2023-11-06 19:45:31.820861: Current learning rate: 0.00927
654
+ 2023-11-06 19:48:28.178406: train_loss -0.8781
655
+ 2023-11-06 19:48:28.178582: val_loss -0.8509
656
+ 2023-11-06 19:48:28.178657: Pseudo dice [0.8773]
657
+ 2023-11-06 19:48:28.178750: Epoch time: 176.36 s
658
+ 2023-11-06 19:48:29.440243:
659
+ 2023-11-06 19:48:29.440436: Epoch 82
660
+ 2023-11-06 19:48:29.440587: Current learning rate: 0.00926
661
+ 2023-11-06 19:51:25.698233: train_loss -0.8825
662
+ 2023-11-06 19:51:25.698397: val_loss -0.8621
663
+ 2023-11-06 19:51:25.698472: Pseudo dice [0.8812]
664
+ 2023-11-06 19:51:25.698555: Epoch time: 176.26 s
665
+ 2023-11-06 19:51:26.887679:
666
+ 2023-11-06 19:51:26.887791: Epoch 83
667
+ 2023-11-06 19:51:26.887902: Current learning rate: 0.00925
668
+ 2023-11-06 19:54:23.190815: train_loss -0.8814
669
+ 2023-11-06 19:54:23.190965: val_loss -0.8689
670
+ 2023-11-06 19:54:23.191038: Pseudo dice [0.8931]
671
+ 2023-11-06 19:54:23.191119: Epoch time: 176.3 s
672
+ 2023-11-06 19:54:23.191189: Yayy! New best EMA pseudo Dice: 0.8791
673
+ 2023-11-06 19:54:26.910474:
674
+ 2023-11-06 19:54:26.910597: Epoch 84
675
+ 2023-11-06 19:54:26.910706: Current learning rate: 0.00924
676
+ 2023-11-06 19:57:23.203994: train_loss -0.8828
677
+ 2023-11-06 19:57:23.204146: val_loss -0.858
678
+ 2023-11-06 19:57:23.204239: Pseudo dice [0.8772]
679
+ 2023-11-06 19:57:23.204320: Epoch time: 176.29 s
680
+ 2023-11-06 19:57:24.369579:
681
+ 2023-11-06 19:57:24.369682: Epoch 85
682
+ 2023-11-06 19:57:24.369791: Current learning rate: 0.00923
683
+ 2023-11-06 20:00:20.694928: train_loss -0.881
684
+ 2023-11-06 20:00:20.695078: val_loss -0.8619
685
+ 2023-11-06 20:00:20.695153: Pseudo dice [0.8811]
686
+ 2023-11-06 20:00:20.695234: Epoch time: 176.33 s
687
+ 2023-11-06 20:00:20.695303: Yayy! New best EMA pseudo Dice: 0.8791
688
+ 2023-11-06 20:00:24.474195:
689
+ 2023-11-06 20:00:24.474414: Epoch 86
690
+ 2023-11-06 20:00:24.474562: Current learning rate: 0.00922
691
+ 2023-11-06 20:03:20.776078: train_loss -0.8807
692
+ 2023-11-06 20:03:20.776237: val_loss -0.8326
693
+ 2023-11-06 20:03:20.776313: Pseudo dice [0.8566]
694
+ 2023-11-06 20:03:20.776393: Epoch time: 176.3 s
695
+ 2023-11-06 20:03:22.165833:
696
+ 2023-11-06 20:03:22.166030: Epoch 87
697
+ 2023-11-06 20:03:22.166198: Current learning rate: 0.00921
698
+ 2023-11-06 20:06:18.511789: train_loss -0.8766
699
+ 2023-11-06 20:06:18.511945: val_loss -0.8587
700
+ 2023-11-06 20:06:18.512021: Pseudo dice [0.8834]
701
+ 2023-11-06 20:06:18.512102: Epoch time: 176.35 s
702
+ 2023-11-06 20:06:19.705326:
703
+ 2023-11-06 20:06:19.705506: Epoch 88
704
+ 2023-11-06 20:06:19.705671: Current learning rate: 0.0092
705
+ 2023-11-06 20:09:16.102405: train_loss -0.8611
706
+ 2023-11-06 20:09:16.102557: val_loss -0.8566
707
+ 2023-11-06 20:09:16.102633: Pseudo dice [0.8826]
708
+ 2023-11-06 20:09:16.102726: Epoch time: 176.4 s
709
+ 2023-11-06 20:09:17.325261:
710
+ 2023-11-06 20:09:17.325375: Epoch 89
711
+ 2023-11-06 20:09:17.325486: Current learning rate: 0.0092
712
+ 2023-11-06 20:12:13.733597: train_loss -0.8696
713
+ 2023-11-06 20:12:13.733757: val_loss -0.8598
714
+ 2023-11-06 20:12:13.733834: Pseudo dice [0.8753]
715
+ 2023-11-06 20:12:13.733914: Epoch time: 176.41 s
716
+ 2023-11-06 20:12:14.911806:
717
+ 2023-11-06 20:12:14.912000: Epoch 90
718
+ 2023-11-06 20:12:14.912168: Current learning rate: 0.00919
719
+ 2023-11-06 20:15:11.320757: train_loss -0.8844
720
+ 2023-11-06 20:15:11.320930: val_loss -0.874
721
+ 2023-11-06 20:15:11.321006: Pseudo dice [0.8968]
722
+ 2023-11-06 20:15:11.321088: Epoch time: 176.41 s
723
+ 2023-11-06 20:15:11.321157: Yayy! New best EMA pseudo Dice: 0.8797
724
+ 2023-11-06 20:15:15.020047:
725
+ 2023-11-06 20:15:15.020150: Epoch 91
726
+ 2023-11-06 20:15:15.020265: Current learning rate: 0.00918
727
+ 2023-11-06 20:18:11.393252: train_loss -0.8807
728
+ 2023-11-06 20:18:11.393438: val_loss -0.8466
729
+ 2023-11-06 20:18:11.393514: Pseudo dice [0.8746]
730
+ 2023-11-06 20:18:11.393595: Epoch time: 176.37 s
731
+ 2023-11-06 20:18:12.556402:
732
+ 2023-11-06 20:18:12.556512: Epoch 92
733
+ 2023-11-06 20:18:12.556623: Current learning rate: 0.00917
734
+ 2023-11-06 20:21:08.905787: train_loss -0.8848
735
+ 2023-11-06 20:21:08.905945: val_loss -0.8433
736
+ 2023-11-06 20:21:08.906022: Pseudo dice [0.8635]
737
+ 2023-11-06 20:21:08.906103: Epoch time: 176.35 s
738
+ 2023-11-06 20:21:10.072888:
739
+ 2023-11-06 20:21:10.073041: Epoch 93
740
+ 2023-11-06 20:21:10.073219: Current learning rate: 0.00916
741
+ 2023-11-06 20:24:06.373487: train_loss -0.8804
742
+ 2023-11-06 20:24:06.373657: val_loss -0.8726
743
+ 2023-11-06 20:24:06.373732: Pseudo dice [0.8952]
744
+ 2023-11-06 20:24:06.373815: Epoch time: 176.3 s
745
+ 2023-11-06 20:24:07.529997:
746
+ 2023-11-06 20:24:07.530097: Epoch 94
747
+ 2023-11-06 20:24:07.530207: Current learning rate: 0.00915
748
+ 2023-11-06 20:27:03.911087: train_loss -0.8804
749
+ 2023-11-06 20:27:03.911239: val_loss -0.8565
750
+ 2023-11-06 20:27:03.911312: Pseudo dice [0.8782]
751
+ 2023-11-06 20:27:03.911390: Epoch time: 176.38 s
752
+ 2023-11-06 20:27:05.089740:
753
+ 2023-11-06 20:27:05.089859: Epoch 95
754
+ 2023-11-06 20:27:05.089960: Current learning rate: 0.00914
755
+ 2023-11-06 20:30:01.458935: train_loss -0.8834
756
+ 2023-11-06 20:30:01.459089: val_loss -0.8613
757
+ 2023-11-06 20:30:01.459166: Pseudo dice [0.8833]
758
+ 2023-11-06 20:30:01.459248: Epoch time: 176.37 s
759
+ 2023-11-06 20:30:02.626966:
760
+ 2023-11-06 20:30:02.627091: Epoch 96
761
+ 2023-11-06 20:30:02.627204: Current learning rate: 0.00913
762
+ 2023-11-06 20:32:59.056989: train_loss -0.8829
763
+ 2023-11-06 20:32:59.057152: val_loss -0.8654
764
+ 2023-11-06 20:32:59.057229: Pseudo dice [0.8909]
765
+ 2023-11-06 20:32:59.057323: Epoch time: 176.43 s
766
+ 2023-11-06 20:32:59.057393: Yayy! New best EMA pseudo Dice: 0.8808
767
+ 2023-11-06 20:33:03.074519:
768
+ 2023-11-06 20:33:03.074691: Epoch 97
769
+ 2023-11-06 20:33:03.074805: Current learning rate: 0.00912
770
+ 2023-11-06 20:35:59.522542: train_loss -0.8854
771
+ 2023-11-06 20:35:59.522715: val_loss -0.8669
772
+ 2023-11-06 20:35:59.522791: Pseudo dice [0.8874]
773
+ 2023-11-06 20:35:59.522873: Epoch time: 176.45 s
774
+ 2023-11-06 20:35:59.522942: Yayy! New best EMA pseudo Dice: 0.8814
775
+ 2023-11-06 20:36:03.223871:
776
+ 2023-11-06 20:36:03.224048: Epoch 98
777
+ 2023-11-06 20:36:03.224208: Current learning rate: 0.00911
778
+ 2023-11-06 20:38:59.693738: train_loss -0.888
779
+ 2023-11-06 20:38:59.693887: val_loss -0.8758
780
+ 2023-11-06 20:38:59.693961: Pseudo dice [0.8969]
781
+ 2023-11-06 20:38:59.694042: Epoch time: 176.47 s
782
+ 2023-11-06 20:38:59.694110: Yayy! New best EMA pseudo Dice: 0.883
783
+ 2023-11-06 20:39:03.491852:
784
+ 2023-11-06 20:39:03.491959: Epoch 99
785
+ 2023-11-06 20:39:03.492074: Current learning rate: 0.0091
786
+ 2023-11-06 20:41:59.887711: train_loss -0.8891
787
+ 2023-11-06 20:41:59.887869: val_loss -0.864
788
+ 2023-11-06 20:41:59.887945: Pseudo dice [0.8848]
789
+ 2023-11-06 20:41:59.888027: Epoch time: 176.4 s
790
+ 2023-11-06 20:42:02.448756: Yayy! New best EMA pseudo Dice: 0.8832
791
+ 2023-11-06 20:42:06.350418:
792
+ 2023-11-06 20:42:06.350521: Epoch 100
793
+ 2023-11-06 20:42:06.350630: Current learning rate: 0.0091
794
+ 2023-11-06 20:45:02.788003: train_loss -0.8809
795
+ 2023-11-06 20:45:02.788165: val_loss -0.86
796
+ 2023-11-06 20:45:02.788240: Pseudo dice [0.882]
797
+ 2023-11-06 20:45:02.788321: Epoch time: 176.44 s
798
+ 2023-11-06 20:45:04.140347:
799
+ 2023-11-06 20:45:04.140464: Epoch 101
800
+ 2023-11-06 20:45:04.140565: Current learning rate: 0.00909
801
+ 2023-11-06 20:48:00.548010: train_loss -0.8832
802
+ 2023-11-06 20:48:00.548170: val_loss -0.8491
803
+ 2023-11-06 20:48:00.548246: Pseudo dice [0.8742]
804
+ 2023-11-06 20:48:00.548327: Epoch time: 176.41 s
805
+ 2023-11-06 20:48:01.732744:
806
+ 2023-11-06 20:48:01.732957: Epoch 102
807
+ 2023-11-06 20:48:01.733103: Current learning rate: 0.00908
808
+ 2023-11-06 20:50:58.159639: train_loss -0.877
809
+ 2023-11-06 20:50:58.159799: val_loss -0.8482
810
+ 2023-11-06 20:50:58.159888: Pseudo dice [0.8696]
811
+ 2023-11-06 20:50:58.159972: Epoch time: 176.43 s
812
+ 2023-11-06 20:50:59.342631:
813
+ 2023-11-06 20:50:59.342772: Epoch 103
814
+ 2023-11-06 20:50:59.342875: Current learning rate: 0.00907
815
+ 2023-11-06 20:53:55.725447: train_loss -0.8714
816
+ 2023-11-06 20:53:55.725600: val_loss -0.8409
817
+ 2023-11-06 20:53:55.725675: Pseudo dice [0.8632]
818
+ 2023-11-06 20:53:55.725756: Epoch time: 176.38 s
819
+ 2023-11-06 20:53:56.909122:
820
+ 2023-11-06 20:53:56.909225: Epoch 104
821
+ 2023-11-06 20:53:56.909337: Current learning rate: 0.00906
822
+ 2023-11-06 20:56:53.262245: train_loss -0.8787
823
+ 2023-11-06 20:56:53.262409: val_loss -0.8545
824
+ 2023-11-06 20:56:53.262486: Pseudo dice [0.8818]
825
+ 2023-11-06 20:56:53.262566: Epoch time: 176.35 s
826
+ 2023-11-06 20:56:54.449732:
827
+ 2023-11-06 20:56:54.449837: Epoch 105
828
+ 2023-11-06 20:56:54.449948: Current learning rate: 0.00905
829
+ 2023-11-06 20:59:50.785306: train_loss -0.8839
830
+ 2023-11-06 20:59:50.785467: val_loss -0.8415
831
+ 2023-11-06 20:59:50.785542: Pseudo dice [0.8669]
832
+ 2023-11-06 20:59:50.785623: Epoch time: 176.34 s
833
+ 2023-11-06 20:59:51.966831:
834
+ 2023-11-06 20:59:51.966952: Epoch 106
835
+ 2023-11-06 20:59:51.967052: Current learning rate: 0.00904
836
+ 2023-11-06 21:02:48.309617: train_loss -0.88
837
+ 2023-11-06 21:02:48.309768: val_loss -0.8594
838
+ 2023-11-06 21:02:48.309843: Pseudo dice [0.8835]
839
+ 2023-11-06 21:02:48.309925: Epoch time: 176.34 s
840
+ 2023-11-06 21:02:49.506139:
841
+ 2023-11-06 21:02:49.506242: Epoch 107
842
+ 2023-11-06 21:02:49.506354: Current learning rate: 0.00903
843
+ 2023-11-06 21:05:45.783998: train_loss -0.8839
844
+ 2023-11-06 21:05:45.784148: val_loss -0.8581
845
+ 2023-11-06 21:05:45.784224: Pseudo dice [0.8814]
846
+ 2023-11-06 21:05:45.784304: Epoch time: 176.28 s
847
+ 2023-11-06 21:05:47.150584:
848
+ 2023-11-06 21:05:47.150725: Epoch 108
849
+ 2023-11-06 21:05:47.150827: Current learning rate: 0.00902
850
+ 2023-11-06 21:08:43.547193: train_loss -0.8816
851
+ 2023-11-06 21:08:43.547377: val_loss -0.8534
852
+ 2023-11-06 21:08:43.547452: Pseudo dice [0.8743]
853
+ 2023-11-06 21:08:43.547533: Epoch time: 176.4 s
854
+ 2023-11-06 21:08:44.748945:
855
+ 2023-11-06 21:08:44.749061: Epoch 109
856
+ 2023-11-06 21:08:44.749174: Current learning rate: 0.00901
857
+ 2023-11-06 21:11:41.201078: train_loss -0.8812
858
+ 2023-11-06 21:11:41.201233: val_loss -0.8391
859
+ 2023-11-06 21:11:41.201322: Pseudo dice [0.8625]
860
+ 2023-11-06 21:11:41.201404: Epoch time: 176.45 s
861
+ 2023-11-06 21:11:42.391637:
862
+ 2023-11-06 21:11:42.391749: Epoch 110
863
+ 2023-11-06 21:11:42.391862: Current learning rate: 0.009
864
+ 2023-11-06 21:14:38.800912: train_loss -0.8673
865
+ 2023-11-06 21:14:38.801063: val_loss -0.8553
866
+ 2023-11-06 21:14:38.801137: Pseudo dice [0.8743]
867
+ 2023-11-06 21:14:38.801221: Epoch time: 176.41 s
868
+ 2023-11-06 21:14:39.989080:
869
+ 2023-11-06 21:14:39.989188: Epoch 111
870
+ 2023-11-06 21:14:39.989299: Current learning rate: 0.009
871
+ 2023-11-06 21:17:36.358418: train_loss -0.8816
872
+ 2023-11-06 21:17:36.358567: val_loss -0.8514
873
+ 2023-11-06 21:17:36.358642: Pseudo dice [0.872]
874
+ 2023-11-06 21:17:36.358733: Epoch time: 176.37 s
875
+ 2023-11-06 21:17:37.553327:
876
+ 2023-11-06 21:17:37.553550: Epoch 112
877
+ 2023-11-06 21:17:37.553718: Current learning rate: 0.00899
878
+ 2023-11-06 21:20:33.939162: train_loss -0.8878
879
+ 2023-11-06 21:20:33.939327: val_loss -0.8622
880
+ 2023-11-06 21:20:33.939407: Pseudo dice [0.889]
881
+ 2023-11-06 21:20:33.939494: Epoch time: 176.39 s
882
+ 2023-11-06 21:20:35.131583:
883
+ 2023-11-06 21:20:35.131686: Epoch 113
884
+ 2023-11-06 21:20:35.131798: Current learning rate: 0.00898
885
+ 2023-11-06 21:23:31.516380: train_loss -0.8791
886
+ 2023-11-06 21:23:31.516539: val_loss -0.8621
887
+ 2023-11-06 21:23:31.516615: Pseudo dice [0.8829]
888
+ 2023-11-06 21:23:31.516695: Epoch time: 176.39 s
889
+ 2023-11-06 21:23:32.699686:
890
+ 2023-11-06 21:23:32.699787: Epoch 114
891
+ 2023-11-06 21:23:32.699900: Current learning rate: 0.00897
892
+ 2023-11-06 21:26:29.109154: train_loss -0.8844
893
+ 2023-11-06 21:26:29.109318: val_loss -0.8657
894
+ 2023-11-06 21:26:29.109393: Pseudo dice [0.8861]
895
+ 2023-11-06 21:26:29.109473: Epoch time: 176.41 s
896
+ 2023-11-06 21:26:30.306096:
897
+ 2023-11-06 21:26:30.306210: Epoch 115
898
+ 2023-11-06 21:26:30.306322: Current learning rate: 0.00896
899
+ 2023-11-06 21:29:26.732492: train_loss -0.8875
900
+ 2023-11-06 21:29:26.732640: val_loss -0.859
901
+ 2023-11-06 21:29:26.732717: Pseudo dice [0.8805]
902
+ 2023-11-06 21:29:26.732799: Epoch time: 176.43 s
903
+ 2023-11-06 21:29:27.938756:
904
+ 2023-11-06 21:29:27.938874: Epoch 116
905
+ 2023-11-06 21:29:27.938974: Current learning rate: 0.00895
906
+ 2023-11-06 21:32:24.353024: train_loss -0.8903
907
+ 2023-11-06 21:32:24.353175: val_loss -0.8753
908
+ 2023-11-06 21:32:24.353260: Pseudo dice [0.8974]
909
+ 2023-11-06 21:32:24.353342: Epoch time: 176.42 s
910
+ 2023-11-06 21:32:25.560242:
911
+ 2023-11-06 21:32:25.560357: Epoch 117
912
+ 2023-11-06 21:32:25.560468: Current learning rate: 0.00894
913
+ 2023-11-06 21:35:21.973638: train_loss -0.8902
914
+ 2023-11-06 21:35:21.973792: val_loss -0.8693
915
+ 2023-11-06 21:35:21.973866: Pseudo dice [0.886]
916
+ 2023-11-06 21:35:21.973946: Epoch time: 176.41 s
917
+ 2023-11-06 21:35:23.182139:
918
+ 2023-11-06 21:35:23.182250: Epoch 118
919
+ 2023-11-06 21:35:23.182350: Current learning rate: 0.00893
920
+ 2023-11-06 21:38:19.585496: train_loss -0.8897
921
+ 2023-11-06 21:38:19.585651: val_loss -0.8649
922
+ 2023-11-06 21:38:19.585727: Pseudo dice [0.8867]
923
+ 2023-11-06 21:38:19.585807: Epoch time: 176.4 s
924
+ 2023-11-06 21:38:20.796273:
925
+ 2023-11-06 21:38:20.796504: Epoch 119
926
+ 2023-11-06 21:38:20.796669: Current learning rate: 0.00892
927
+ 2023-11-06 21:41:17.226026: train_loss -0.8891
928
+ 2023-11-06 21:41:17.226170: val_loss -0.8673
929
+ 2023-11-06 21:41:17.226246: Pseudo dice [0.8863]
930
+ 2023-11-06 21:41:17.226326: Epoch time: 176.43 s
931
+ 2023-11-06 21:41:18.439796:
932
+ 2023-11-06 21:41:18.439897: Epoch 120
933
+ 2023-11-06 21:41:18.440009: Current learning rate: 0.00891
934
+ 2023-11-06 21:44:14.883110: train_loss -0.8945
935
+ 2023-11-06 21:44:14.883265: val_loss -0.8616
936
+ 2023-11-06 21:44:14.883339: Pseudo dice [0.8777]
937
+ 2023-11-06 21:44:14.883422: Epoch time: 176.44 s
938
+ 2023-11-06 21:44:16.260323:
939
+ 2023-11-06 21:44:16.260516: Epoch 121
940
+ 2023-11-06 21:44:16.260681: Current learning rate: 0.0089
941
+ 2023-11-06 21:47:12.696718: train_loss -0.8875
942
+ 2023-11-06 21:47:12.696870: val_loss -0.8579
943
+ 2023-11-06 21:47:12.696945: Pseudo dice [0.8785]
944
+ 2023-11-06 21:47:12.697035: Epoch time: 176.44 s
945
+ 2023-11-06 21:47:13.909975:
946
+ 2023-11-06 21:47:13.910104: Epoch 122
947
+ 2023-11-06 21:47:13.910216: Current learning rate: 0.00889
948
+ 2023-11-06 21:50:10.344229: train_loss -0.8918
949
+ 2023-11-06 21:50:10.344399: val_loss -0.8618
950
+ 2023-11-06 21:50:10.344475: Pseudo dice [0.8803]
951
+ 2023-11-06 21:50:10.344555: Epoch time: 176.44 s
952
+ 2023-11-06 21:50:11.550210:
953
+ 2023-11-06 21:50:11.550336: Epoch 123
954
+ 2023-11-06 21:50:11.550436: Current learning rate: 0.00889
955
+ 2023-11-06 21:53:07.959642: train_loss -0.8855
956
+ 2023-11-06 21:53:07.959799: val_loss -0.8657
957
+ 2023-11-06 21:53:07.959883: Pseudo dice [0.8832]
958
+ 2023-11-06 21:53:07.959962: Epoch time: 176.41 s
959
+ 2023-11-06 21:53:09.170607:
960
+ 2023-11-06 21:53:09.170735: Epoch 124
961
+ 2023-11-06 21:53:09.170838: Current learning rate: 0.00888
962
+ 2023-11-06 21:56:05.627384: train_loss -0.8882
963
+ 2023-11-06 21:56:05.627546: val_loss -0.8598
964
+ 2023-11-06 21:56:05.627622: Pseudo dice [0.8831]
965
+ 2023-11-06 21:56:05.627704: Epoch time: 176.46 s
966
+ 2023-11-06 21:56:06.832944:
967
+ 2023-11-06 21:56:06.833049: Epoch 125
968
+ 2023-11-06 21:56:06.833164: Current learning rate: 0.00887
969
+ 2023-11-06 21:59:03.264384: train_loss -0.8912
970
+ 2023-11-06 21:59:03.264564: val_loss -0.8686
971
+ 2023-11-06 21:59:03.264639: Pseudo dice [0.8854]
972
+ 2023-11-06 21:59:03.264722: Epoch time: 176.43 s
973
+ 2023-11-06 21:59:04.472068:
974
+ 2023-11-06 21:59:04.472172: Epoch 126
975
+ 2023-11-06 21:59:04.472286: Current learning rate: 0.00886
976
+ 2023-11-06 22:02:00.886974: train_loss -0.8895
977
+ 2023-11-06 22:02:00.887155: val_loss -0.8609
978
+ 2023-11-06 22:02:00.887231: Pseudo dice [0.8808]
979
+ 2023-11-06 22:02:00.887313: Epoch time: 176.42 s
980
+ 2023-11-06 22:02:02.092915:
981
+ 2023-11-06 22:02:02.093020: Epoch 127
982
+ 2023-11-06 22:02:02.093131: Current learning rate: 0.00885
983
+ 2023-11-06 22:04:58.532643: train_loss -0.8942
984
+ 2023-11-06 22:04:58.532804: val_loss -0.856
985
+ 2023-11-06 22:04:58.532881: Pseudo dice [0.8764]
986
+ 2023-11-06 22:04:58.532962: Epoch time: 176.44 s
987
+ 2023-11-06 22:04:59.923069:
988
+ 2023-11-06 22:04:59.923262: Epoch 128
989
+ 2023-11-06 22:04:59.923422: Current learning rate: 0.00884
990
+ 2023-11-06 22:07:56.368618: train_loss -0.8911
991
+ 2023-11-06 22:07:56.368758: val_loss -0.8569
992
+ 2023-11-06 22:07:56.368834: Pseudo dice [0.8806]
993
+ 2023-11-06 22:07:56.368916: Epoch time: 176.45 s
994
+ 2023-11-06 22:07:57.576491:
995
+ 2023-11-06 22:07:57.576683: Epoch 129
996
+ 2023-11-06 22:07:57.576844: Current learning rate: 0.00883
997
+ 2023-11-06 22:10:54.039671: train_loss -0.8929
998
+ 2023-11-06 22:10:54.039825: val_loss -0.8671
999
+ 2023-11-06 22:10:54.039901: Pseudo dice [0.8913]
1000
+ 2023-11-06 22:10:54.039982: Epoch time: 176.46 s
1001
+ 2023-11-06 22:10:55.260360:
1002
+ 2023-11-06 22:10:55.260475: Epoch 130
1003
+ 2023-11-06 22:10:55.260589: Current learning rate: 0.00882
1004
+ 2023-11-06 22:13:51.699147: train_loss -0.8861
1005
+ 2023-11-06 22:13:51.699318: val_loss -0.8656
1006
+ 2023-11-06 22:13:51.699396: Pseudo dice [0.8831]
1007
+ 2023-11-06 22:13:51.699496: Epoch time: 176.44 s
1008
+ 2023-11-06 22:13:52.916131:
1009
+ 2023-11-06 22:13:52.916380: Epoch 131
1010
+ 2023-11-06 22:13:52.916545: Current learning rate: 0.00881
1011
+ 2023-11-06 22:16:49.273060: train_loss -0.8888
1012
+ 2023-11-06 22:16:49.273220: val_loss -0.8659
1013
+ 2023-11-06 22:16:49.273299: Pseudo dice [0.8879]
1014
+ 2023-11-06 22:16:49.273386: Epoch time: 176.36 s
1015
+ 2023-11-06 22:16:50.502149:
1016
+ 2023-11-06 22:16:50.502269: Epoch 132
1017
+ 2023-11-06 22:16:50.502374: Current learning rate: 0.0088
1018
+ 2023-11-06 22:19:46.882664: train_loss -0.8823
1019
+ 2023-11-06 22:19:46.882834: val_loss -0.827
1020
+ 2023-11-06 22:19:46.882970: Pseudo dice [0.8442]
1021
+ 2023-11-06 22:19:46.883127: Epoch time: 176.38 s
1022
+ 2023-11-06 22:19:48.110950:
1023
+ 2023-11-06 22:19:48.111143: Epoch 133
1024
+ 2023-11-06 22:19:48.111308: Current learning rate: 0.00879
1025
+ 2023-11-06 22:22:44.487801: train_loss -0.8822
1026
+ 2023-11-06 22:22:44.487956: val_loss -0.8556
1027
+ 2023-11-06 22:22:44.488037: Pseudo dice [0.882]
1028
+ 2023-11-06 22:22:44.488124: Epoch time: 176.38 s
1029
+ 2023-11-06 22:22:45.701651:
1030
+ 2023-11-06 22:22:45.701772: Epoch 134
1031
+ 2023-11-06 22:22:45.701874: Current learning rate: 0.00879
1032
+ 2023-11-06 22:25:42.355932: train_loss -0.8884
1033
+ 2023-11-06 22:25:42.356109: val_loss -0.8528
1034
+ 2023-11-06 22:25:42.356188: Pseudo dice [0.8776]
1035
+ 2023-11-06 22:25:42.356273: Epoch time: 176.66 s
1036
+ 2023-11-06 22:25:43.591560:
1037
+ 2023-11-06 22:25:43.591691: Epoch 135
1038
+ 2023-11-06 22:25:43.591795: Current learning rate: 0.00878
1039
+ 2023-11-06 22:28:40.011894: train_loss -0.8863
1040
+ 2023-11-06 22:28:40.012058: val_loss -0.8442
1041
+ 2023-11-06 22:28:40.012137: Pseudo dice [0.8561]
1042
+ 2023-11-06 22:28:40.012221: Epoch time: 176.42 s
1043
+ 2023-11-06 22:28:41.247354:
1044
+ 2023-11-06 22:28:41.247483: Epoch 136
1045
+ 2023-11-06 22:28:41.247587: Current learning rate: 0.00877
1046
+ 2023-11-06 22:31:37.682845: train_loss -0.8871
1047
+ 2023-11-06 22:31:37.683006: val_loss -0.8567
1048
+ 2023-11-06 22:31:37.683086: Pseudo dice [0.877]
1049
+ 2023-11-06 22:31:37.683170: Epoch time: 176.44 s
1050
+ 2023-11-06 22:31:38.919743:
1051
+ 2023-11-06 22:31:38.919875: Epoch 137
1052
+ 2023-11-06 22:31:38.919981: Current learning rate: 0.00876
1053
+ 2023-11-06 22:34:35.347654: train_loss -0.8916
1054
+ 2023-11-06 22:34:35.347814: val_loss -0.8603
1055
+ 2023-11-06 22:34:35.347895: Pseudo dice [0.883]
1056
+ 2023-11-06 22:34:35.347983: Epoch time: 176.43 s
1057
+ 2023-11-06 22:34:36.585523:
1058
+ 2023-11-06 22:34:36.585646: Epoch 138
1059
+ 2023-11-06 22:34:36.585748: Current learning rate: 0.00875
1060
+ 2023-11-06 22:37:33.009828: train_loss -0.8849
1061
+ 2023-11-06 22:37:33.010006: val_loss -0.8606
1062
+ 2023-11-06 22:37:33.010088: Pseudo dice [0.8825]
1063
+ 2023-11-06 22:37:33.010176: Epoch time: 176.43 s
1064
+ 2023-11-06 22:37:34.243649:
1065
+ 2023-11-06 22:37:34.243816: Epoch 139
1066
+ 2023-11-06 22:37:34.244002: Current learning rate: 0.00874
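The truncated log above reports a per-epoch "Pseudo dice" and an EMA (exponential moving average) pseudo Dice that drives the "New best" checkpoints. The printed EMA values are consistent with an update of the form ema = 0.9 * ema_prev + 0.1 * dice (for example, epoch 1: 0.9 * 0.0 + 0.1 * 0.5562 = 0.0556; epoch 2: 0.9 * 0.0556 + 0.1 * 0.7798 = 0.128). Below is a minimal Python sketch that re-derives the EMA from such a log; the file name, the 0.9 smoothing factor and the single-label regex are assumptions inferred from the numbers printed above, not taken from the nnU-Net source.

import re

# Hypothetical path: point this at the uploaded training log.
LOG_PATH = "training_log_2023_11_6_13_13_08.txt"

# Matches lines like "... Pseudo dice [0.5562]" (single foreground label, as in this dataset).
DICE_RE = re.compile(r"Pseudo dice \[([0-9.]+)\]")

def ema_pseudo_dice(log_path, smoothing=0.9):
    """Recompute the EMA pseudo Dice from the per-epoch values of an nnU-Net training log.

    The smoothing factor 0.9 is an assumption inferred from the values printed in the log above.
    """
    ema = None
    series = []
    with open(log_path) as f:
        for line in f:
            m = DICE_RE.search(line)
            if m:
                dice = float(m.group(1))
                ema = dice if ema is None else smoothing * ema + (1.0 - smoothing) * dice
                series.append((dice, ema))
    return series

if __name__ == "__main__":
    for epoch, (dice, ema) in enumerate(ema_pseudo_dice(LOG_PATH)):
        print(f"epoch {epoch}: pseudo dice {dice:.4f}, EMA {ema:.4f}")

Under that assumption, the recomputed series matches the first few EMA values logged above (0.0556, 0.128, 0.1938, ...).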
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__2d/plans.json ADDED
@@ -0,0 +1,454 @@
1
+ {
2
+ "dataset_name": "Dataset721_TSPrimeCTVP",
3
+ "plans_name": "nnUNetPlans",
4
+ "original_median_spacing_after_transp": [
5
+ 2.5,
6
+ 1.269531011581421,
7
+ 1.269531011581421
8
+ ],
9
+ "original_median_shape_after_transp": [
10
+ 241,
11
+ 512,
12
+ 512
13
+ ],
14
+ "image_reader_writer": "SimpleITKIO",
15
+ "transpose_forward": [
16
+ 0,
17
+ 1,
18
+ 2
19
+ ],
20
+ "transpose_backward": [
21
+ 0,
22
+ 1,
23
+ 2
24
+ ],
25
+ "configurations": {
26
+ "2d": {
27
+ "data_identifier": "nnUNetPlans_2d",
28
+ "preprocessor_name": "DefaultPreprocessor",
29
+ "batch_size": 12,
30
+ "patch_size": [
31
+ 512,
32
+ 512
33
+ ],
34
+ "median_image_size_in_voxels": [
35
+ 512.0,
36
+ 512.0
37
+ ],
38
+ "spacing": [
39
+ 1.269531011581421,
40
+ 1.269531011581421
41
+ ],
42
+ "normalization_schemes": [
43
+ "CTNormalization"
44
+ ],
45
+ "use_mask_for_norm": [
46
+ false
47
+ ],
48
+ "UNet_class_name": "PlainConvUNet",
49
+ "UNet_base_num_features": 32,
50
+ "n_conv_per_stage_encoder": [
51
+ 2,
52
+ 2,
53
+ 2,
54
+ 2,
55
+ 2,
56
+ 2,
57
+ 2,
58
+ 2
59
+ ],
60
+ "n_conv_per_stage_decoder": [
61
+ 2,
62
+ 2,
63
+ 2,
64
+ 2,
65
+ 2,
66
+ 2,
67
+ 2
68
+ ],
69
+ "num_pool_per_axis": [
70
+ 7,
71
+ 7
72
+ ],
73
+ "pool_op_kernel_sizes": [
74
+ [
75
+ 1,
76
+ 1
77
+ ],
78
+ [
79
+ 2,
80
+ 2
81
+ ],
82
+ [
83
+ 2,
84
+ 2
85
+ ],
86
+ [
87
+ 2,
88
+ 2
89
+ ],
90
+ [
91
+ 2,
92
+ 2
93
+ ],
94
+ [
95
+ 2,
96
+ 2
97
+ ],
98
+ [
99
+ 2,
100
+ 2
101
+ ],
102
+ [
103
+ 2,
104
+ 2
105
+ ]
106
+ ],
107
+ "conv_kernel_sizes": [
108
+ [
109
+ 3,
110
+ 3
111
+ ],
112
+ [
113
+ 3,
114
+ 3
115
+ ],
116
+ [
117
+ 3,
118
+ 3
119
+ ],
120
+ [
121
+ 3,
122
+ 3
123
+ ],
124
+ [
125
+ 3,
126
+ 3
127
+ ],
128
+ [
129
+ 3,
130
+ 3
131
+ ],
132
+ [
133
+ 3,
134
+ 3
135
+ ],
136
+ [
137
+ 3,
138
+ 3
139
+ ]
140
+ ],
141
+ "unet_max_num_features": 512,
142
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
143
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
144
+ "resampling_fn_data_kwargs": {
145
+ "is_seg": false,
146
+ "order": 3,
147
+ "order_z": 0,
148
+ "force_separate_z": null
149
+ },
150
+ "resampling_fn_seg_kwargs": {
151
+ "is_seg": true,
152
+ "order": 1,
153
+ "order_z": 0,
154
+ "force_separate_z": null
155
+ },
156
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
157
+ "resampling_fn_probabilities_kwargs": {
158
+ "is_seg": false,
159
+ "order": 1,
160
+ "order_z": 0,
161
+ "force_separate_z": null
162
+ },
163
+ "batch_dice": true
164
+ },
165
+ "3d_lowres": {
166
+ "data_identifier": "nnUNetPlans_3d_lowres",
167
+ "preprocessor_name": "DefaultPreprocessor",
168
+ "batch_size": 2,
169
+ "patch_size": [
170
+ 80,
171
+ 192,
172
+ 160
173
+ ],
174
+ "median_image_size_in_voxels": [
175
+ 130,
176
+ 275,
177
+ 275
178
+ ],
179
+ "spacing": [
180
+ 4.650736429273743,
181
+ 2.361701649461784,
182
+ 2.361701649461784
183
+ ],
184
+ "normalization_schemes": [
185
+ "CTNormalization"
186
+ ],
187
+ "use_mask_for_norm": [
188
+ false
189
+ ],
190
+ "UNet_class_name": "PlainConvUNet",
191
+ "UNet_base_num_features": 32,
192
+ "n_conv_per_stage_encoder": [
193
+ 2,
194
+ 2,
195
+ 2,
196
+ 2,
197
+ 2,
198
+ 2
199
+ ],
200
+ "n_conv_per_stage_decoder": [
201
+ 2,
202
+ 2,
203
+ 2,
204
+ 2,
205
+ 2
206
+ ],
207
+ "num_pool_per_axis": [
208
+ 4,
209
+ 5,
210
+ 5
211
+ ],
212
+ "pool_op_kernel_sizes": [
213
+ [
214
+ 1,
215
+ 1,
216
+ 1
217
+ ],
218
+ [
219
+ 2,
220
+ 2,
221
+ 2
222
+ ],
223
+ [
224
+ 2,
225
+ 2,
226
+ 2
227
+ ],
228
+ [
229
+ 2,
230
+ 2,
231
+ 2
232
+ ],
233
+ [
234
+ 2,
235
+ 2,
236
+ 2
237
+ ],
238
+ [
239
+ 1,
240
+ 2,
241
+ 2
242
+ ]
243
+ ],
244
+ "conv_kernel_sizes": [
245
+ [
246
+ 3,
247
+ 3,
248
+ 3
249
+ ],
250
+ [
251
+ 3,
252
+ 3,
253
+ 3
254
+ ],
255
+ [
256
+ 3,
257
+ 3,
258
+ 3
259
+ ],
260
+ [
261
+ 3,
262
+ 3,
263
+ 3
264
+ ],
265
+ [
266
+ 3,
267
+ 3,
268
+ 3
269
+ ],
270
+ [
271
+ 3,
272
+ 3,
273
+ 3
274
+ ]
275
+ ],
276
+ "unet_max_num_features": 320,
277
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
278
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
279
+ "resampling_fn_data_kwargs": {
280
+ "is_seg": false,
281
+ "order": 3,
282
+ "order_z": 0,
283
+ "force_separate_z": null
284
+ },
285
+ "resampling_fn_seg_kwargs": {
286
+ "is_seg": true,
287
+ "order": 1,
288
+ "order_z": 0,
289
+ "force_separate_z": null
290
+ },
291
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
292
+ "resampling_fn_probabilities_kwargs": {
293
+ "is_seg": false,
294
+ "order": 1,
295
+ "order_z": 0,
296
+ "force_separate_z": null
297
+ },
298
+ "batch_dice": false,
299
+ "next_stage": "3d_cascade_fullres"
300
+ },
301
+ "3d_fullres": {
302
+ "data_identifier": "nnUNetPlans_3d_fullres",
303
+ "preprocessor_name": "DefaultPreprocessor",
304
+ "batch_size": 2,
305
+ "patch_size": [
306
+ 80,
307
+ 192,
308
+ 160
309
+ ],
310
+ "median_image_size_in_voxels": [
311
+ 241.0,
312
+ 512.0,
313
+ 512.0
314
+ ],
315
+ "spacing": [
316
+ 2.5,
317
+ 1.269531011581421,
318
+ 1.269531011581421
319
+ ],
320
+ "normalization_schemes": [
321
+ "CTNormalization"
322
+ ],
323
+ "use_mask_for_norm": [
324
+ false
325
+ ],
326
+ "UNet_class_name": "PlainConvUNet",
327
+ "UNet_base_num_features": 32,
328
+ "n_conv_per_stage_encoder": [
329
+ 2,
330
+ 2,
331
+ 2,
332
+ 2,
333
+ 2,
334
+ 2
335
+ ],
336
+ "n_conv_per_stage_decoder": [
337
+ 2,
338
+ 2,
339
+ 2,
340
+ 2,
341
+ 2
342
+ ],
343
+ "num_pool_per_axis": [
344
+ 4,
345
+ 5,
346
+ 5
347
+ ],
348
+ "pool_op_kernel_sizes": [
349
+ [
350
+ 1,
351
+ 1,
352
+ 1
353
+ ],
354
+ [
355
+ 2,
356
+ 2,
357
+ 2
358
+ ],
359
+ [
360
+ 2,
361
+ 2,
362
+ 2
363
+ ],
364
+ [
365
+ 2,
366
+ 2,
367
+ 2
368
+ ],
369
+ [
370
+ 2,
371
+ 2,
372
+ 2
373
+ ],
374
+ [
375
+ 1,
376
+ 2,
377
+ 2
378
+ ]
379
+ ],
380
+ "conv_kernel_sizes": [
381
+ [
382
+ 3,
383
+ 3,
384
+ 3
385
+ ],
386
+ [
387
+ 3,
388
+ 3,
389
+ 3
390
+ ],
391
+ [
392
+ 3,
393
+ 3,
394
+ 3
395
+ ],
396
+ [
397
+ 3,
398
+ 3,
399
+ 3
400
+ ],
401
+ [
402
+ 3,
403
+ 3,
404
+ 3
405
+ ],
406
+ [
407
+ 3,
408
+ 3,
409
+ 3
410
+ ]
411
+ ],
412
+ "unet_max_num_features": 320,
413
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
414
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
415
+ "resampling_fn_data_kwargs": {
416
+ "is_seg": false,
417
+ "order": 3,
418
+ "order_z": 0,
419
+ "force_separate_z": null
420
+ },
421
+ "resampling_fn_seg_kwargs": {
422
+ "is_seg": true,
423
+ "order": 1,
424
+ "order_z": 0,
425
+ "force_separate_z": null
426
+ },
427
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
428
+ "resampling_fn_probabilities_kwargs": {
429
+ "is_seg": false,
430
+ "order": 1,
431
+ "order_z": 0,
432
+ "force_separate_z": null
433
+ },
434
+ "batch_dice": true
435
+ },
436
+ "3d_cascade_fullres": {
437
+ "inherits_from": "3d_fullres",
438
+ "previous_stage": "3d_lowres"
439
+ }
440
+ },
441
+ "experiment_planner_used": "ExperimentPlanner",
442
+ "label_manager": "LabelManager",
443
+ "foreground_intensity_properties_per_channel": {
444
+ "0": {
445
+ "max": 882.0,
446
+ "mean": 45.35713577270508,
447
+ "median": 48.0,
448
+ "min": -118.0,
449
+ "percentile_00_5": -48.0,
450
+ "percentile_99_5": 103.0,
451
+ "std": 26.203161239624023
452
+ }
453
+ }
454
+ }
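The block above closes out an nnUNetPlans plans.json: per-configuration settings (2d, 3d_lowres, 3d_fullres, 3d_cascade_fullres) plus the global foreground intensity statistics used for CT normalization. As a minimal sketch (the file path is a placeholder, not part of this repo), the relevant configuration can be read back with plain Python:

import json

# Placeholder path: point this at a local copy of the plans.json shown above.
with open("nnUNetPlans.json") as f:
    plans = json.load(f)

cfg = plans["configurations"]["3d_fullres"]
print(cfg["patch_size"])            # [80, 192, 160]
print(cfg["spacing"])               # [2.5, 1.269531011581421, 1.269531011581421]
print(cfg["pool_op_kernel_sizes"])  # six stages, starting with [1, 1, 1]
print(plans["foreground_intensity_properties_per_channel"]["0"])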
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "channel_names": {
3
+ "0": "CT"
4
+ },
5
+ "labels": {
6
+ "background": 0,
7
+ "Ctvp": 1
8
+ },
9
+ "numTraining": 60,
10
+ "file_ending": ".nii.gz",
11
+ "numTest": 0
12
+ }
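This dataset.json declares a single CT channel, one foreground label (Ctvp = 1), 60 training cases and .nii.gz as the file ending. A small sketch of reading it back follows; the path is a placeholder, and the 4-digit channel suffix mentioned in the comment reflects the usual nnU-Net raw-data naming convention rather than anything stored in this file:

import json

# Placeholder path to the dataset.json shown above.
with open("dataset.json") as f:
    ds = json.load(f)

print(ds["channel_names"])  # {"0": "CT"}
print(ds["labels"])         # {"background": 0, "Ctvp": 1}
print(ds["numTraining"])    # 60

# nnU-Net raw images typically carry a 4-digit channel suffix per channel_names entry,
# e.g. <case>_0000.nii.gz for channel 0 with this file_ending.
print("case_%04d%s" % (0, ds["file_ending"]))  # case_0000.nii.gz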
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json ADDED
@@ -0,0 +1,618 @@
1
+ {
2
+ "foreground_intensity_properties_per_channel": {
3
+ "0": {
4
+ "max": 882.0,
5
+ "mean": 45.35713577270508,
6
+ "median": 48.0,
7
+ "min": -118.0,
8
+ "percentile_00_5": -48.0,
9
+ "percentile_99_5": 103.0,
10
+ "std": 26.203161239624023
11
+ }
12
+ },
13
+ "median_relative_size_after_cropping": 1.0,
14
+ "shapes_after_crop": [
15
+ [
16
+ 230,
17
+ 512,
18
+ 512
19
+ ],
20
+ [
21
+ 240,
22
+ 512,
23
+ 512
24
+ ],
25
+ [
26
+ 260,
27
+ 512,
28
+ 512
29
+ ],
30
+ [
31
+ 215,
32
+ 512,
33
+ 512
34
+ ],
35
+ [
36
+ 260,
37
+ 512,
38
+ 512
39
+ ],
40
+ [
41
+ 220,
42
+ 512,
43
+ 512
44
+ ],
45
+ [
46
+ 210,
47
+ 512,
48
+ 512
49
+ ],
50
+ [
51
+ 240,
52
+ 512,
53
+ 512
54
+ ],
55
+ [
56
+ 265,
57
+ 512,
58
+ 512
59
+ ],
60
+ [
61
+ 229,
62
+ 512,
63
+ 512
64
+ ],
65
+ [
66
+ 230,
67
+ 512,
68
+ 512
69
+ ],
70
+ [
71
+ 243,
72
+ 512,
73
+ 512
74
+ ],
75
+ [
76
+ 230,
77
+ 512,
78
+ 512
79
+ ],
80
+ [
81
+ 250,
82
+ 512,
83
+ 512
84
+ ],
85
+ [
86
+ 250,
87
+ 512,
88
+ 512
89
+ ],
90
+ [
91
+ 245,
92
+ 512,
93
+ 512
94
+ ],
95
+ [
96
+ 235,
97
+ 512,
98
+ 512
99
+ ],
100
+ [
101
+ 250,
102
+ 512,
103
+ 512
104
+ ],
105
+ [
106
+ 242,
107
+ 512,
108
+ 512
109
+ ],
110
+ [
111
+ 241,
112
+ 512,
113
+ 512
114
+ ],
115
+ [
116
+ 210,
117
+ 512,
118
+ 512
119
+ ],
120
+ [
121
+ 255,
122
+ 512,
123
+ 512
124
+ ],
125
+ [
126
+ 246,
127
+ 512,
128
+ 512
129
+ ],
130
+ [
131
+ 240,
132
+ 512,
133
+ 512
134
+ ],
135
+ [
136
+ 245,
137
+ 512,
138
+ 512
139
+ ],
140
+ [
141
+ 250,
142
+ 512,
143
+ 512
144
+ ],
145
+ [
146
+ 249,
147
+ 512,
148
+ 512
149
+ ],
150
+ [
151
+ 210,
152
+ 512,
153
+ 512
154
+ ],
155
+ [
156
+ 210,
157
+ 512,
158
+ 512
159
+ ],
160
+ [
161
+ 244,
162
+ 512,
163
+ 512
164
+ ],
165
+ [
166
+ 230,
167
+ 512,
168
+ 512
169
+ ],
170
+ [
171
+ 235,
172
+ 512,
173
+ 512
174
+ ],
175
+ [
176
+ 260,
177
+ 512,
178
+ 512
179
+ ],
180
+ [
181
+ 241,
182
+ 512,
183
+ 512
184
+ ],
185
+ [
186
+ 220,
187
+ 512,
188
+ 512
189
+ ],
190
+ [
191
+ 240,
192
+ 512,
193
+ 512
194
+ ],
195
+ [
196
+ 190,
197
+ 512,
198
+ 512
199
+ ],
200
+ [
201
+ 255,
202
+ 512,
203
+ 512
204
+ ],
205
+ [
206
+ 230,
207
+ 512,
208
+ 512
209
+ ],
210
+ [
211
+ 255,
212
+ 512,
213
+ 512
214
+ ],
215
+ [
216
+ 236,
217
+ 512,
218
+ 512
219
+ ],
220
+ [
221
+ 241,
222
+ 512,
223
+ 512
224
+ ],
225
+ [
226
+ 220,
227
+ 512,
228
+ 512
229
+ ],
230
+ [
231
+ 241,
232
+ 512,
233
+ 512
234
+ ],
235
+ [
236
+ 245,
237
+ 512,
238
+ 512
239
+ ],
240
+ [
241
+ 241,
242
+ 512,
243
+ 512
244
+ ],
245
+ [
246
+ 250,
247
+ 512,
248
+ 512
249
+ ],
250
+ [
251
+ 210,
252
+ 512,
253
+ 512
254
+ ],
255
+ [
256
+ 250,
257
+ 512,
258
+ 512
259
+ ],
260
+ [
261
+ 266,
262
+ 512,
263
+ 512
264
+ ],
265
+ [
266
+ 220,
267
+ 512,
268
+ 512
269
+ ],
270
+ [
271
+ 230,
272
+ 512,
273
+ 512
274
+ ],
275
+ [
276
+ 280,
277
+ 512,
278
+ 512
279
+ ],
280
+ [
281
+ 260,
282
+ 512,
283
+ 512
284
+ ],
285
+ [
286
+ 245,
287
+ 512,
288
+ 512
289
+ ],
290
+ [
291
+ 220,
292
+ 512,
293
+ 512
294
+ ],
295
+ [
296
+ 240,
297
+ 512,
298
+ 512
299
+ ],
300
+ [
301
+ 250,
302
+ 512,
303
+ 512
304
+ ],
305
+ [
306
+ 226,
307
+ 512,
308
+ 512
309
+ ],
310
+ [
311
+ 240,
312
+ 512,
313
+ 512
314
+ ]
315
+ ],
316
+ "spacings": [
317
+ [
318
+ 2.5,
319
+ 1.269531011581421,
320
+ 1.269531011581421
321
+ ],
322
+ [
323
+ 2.5,
324
+ 1.269531011581421,
325
+ 1.269531011581421
326
+ ],
327
+ [
328
+ 2.5,
329
+ 1.269531011581421,
330
+ 1.269531011581421
331
+ ],
332
+ [
333
+ 2.5,
334
+ 1.269531011581421,
335
+ 1.269531011581421
336
+ ],
337
+ [
338
+ 2.5,
339
+ 1.269531011581421,
340
+ 1.269531011581421
341
+ ],
342
+ [
343
+ 2.5,
344
+ 1.269531011581421,
345
+ 1.269531011581421
346
+ ],
347
+ [
348
+ 2.5,
349
+ 1.269531011581421,
350
+ 1.269531011581421
351
+ ],
352
+ [
353
+ 2.5,
354
+ 1.269531011581421,
355
+ 1.269531011581421
356
+ ],
357
+ [
358
+ 2.5,
359
+ 1.269531011581421,
360
+ 1.269531011581421
361
+ ],
362
+ [
363
+ 2.5,
364
+ 1.269531011581421,
365
+ 1.269531011581421
366
+ ],
367
+ [
368
+ 2.5,
369
+ 1.269531011581421,
370
+ 1.269531011581421
371
+ ],
372
+ [
373
+ 2.5,
374
+ 1.269531011581421,
375
+ 1.269531011581421
376
+ ],
377
+ [
378
+ 2.5,
379
+ 1.269531011581421,
380
+ 1.269531011581421
381
+ ],
382
+ [
383
+ 2.5,
384
+ 1.269531011581421,
385
+ 1.269531011581421
386
+ ],
387
+ [
388
+ 2.5,
389
+ 1.269531011581421,
390
+ 1.269531011581421
391
+ ],
392
+ [
393
+ 2.5,
394
+ 1.269531011581421,
395
+ 1.269531011581421
396
+ ],
397
+ [
398
+ 2.5,
399
+ 1.269531011581421,
400
+ 1.269531011581421
401
+ ],
402
+ [
403
+ 2.5,
404
+ 1.269531011581421,
405
+ 1.269531011581421
406
+ ],
407
+ [
408
+ 2.5,
409
+ 1.269531011581421,
410
+ 1.269531011581421
411
+ ],
412
+ [
413
+ 2.5,
414
+ 1.269531011581421,
415
+ 1.269531011581421
416
+ ],
417
+ [
418
+ 2.5,
419
+ 1.269531011581421,
420
+ 1.269531011581421
421
+ ],
422
+ [
423
+ 2.5,
424
+ 1.269531011581421,
425
+ 1.269531011581421
426
+ ],
427
+ [
428
+ 2.5,
429
+ 1.269531011581421,
430
+ 1.269531011581421
431
+ ],
432
+ [
433
+ 2.5,
434
+ 1.269531011581421,
435
+ 1.269531011581421
436
+ ],
437
+ [
438
+ 2.5,
439
+ 1.269531011581421,
440
+ 1.269531011581421
441
+ ],
442
+ [
443
+ 2.5,
444
+ 1.269531011581421,
445
+ 1.269531011581421
446
+ ],
447
+ [
448
+ 2.5,
449
+ 1.269531011581421,
450
+ 1.269531011581421
451
+ ],
452
+ [
453
+ 2.5,
454
+ 1.269531011581421,
455
+ 1.269531011581421
456
+ ],
457
+ [
458
+ 2.5,
459
+ 1.269531011581421,
460
+ 1.269531011581421
461
+ ],
462
+ [
463
+ 2.5,
464
+ 1.269531011581421,
465
+ 1.269531011581421
466
+ ],
467
+ [
468
+ 2.5,
469
+ 1.269531011581421,
470
+ 1.269531011581421
471
+ ],
472
+ [
473
+ 2.5,
474
+ 1.269531011581421,
475
+ 1.269531011581421
476
+ ],
477
+ [
478
+ 2.5,
479
+ 1.269531011581421,
480
+ 1.269531011581421
481
+ ],
482
+ [
483
+ 2.5,
484
+ 1.269531011581421,
485
+ 1.269531011581421
486
+ ],
487
+ [
488
+ 2.5,
489
+ 1.269531011581421,
490
+ 1.269531011581421
491
+ ],
492
+ [
493
+ 2.5,
494
+ 1.269531011581421,
495
+ 1.269531011581421
496
+ ],
497
+ [
498
+ 2.5,
499
+ 1.269531011581421,
500
+ 1.269531011581421
501
+ ],
502
+ [
503
+ 2.5,
504
+ 1.269531011581421,
505
+ 1.269531011581421
506
+ ],
507
+ [
508
+ 2.5,
509
+ 1.269531011581421,
510
+ 1.269531011581421
511
+ ],
512
+ [
513
+ 2.5,
514
+ 1.269531011581421,
515
+ 1.269531011581421
516
+ ],
517
+ [
518
+ 2.5,
519
+ 1.269531011581421,
520
+ 1.269531011581421
521
+ ],
522
+ [
523
+ 2.5,
524
+ 1.269531011581421,
525
+ 1.269531011581421
526
+ ],
527
+ [
528
+ 2.5,
529
+ 1.269531011581421,
530
+ 1.269531011581421
531
+ ],
532
+ [
533
+ 2.5,
534
+ 1.269531011581421,
535
+ 1.269531011581421
536
+ ],
537
+ [
538
+ 2.5,
539
+ 1.269531011581421,
540
+ 1.269531011581421
541
+ ],
542
+ [
543
+ 2.5,
544
+ 1.269531011581421,
545
+ 1.269531011581421
546
+ ],
547
+ [
548
+ 2.5,
549
+ 1.269531011581421,
550
+ 1.269531011581421
551
+ ],
552
+ [
553
+ 2.5,
554
+ 1.269531011581421,
555
+ 1.269531011581421
556
+ ],
557
+ [
558
+ 2.5,
559
+ 1.269531011581421,
560
+ 1.269531011581421
561
+ ],
562
+ [
563
+ 2.5,
564
+ 1.269531011581421,
565
+ 1.269531011581421
566
+ ],
567
+ [
568
+ 2.5,
569
+ 1.269531011581421,
570
+ 1.269531011581421
571
+ ],
572
+ [
573
+ 2.5,
574
+ 1.269531011581421,
575
+ 1.269531011581421
576
+ ],
577
+ [
578
+ 2.5,
579
+ 1.269531011581421,
580
+ 1.269531011581421
581
+ ],
582
+ [
583
+ 2.5,
584
+ 1.269531011581421,
585
+ 1.269531011581421
586
+ ],
587
+ [
588
+ 2.5,
589
+ 1.269531011581421,
590
+ 1.269531011581421
591
+ ],
592
+ [
593
+ 2.5,
594
+ 1.269531011581421,
595
+ 1.269531011581421
596
+ ],
597
+ [
598
+ 2.5,
599
+ 1.269531011581421,
600
+ 1.269531011581421
601
+ ],
602
+ [
603
+ 2.5,
604
+ 1.269531011581421,
605
+ 1.269531011581421
606
+ ],
607
+ [
608
+ 2.5,
609
+ 1.269531011581421,
610
+ 1.269531011581421
611
+ ],
612
+ [
613
+ 2.5,
614
+ 1.269531011581421,
615
+ 1.269531011581421
616
+ ]
617
+ ]
618
+ }
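The dataset_fingerprint.json above records, per training case, the image shape after cropping and the voxel spacing, plus the pooled foreground intensity statistics. Taking the per-axis medians of those lists should reproduce the median_image_size_in_voxels and spacing entries seen in plans.json; a short numpy sketch (placeholder path) illustrates the idea:

import json
import numpy as np

# Placeholder path to the dataset_fingerprint.json shown above.
with open("dataset_fingerprint.json") as f:
    fp = json.load(f)

shapes = np.array(fp["shapes_after_crop"])  # one [z, y, x] entry per training case (60 here)
spacings = np.array(fp["spacings"])

# These medians should reproduce the values recorded in plans.json:
# median_image_size_in_voxels [241, 512, 512] and the 3d_fullres spacing.
print(np.median(shapes, axis=0))
print(np.median(spacings, axis=0))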
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ef878e91c4d092fcb1fac17cdd93047591aed3e55017dcd435c116ed378e8bf
3
+ size 246495777
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_latest.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4352832f3cd20321404d8b4a0e63276eedaa5f8509a2e4481db868b46483d3b
3
+ size 246496213
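checkpoint_best.pth and checkpoint_latest.pth are committed as Git LFS pointer files (oid and size only), so the ~246 MB weight files are not in the diff itself. They can be fetched with `git lfs pull` after cloning, or resolved directly from the Hub; the sketch below uses huggingface_hub's hf_hub_download with a placeholder repo_id:

from huggingface_hub import hf_hub_download

# repo_id below is a placeholder; substitute the repository that actually hosts these files.
local_path = hf_hub_download(
    repo_id="some-user/some-nnunet-repo",
    filename="Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth",
)
print(local_path)  # local path of the resolved checkpoint, not the 3-line pointer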
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/debug.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "_best_ema": "0.8600587025587667",
3
+ "batch_size": "2",
4
+ "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}",
5
+ "configuration_name": "3d_fullres",
6
+ "cudnn_version": 8500,
7
+ "current_epoch": "200",
8
+ "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7ff387736650>",
9
+ "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7ff387736610>",
10
+ "dataloader_train.num_processes": "12",
11
+ "dataloader_train.transform": "Compose ( [SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [80, 192, 160], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-0.5235987755982988, 0.5235987755982988), angle_y = (-0.5235987755982988, 0.5235987755982988), angle_z = (-0.5235987755982988, 0.5235987755982988), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = None ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
+ "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7ff387735650>",
13
+ "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7ff387736a50>",
14
+ "dataloader_val.num_processes": "6",
15
+ "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
+ "dataset_json": "{'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvp': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}",
17
+ "device": "cuda:0",
18
+ "disable_checkpointing": "False",
19
+ "fold": "0",
20
+ "folder_with_segs_from_previous_stage": "None",
21
+ "gpu_name": "NVIDIA GeForce GTX 1080 Ti",
22
+ "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7ff38c21ded0>",
23
+ "hostname": "vipadmin-Z10PE-D16-WS",
24
+ "inference_allowed_mirroring_axes": "(0, 1, 2)",
25
+ "initial_lr": "0.01",
26
+ "is_cascaded": "False",
27
+ "is_ddp": "False",
28
+ "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7ff387f857d0>",
29
+ "local_rank": "0",
30
+ "log_file": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_5_04_09_40.txt",
31
+ "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7ff38d58c7d0>",
32
+ "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
+ "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7ff38c234510>",
34
+ "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}, 'configuration': '3d_fullres', 'fold': 0, 'dataset_json': {'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvp': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
+ "network": "PlainConvUNet",
36
+ "num_epochs": "1000",
37
+ "num_input_channels": "1",
38
+ "num_iterations_per_epoch": "250",
39
+ "num_val_iterations_per_epoch": "50",
40
+ "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.008189723972222198\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
+ "output_folder": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0",
42
+ "output_folder_base": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres",
43
+ "oversample_foreground_percent": "0.33",
44
+ "plans_manager": "{'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}",
45
+ "preprocessed_dataset_folder": "./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/nnUNetPlans_3d_fullres",
46
+ "preprocessed_dataset_folder_base": "./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP",
47
+ "save_every": "50",
48
+ "torch_version": "2.0.1+cu117",
49
+ "unpack_dataset": "True",
50
+ "was_initialized": "True",
51
+ "weight_decay": "3e-05"
52
+ }
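debug.json captures the training setup for this fold: SGD with momentum 0.99, Nesterov, weight decay 3e-05, initial_lr 0.01, 1000 epochs and a PolyLRScheduler, with the snapshot taken at epoch 200 where the stored learning rate is 0.008189... . nnU-Net's polynomial schedule uses an exponent of 0.9, and the hedged sketch below reproduces both that stored value and the per-epoch rates printed in the training log; treat it as an illustration, not the scheduler's exact code:

def poly_lr(epoch, initial_lr=0.01, max_epochs=1000, exponent=0.9):
    # Polynomial decay of the base learning rate, as used by nnU-Net's PolyLRScheduler.
    return initial_lr * (1 - epoch / max_epochs) ** exponent

print(round(poly_lr(1), 5))    # 0.00999 -> "Current learning rate: 0.00999" at epoch 1 in the log
print(round(poly_lr(199), 6))  # ~0.00819 -> the 0.008189... snapshot in this debug.json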
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/network_architecture ADDED
@@ -0,0 +1,171 @@
1
+ digraph {
2
+ graph [bgcolor="#FFFFFF" color="#000000" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" pad="1.0,0.5" rankdir=LR]
3
+ node [color="#000000" fillcolor="#E8E8E8" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" shape=box style=filled]
4
+ edge [color="#000000" fontcolor="#000000" fontname=Times fontsize=10 style=solid]
5
+ "/outputs/109" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
6
+ "/outputs/110" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
7
+ "/outputs/111" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
8
+ "/outputs/112" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
9
+ "/outputs/113" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
10
+ "/outputs/114" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
11
+ "/outputs/115" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
12
+ "/outputs/116" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
13
+ "/outputs/117" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
14
+ "/outputs/118" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
15
+ "/outputs/119" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
16
+ "/outputs/120" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
17
+ "/outputs/121" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
18
+ "/outputs/122" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
19
+ "/outputs/123" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
20
+ "/outputs/124" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
21
+ "/outputs/125" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
22
+ "/outputs/126" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
23
+ "/outputs/127" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
24
+ "/outputs/128" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
25
+ "/outputs/129" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
26
+ "/outputs/130" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
27
+ "/outputs/131" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
28
+ "/outputs/132" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
29
+ "/outputs/133" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
30
+ "/outputs/134" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
31
+ "/outputs/135" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
32
+ "/outputs/136" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
33
+ "/outputs/137" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
34
+ "/outputs/138" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
35
+ "/outputs/139" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 2, 2]</td></tr></table>>]
36
+ "/outputs/140" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
37
+ "/outputs/141" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
38
+ "/outputs/142" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
39
+ "/outputs/143" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
40
+ "/outputs/144" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
41
+ "/outputs/145" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [1, 2, 2], stride: [1, 2, 2]</td></tr></table>>]
42
+ "/outputs/146" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
43
+ "/outputs/147" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
44
+ "/outputs/148" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
45
+ "/outputs/149" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
46
+ "/outputs/150" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
47
+ "/outputs/151" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
48
+ "/outputs/152" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
49
+ "/outputs/153" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
50
+ "/outputs/154" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
51
+ "/outputs/155" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
52
+ "/outputs/156" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
53
+ "/outputs/157" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
54
+ "/outputs/158" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
55
+ "/outputs/159" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
56
+ "/outputs/160" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
57
+ "/outputs/161" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
58
+ "/outputs/162" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
59
+ "/outputs/163" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
60
+ "/outputs/164" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
61
+ "/outputs/165" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
62
+ "/outputs/166" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
63
+ "/outputs/167" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
64
+ "/outputs/168" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
65
+ "/outputs/169" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
66
+ "/outputs/170" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
67
+ "/outputs/171" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
68
+ "/outputs/172" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
69
+ "/outputs/173" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
70
+ "/outputs/174" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
71
+ "/outputs/175" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
72
+ "/outputs/176" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
73
+ "/outputs/177" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
74
+ "/outputs/178" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
75
+ "/outputs/179" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
76
+ "/outputs/180" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
77
+ "/outputs/181" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
78
+ "/outputs/182" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
79
+ "/outputs/183" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
80
+ "/outputs/184" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
81
+ "/outputs/185" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
82
+ "/outputs/186" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
83
+ "/outputs/187" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
84
+ "/outputs/188" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
85
+ "/outputs/189" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
86
+ "/outputs/109" -> "/outputs/110" [label="1x32x80x192x160"]
87
+ "/outputs/110" -> "/outputs/111" [label="1x32x80x192x160"]
88
+ "/outputs/111" -> "/outputs/112" [label="1x32x80x192x160"]
89
+ "/outputs/112" -> "/outputs/113" [label="1x32x80x192x160"]
90
+ "/outputs/113" -> "/outputs/114" [label="1x32x80x192x160"]
91
+ "/outputs/114" -> "/outputs/115" [label="1x32x80x192x160"]
92
+ "/outputs/114" -> "/outputs/182" [label="1x32x80x192x160"]
93
+ "/outputs/115" -> "/outputs/116" [label="1x64x40x96x80"]
94
+ "/outputs/116" -> "/outputs/117" [label="1x64x40x96x80"]
95
+ "/outputs/117" -> "/outputs/118" [label="1x64x40x96x80"]
96
+ "/outputs/118" -> "/outputs/119" [label="1x64x40x96x80"]
97
+ "/outputs/119" -> "/outputs/120" [label="1x64x40x96x80"]
98
+ "/outputs/120" -> "/outputs/121" [label="1x64x40x96x80"]
99
+ "/outputs/120" -> "/outputs/173" [label="1x64x40x96x80"]
100
+ "/outputs/121" -> "/outputs/122" [label="1x128x20x48x40"]
101
+ "/outputs/122" -> "/outputs/123" [label="1x128x20x48x40"]
102
+ "/outputs/123" -> "/outputs/124" [label="1x128x20x48x40"]
103
+ "/outputs/124" -> "/outputs/125" [label="1x128x20x48x40"]
104
+ "/outputs/125" -> "/outputs/126" [label="1x128x20x48x40"]
105
+ "/outputs/126" -> "/outputs/127" [label="1x128x20x48x40"]
106
+ "/outputs/126" -> "/outputs/164" [label="1x128x20x48x40"]
107
+ "/outputs/127" -> "/outputs/128" [label="1x256x10x24x20"]
108
+ "/outputs/128" -> "/outputs/129" [label="1x256x10x24x20"]
109
+ "/outputs/129" -> "/outputs/130" [label="1x256x10x24x20"]
110
+ "/outputs/130" -> "/outputs/131" [label="1x256x10x24x20"]
111
+ "/outputs/131" -> "/outputs/132" [label="1x256x10x24x20"]
112
+ "/outputs/132" -> "/outputs/133" [label="1x256x10x24x20"]
113
+ "/outputs/132" -> "/outputs/155" [label="1x256x10x24x20"]
114
+ "/outputs/133" -> "/outputs/134" [label="1x320x5x12x10"]
115
+ "/outputs/134" -> "/outputs/135" [label="1x320x5x12x10"]
116
+ "/outputs/135" -> "/outputs/136" [label="1x320x5x12x10"]
117
+ "/outputs/136" -> "/outputs/137" [label="1x320x5x12x10"]
118
+ "/outputs/137" -> "/outputs/138" [label="1x320x5x12x10"]
119
+ "/outputs/138" -> "/outputs/139" [label="1x320x5x12x10"]
120
+ "/outputs/138" -> "/outputs/146" [label="1x320x5x12x10"]
121
+ "/outputs/139" -> "/outputs/140" [label="1x320x5x6x5"]
122
+ "/outputs/140" -> "/outputs/141" [label="1x320x5x6x5"]
123
+ "/outputs/141" -> "/outputs/142" [label="1x320x5x6x5"]
124
+ "/outputs/142" -> "/outputs/143" [label="1x320x5x6x5"]
125
+ "/outputs/143" -> "/outputs/144" [label="1x320x5x6x5"]
126
+ "/outputs/144" -> "/outputs/145" [label="1x320x5x6x5"]
127
+ "/outputs/145" -> "/outputs/146" [label="1x320x5x12x10"]
128
+ "/outputs/146" -> "/outputs/147" [label="1x640x5x12x10"]
129
+ "/outputs/147" -> "/outputs/148" [label="1x320x5x12x10"]
130
+ "/outputs/148" -> "/outputs/149" [label="1x320x5x12x10"]
131
+ "/outputs/149" -> "/outputs/150" [label="1x320x5x12x10"]
132
+ "/outputs/150" -> "/outputs/151" [label="1x320x5x12x10"]
133
+ "/outputs/151" -> "/outputs/152" [label="1x320x5x12x10"]
134
+ "/outputs/152" -> "/outputs/153" [label="1x320x5x12x10"]
135
+ "/outputs/152" -> "/outputs/154" [label="1x320x5x12x10"]
136
+ "/outputs/154" -> "/outputs/155" [label="1x256x10x24x20"]
137
+ "/outputs/155" -> "/outputs/156" [label="1x512x10x24x20"]
138
+ "/outputs/156" -> "/outputs/157" [label="1x256x10x24x20"]
139
+ "/outputs/157" -> "/outputs/158" [label="1x256x10x24x20"]
140
+ "/outputs/158" -> "/outputs/159" [label="1x256x10x24x20"]
141
+ "/outputs/159" -> "/outputs/160" [label="1x256x10x24x20"]
142
+ "/outputs/160" -> "/outputs/161" [label="1x256x10x24x20"]
143
+ "/outputs/161" -> "/outputs/162" [label="1x256x10x24x20"]
144
+ "/outputs/161" -> "/outputs/163" [label="1x256x10x24x20"]
145
+ "/outputs/163" -> "/outputs/164" [label="1x128x20x48x40"]
146
+ "/outputs/164" -> "/outputs/165" [label="1x256x20x48x40"]
147
+ "/outputs/165" -> "/outputs/166" [label="1x128x20x48x40"]
148
+ "/outputs/166" -> "/outputs/167" [label="1x128x20x48x40"]
149
+ "/outputs/167" -> "/outputs/168" [label="1x128x20x48x40"]
150
+ "/outputs/168" -> "/outputs/169" [label="1x128x20x48x40"]
151
+ "/outputs/169" -> "/outputs/170" [label="1x128x20x48x40"]
152
+ "/outputs/170" -> "/outputs/171" [label="1x128x20x48x40"]
153
+ "/outputs/170" -> "/outputs/172" [label="1x128x20x48x40"]
154
+ "/outputs/172" -> "/outputs/173" [label="1x64x40x96x80"]
155
+ "/outputs/173" -> "/outputs/174" [label="1x128x40x96x80"]
156
+ "/outputs/174" -> "/outputs/175" [label="1x64x40x96x80"]
157
+ "/outputs/175" -> "/outputs/176" [label="1x64x40x96x80"]
158
+ "/outputs/176" -> "/outputs/177" [label="1x64x40x96x80"]
159
+ "/outputs/177" -> "/outputs/178" [label="1x64x40x96x80"]
160
+ "/outputs/178" -> "/outputs/179" [label="1x64x40x96x80"]
161
+ "/outputs/179" -> "/outputs/180" [label="1x64x40x96x80"]
162
+ "/outputs/179" -> "/outputs/181" [label="1x64x40x96x80"]
163
+ "/outputs/181" -> "/outputs/182" [label="1x32x80x192x160"]
164
+ "/outputs/182" -> "/outputs/183" [label="1x64x80x192x160"]
165
+ "/outputs/183" -> "/outputs/184" [label="1x32x80x192x160"]
166
+ "/outputs/184" -> "/outputs/185" [label="1x32x80x192x160"]
167
+ "/outputs/185" -> "/outputs/186" [label="1x32x80x192x160"]
168
+ "/outputs/186" -> "/outputs/187" [label="1x32x80x192x160"]
169
+ "/outputs/187" -> "/outputs/188" [label="1x32x80x192x160"]
170
+ "/outputs/188" -> "/outputs/189" [label="1x32x80x192x160"]
171
+ }
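The network_architecture file is a Graphviz DOT rendering of the PlainConvUNet used here: strided 3x3x3 convolutions in the encoder, transposed convolutions and skip concatenations in the decoder, with tensor shapes annotated on the edges. The training log further down notes that automatic plotting failed because the dot executable was not on PATH; with Graphviz installed the graph can be rendered manually, for example (placeholder file names):

import subprocess

# Placeholder file names; requires the Graphviz "dot" executable on PATH.
subprocess.run(
    ["dot", "-Tpng", "network_architecture", "-o", "network_architecture.png"],
    check=True,
)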
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/progress.png ADDED
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_1_12_29_08.txt ADDED
@@ -0,0 +1,1654 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_fullres
10
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}
14
+
15
+ 2023-11-01 12:29:10.475205: unpacking dataset...
16
+ 2023-11-01 12:30:09.566403: unpacking done...
17
+ 2023-11-01 12:30:09.567025: do_dummy_2d_data_aug: False
18
+ 2023-11-01 12:30:09.567583: Creating new 5-fold cross-validation split...
19
+ 2023-11-01 12:30:09.568629: Desired fold for training: 0
20
+ 2023-11-01 12:30:09.568710: This split has 48 training and 12 validation cases.
21
+ 2023-11-01 12:30:42.307810: Unable to plot network architecture:
22
+ 2023-11-01 12:30:42.307939: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
23
+ 2023-11-01 12:30:42.412204:
24
+ 2023-11-01 12:30:42.412276: Epoch 0
25
+ 2023-11-01 12:30:42.412391: Current learning rate: 0.01
26
+ 2023-11-01 12:38:11.787683: train_loss -0.0271
27
+ 2023-11-01 12:38:11.787974: val_loss -0.2688
28
+ 2023-11-01 12:38:11.788103: Pseudo dice [0.0]
29
+ 2023-11-01 12:38:11.788262: Epoch time: 449.38 s
30
+ 2023-11-01 12:38:11.788343: Yayy! New best EMA pseudo Dice: 0.0
31
+ 2023-11-01 12:38:13.221239:
32
+ 2023-11-01 12:38:13.221350: Epoch 1
33
+ 2023-11-01 12:38:13.221466: Current learning rate: 0.00999
34
+ 2023-11-01 12:43:49.587806: train_loss -0.5354
35
+ 2023-11-01 12:43:49.587957: val_loss -0.6584
36
+ 2023-11-01 12:43:49.588035: Pseudo dice [0.7012]
37
+ 2023-11-01 12:43:49.588122: Epoch time: 336.37 s
38
+ 2023-11-01 12:43:49.588194: Yayy! New best EMA pseudo Dice: 0.0701
39
+ 2023-11-01 12:43:53.239235:
40
+ 2023-11-01 12:43:53.239353: Epoch 2
41
+ 2023-11-01 12:43:53.239463: Current learning rate: 0.00998
42
+ 2023-11-01 12:49:29.290002: train_loss -0.6894
43
+ 2023-11-01 12:49:29.290153: val_loss -0.6918
44
+ 2023-11-01 12:49:29.290246: Pseudo dice [0.716]
45
+ 2023-11-01 12:49:29.290331: Epoch time: 336.05 s
46
+ 2023-11-01 12:49:29.290404: Yayy! New best EMA pseudo Dice: 0.1347
47
+ 2023-11-01 12:49:32.340260:
48
+ 2023-11-01 12:49:32.340369: Epoch 3
49
+ 2023-11-01 12:49:32.340484: Current learning rate: 0.00997
50
+ 2023-11-01 12:55:08.478726: train_loss -0.7225
51
+ 2023-11-01 12:55:08.478883: val_loss -0.747
52
+ 2023-11-01 12:55:08.478960: Pseudo dice [0.7864]
53
+ 2023-11-01 12:55:08.479043: Epoch time: 336.14 s
54
+ 2023-11-01 12:55:08.479113: Yayy! New best EMA pseudo Dice: 0.1999
55
+ 2023-11-01 12:55:11.450947:
56
+ 2023-11-01 12:55:11.451077: Epoch 4
57
+ 2023-11-01 12:55:11.451180: Current learning rate: 0.00996
58
+ 2023-11-01 13:00:47.726259: train_loss -0.7348
59
+ 2023-11-01 13:00:47.726417: val_loss -0.7852
60
+ 2023-11-01 13:00:47.726495: Pseudo dice [0.8192]
61
+ 2023-11-01 13:00:47.726578: Epoch time: 336.28 s
62
+ 2023-11-01 13:00:47.726649: Yayy! New best EMA pseudo Dice: 0.2618
63
+ 2023-11-01 13:00:50.931026:
64
+ 2023-11-01 13:00:50.931148: Epoch 5
65
+ 2023-11-01 13:00:50.931252: Current learning rate: 0.00995
66
+ 2023-11-01 13:06:28.122296: train_loss -0.7575
67
+ 2023-11-01 13:06:28.122471: val_loss -0.7501
68
+ 2023-11-01 13:06:28.122555: Pseudo dice [0.7729]
69
+ 2023-11-01 13:06:28.122643: Epoch time: 337.19 s
70
+ 2023-11-01 13:06:28.122721: Yayy! New best EMA pseudo Dice: 0.3129
71
+ 2023-11-01 13:06:31.918833:
72
+ 2023-11-01 13:06:31.919015: Epoch 6
73
+ 2023-11-01 13:06:31.919141: Current learning rate: 0.00995
74
+ 2023-11-01 13:12:10.663473: train_loss -0.767
75
+ 2023-11-01 13:12:10.694362: val_loss -0.7804
76
+ 2023-11-01 13:12:10.694546: Pseudo dice [0.7994]
77
+ 2023-11-01 13:12:10.694668: Epoch time: 338.75 s
78
+ 2023-11-01 13:12:10.694769: Yayy! New best EMA pseudo Dice: 0.3616
79
+ 2023-11-01 13:12:13.968759:
80
+ 2023-11-01 13:12:13.968987: Epoch 7
81
+ 2023-11-01 13:12:13.969113: Current learning rate: 0.00994
82
+ 2023-11-01 13:17:51.820216: train_loss -0.7838
83
+ 2023-11-01 13:17:51.820378: val_loss -0.8075
84
+ 2023-11-01 13:17:51.820473: Pseudo dice [0.8305]
85
+ 2023-11-01 13:17:51.820560: Epoch time: 337.85 s
86
+ 2023-11-01 13:17:51.820641: Yayy! New best EMA pseudo Dice: 0.4085
87
+ 2023-11-01 13:17:54.913375:
88
+ 2023-11-01 13:17:54.913476: Epoch 8
89
+ 2023-11-01 13:17:54.913592: Current learning rate: 0.00993
90
+ 2023-11-01 13:23:31.326909: train_loss -0.7927
91
+ 2023-11-01 13:23:31.327086: val_loss -0.7916
92
+ 2023-11-01 13:23:31.327163: Pseudo dice [0.816]
93
+ 2023-11-01 13:23:31.327246: Epoch time: 336.41 s
94
+ 2023-11-01 13:23:31.327316: Yayy! New best EMA pseudo Dice: 0.4492
95
+ 2023-11-01 13:23:34.302348:
96
+ 2023-11-01 13:23:34.302555: Epoch 9
97
+ 2023-11-01 13:23:34.302660: Current learning rate: 0.00992
98
+ 2023-11-01 13:29:10.852254: train_loss -0.7943
99
+ 2023-11-01 13:29:10.852405: val_loss -0.8185
100
+ 2023-11-01 13:29:10.852496: Pseudo dice [0.8428]
101
+ 2023-11-01 13:29:10.852577: Epoch time: 336.55 s
102
+ 2023-11-01 13:29:10.852655: Yayy! New best EMA pseudo Dice: 0.4886
103
+ 2023-11-01 13:29:13.845106:
104
+ 2023-11-01 13:29:13.845208: Epoch 10
105
+ 2023-11-01 13:29:13.845325: Current learning rate: 0.00991
106
+ 2023-11-01 13:34:50.175644: train_loss -0.801
107
+ 2023-11-01 13:34:50.175787: val_loss -0.8133
108
+ 2023-11-01 13:34:50.175877: Pseudo dice [0.831]
109
+ 2023-11-01 13:34:50.175959: Epoch time: 336.33 s
110
+ 2023-11-01 13:34:50.176030: Yayy! New best EMA pseudo Dice: 0.5228
111
+ 2023-11-01 13:34:53.293814:
112
+ 2023-11-01 13:34:53.294001: Epoch 11
113
+ 2023-11-01 13:34:53.294168: Current learning rate: 0.0099
114
+ 2023-11-01 13:40:29.841172: train_loss -0.8003
115
+ 2023-11-01 13:40:29.841327: val_loss -0.8216
116
+ 2023-11-01 13:40:29.841403: Pseudo dice [0.8397]
117
+ 2023-11-01 13:40:29.841485: Epoch time: 336.55 s
118
+ 2023-11-01 13:40:29.841598: Yayy! New best EMA pseudo Dice: 0.5545
119
+ 2023-11-01 13:40:32.892795:
120
+ 2023-11-01 13:40:32.892940: Epoch 12
121
+ 2023-11-01 13:40:32.893047: Current learning rate: 0.00989
122
+ 2023-11-01 13:46:09.287373: train_loss -0.8042
123
+ 2023-11-01 13:46:09.287523: val_loss -0.8173
124
+ 2023-11-01 13:46:09.287600: Pseudo dice [0.8365]
125
+ 2023-11-01 13:46:09.287682: Epoch time: 336.4 s
126
+ 2023-11-01 13:46:09.287760: Yayy! New best EMA pseudo Dice: 0.5827
127
+ 2023-11-01 13:46:12.232458:
128
+ 2023-11-01 13:46:12.232580: Epoch 13
129
+ 2023-11-01 13:46:12.232692: Current learning rate: 0.00988
130
+ 2023-11-01 13:51:48.559893: train_loss -0.8051
131
+ 2023-11-01 13:51:48.560048: val_loss -0.7647
132
+ 2023-11-01 13:51:48.560140: Pseudo dice [0.7543]
133
+ 2023-11-01 13:51:48.560235: Epoch time: 336.33 s
134
+ 2023-11-01 13:51:48.560306: Yayy! New best EMA pseudo Dice: 0.5999
135
+ 2023-11-01 13:51:51.636058:
136
+ 2023-11-01 13:51:51.636162: Epoch 14
137
+ 2023-11-01 13:51:51.636276: Current learning rate: 0.00987
138
+ 2023-11-01 13:57:27.880198: train_loss -0.8162
139
+ 2023-11-01 13:57:27.880373: val_loss -0.8159
140
+ 2023-11-01 13:57:27.880450: Pseudo dice [0.8386]
141
+ 2023-11-01 13:57:27.880534: Epoch time: 336.24 s
142
+ 2023-11-01 13:57:27.880605: Yayy! New best EMA pseudo Dice: 0.6237
143
+ 2023-11-01 13:57:30.857818:
144
+ 2023-11-01 13:57:30.857923: Epoch 15
145
+ 2023-11-01 13:57:30.858038: Current learning rate: 0.00986
146
+ 2023-11-01 14:03:07.147270: train_loss -0.8136
147
+ 2023-11-01 14:03:07.147419: val_loss -0.8179
148
+ 2023-11-01 14:03:07.147510: Pseudo dice [0.8396]
149
+ 2023-11-01 14:03:07.147593: Epoch time: 336.29 s
150
+ 2023-11-01 14:03:07.147664: Yayy! New best EMA pseudo Dice: 0.6453
151
+ 2023-11-01 14:03:10.251518:
152
+ 2023-11-01 14:03:10.251815: Epoch 16
153
+ 2023-11-01 14:03:10.251971: Current learning rate: 0.00986
154
+ 2023-11-01 14:08:46.691768: train_loss -0.8164
155
+ 2023-11-01 14:08:46.691912: val_loss -0.7863
156
+ 2023-11-01 14:08:46.691984: Pseudo dice [0.8058]
157
+ 2023-11-01 14:08:46.692061: Epoch time: 336.44 s
158
+ 2023-11-01 14:08:46.692127: Yayy! New best EMA pseudo Dice: 0.6614
159
+ 2023-11-01 14:08:49.702475:
160
+ 2023-11-01 14:08:49.702577: Epoch 17
161
+ 2023-11-01 14:08:49.702692: Current learning rate: 0.00985
162
+ 2023-11-01 14:14:26.236941: train_loss -0.7944
163
+ 2023-11-01 14:14:26.237129: val_loss -0.8047
164
+ 2023-11-01 14:14:26.237204: Pseudo dice [0.8229]
165
+ 2023-11-01 14:14:26.237286: Epoch time: 336.54 s
166
+ 2023-11-01 14:14:26.237356: Yayy! New best EMA pseudo Dice: 0.6775
167
+ 2023-11-01 14:14:29.427278:
168
+ 2023-11-01 14:14:29.427479: Epoch 18
169
+ 2023-11-01 14:14:29.427614: Current learning rate: 0.00984
170
+ 2023-11-01 14:20:05.867384: train_loss -0.8158
171
+ 2023-11-01 14:20:05.867539: val_loss -0.7984
172
+ 2023-11-01 14:20:05.867628: Pseudo dice [0.8069]
173
+ 2023-11-01 14:20:05.867710: Epoch time: 336.44 s
174
+ 2023-11-01 14:20:05.867781: Yayy! New best EMA pseudo Dice: 0.6905
175
+ 2023-11-01 14:20:08.897546:
176
+ 2023-11-01 14:20:08.897667: Epoch 19
177
+ 2023-11-01 14:20:08.897769: Current learning rate: 0.00983
178
+ 2023-11-01 14:25:45.449991: train_loss -0.8188
179
+ 2023-11-01 14:25:45.450138: val_loss -0.8164
180
+ 2023-11-01 14:25:45.450228: Pseudo dice [0.8323]
181
+ 2023-11-01 14:25:45.450310: Epoch time: 336.55 s
182
+ 2023-11-01 14:25:45.450380: Yayy! New best EMA pseudo Dice: 0.7046
183
+ 2023-11-01 14:25:48.423054:
184
+ 2023-11-01 14:25:48.423226: Epoch 20
185
+ 2023-11-01 14:25:48.423382: Current learning rate: 0.00982
186
+ 2023-11-01 14:31:24.932714: train_loss -0.8197
187
+ 2023-11-01 14:31:24.932868: val_loss -0.825
188
+ 2023-11-01 14:31:24.932945: Pseudo dice [0.8468]
189
+ 2023-11-01 14:31:24.933029: Epoch time: 336.51 s
190
+ 2023-11-01 14:31:24.933099: Yayy! New best EMA pseudo Dice: 0.7189
191
+ 2023-11-01 14:31:28.042007:
192
+ 2023-11-01 14:31:28.042109: Epoch 21
193
+ 2023-11-01 14:31:28.042224: Current learning rate: 0.00981
194
+ 2023-11-01 14:37:04.605043: train_loss -0.8094
195
+ 2023-11-01 14:37:04.605198: val_loss -0.8171
196
+ 2023-11-01 14:37:04.605269: Pseudo dice [0.838]
197
+ 2023-11-01 14:37:04.605345: Epoch time: 336.56 s
198
+ 2023-11-01 14:37:04.605409: Yayy! New best EMA pseudo Dice: 0.7308
199
+ 2023-11-01 14:37:07.584971:
200
+ 2023-11-01 14:37:07.585091: Epoch 22
201
+ 2023-11-01 14:37:07.585207: Current learning rate: 0.0098
202
+ 2023-11-01 14:42:43.952250: train_loss -0.8163
203
+ 2023-11-01 14:42:43.952427: val_loss -0.8214
204
+ 2023-11-01 14:42:43.952517: Pseudo dice [0.8395]
205
+ 2023-11-01 14:42:43.952599: Epoch time: 336.37 s
206
+ 2023-11-01 14:42:43.952677: Yayy! New best EMA pseudo Dice: 0.7416
207
+ 2023-11-01 14:42:46.894954:
208
+ 2023-11-01 14:42:46.895052: Epoch 23
209
+ 2023-11-01 14:42:46.895165: Current learning rate: 0.00979
210
+ 2023-11-01 14:48:23.334170: train_loss -0.8286
211
+ 2023-11-01 14:48:23.334348: val_loss -0.8133
212
+ 2023-11-01 14:48:23.334519: Pseudo dice [0.8332]
213
+ 2023-11-01 14:48:23.334684: Epoch time: 336.44 s
214
+ 2023-11-01 14:48:23.334764: Yayy! New best EMA pseudo Dice: 0.7508
215
+ 2023-11-01 14:48:26.430463:
216
+ 2023-11-01 14:48:26.430566: Epoch 24
217
+ 2023-11-01 14:48:26.430691: Current learning rate: 0.00978
218
+ 2023-11-01 14:54:03.121751: train_loss -0.8284
219
+ 2023-11-01 14:54:03.121907: val_loss -0.8135
220
+ 2023-11-01 14:54:03.121999: Pseudo dice [0.8296]
221
+ 2023-11-01 14:54:03.122081: Epoch time: 336.69 s
222
+ 2023-11-01 14:54:03.122151: Yayy! New best EMA pseudo Dice: 0.7587
223
+ 2023-11-01 14:54:06.213112:
224
+ 2023-11-01 14:54:06.213219: Epoch 25
225
+ 2023-11-01 14:54:06.213335: Current learning rate: 0.00977
226
+ 2023-11-01 14:59:42.955346: train_loss -0.8266
227
+ 2023-11-01 14:59:42.955494: val_loss -0.8312
228
+ 2023-11-01 14:59:42.955565: Pseudo dice [0.8429]
229
+ 2023-11-01 14:59:42.955641: Epoch time: 336.74 s
230
+ 2023-11-01 14:59:42.955706: Yayy! New best EMA pseudo Dice: 0.7671
231
+ 2023-11-01 14:59:45.968148:
232
+ 2023-11-01 14:59:45.968338: Epoch 26
233
+ 2023-11-01 14:59:45.968488: Current learning rate: 0.00977
234
+ 2023-11-01 15:05:22.724873: train_loss -0.8334
235
+ 2023-11-01 15:05:22.725041: val_loss -0.8357
236
+ 2023-11-01 15:05:22.725132: Pseudo dice [0.853]
237
+ 2023-11-01 15:05:22.725214: Epoch time: 336.76 s
238
+ 2023-11-01 15:05:22.725285: Yayy! New best EMA pseudo Dice: 0.7757
239
+ 2023-11-01 15:05:25.626139:
240
+ 2023-11-01 15:05:25.626322: Epoch 27
241
+ 2023-11-01 15:05:25.626501: Current learning rate: 0.00976
242
+ 2023-11-01 15:11:02.284468: train_loss -0.8351
243
+ 2023-11-01 15:11:02.284621: val_loss -0.8286
244
+ 2023-11-01 15:11:02.284725: Pseudo dice [0.8451]
245
+ 2023-11-01 15:11:02.284806: Epoch time: 336.66 s
246
+ 2023-11-01 15:11:02.284877: Yayy! New best EMA pseudo Dice: 0.7826
247
+ 2023-11-01 15:11:05.176012:
248
+ 2023-11-01 15:11:05.176114: Epoch 28
249
+ 2023-11-01 15:11:05.176226: Current learning rate: 0.00975
250
+ 2023-11-01 15:16:41.778232: train_loss -0.8403
251
+ 2023-11-01 15:16:41.778399: val_loss -0.8344
252
+ 2023-11-01 15:16:41.778475: Pseudo dice [0.851]
253
+ 2023-11-01 15:16:41.778558: Epoch time: 336.6 s
254
+ 2023-11-01 15:16:41.778628: Yayy! New best EMA pseudo Dice: 0.7895
255
+ 2023-11-01 15:16:44.724666:
256
+ 2023-11-01 15:16:44.724856: Epoch 29
257
+ 2023-11-01 15:16:44.725028: Current learning rate: 0.00974
258
+ 2023-11-01 15:22:21.276134: train_loss -0.8428
259
+ 2023-11-01 15:22:21.276274: val_loss -0.831
260
+ 2023-11-01 15:22:21.276366: Pseudo dice [0.8439]
261
+ 2023-11-01 15:22:21.276448: Epoch time: 336.55 s
262
+ 2023-11-01 15:22:21.276518: Yayy! New best EMA pseudo Dice: 0.7949
263
+ 2023-11-01 15:22:24.330993:
264
+ 2023-11-01 15:22:24.331091: Epoch 30
265
+ 2023-11-01 15:22:24.331203: Current learning rate: 0.00973
266
+ 2023-11-01 15:28:00.922952: train_loss -0.8452
267
+ 2023-11-01 15:28:00.923098: val_loss -0.8184
268
+ 2023-11-01 15:28:00.923191: Pseudo dice [0.8333]
269
+ 2023-11-01 15:28:00.923273: Epoch time: 336.59 s
270
+ 2023-11-01 15:28:00.923345: Yayy! New best EMA pseudo Dice: 0.7988
271
+ 2023-11-01 15:28:04.032981:
272
+ 2023-11-01 15:28:04.033082: Epoch 31
273
+ 2023-11-01 15:28:04.033201: Current learning rate: 0.00972
274
+ 2023-11-01 15:33:40.671951: train_loss -0.8339
275
+ 2023-11-01 15:33:40.672108: val_loss -0.8227
276
+ 2023-11-01 15:33:40.672186: Pseudo dice [0.8428]
277
+ 2023-11-01 15:33:40.672268: Epoch time: 336.64 s
278
+ 2023-11-01 15:33:40.672337: Yayy! New best EMA pseudo Dice: 0.8032
279
+ 2023-11-01 15:33:43.598649:
280
+ 2023-11-01 15:33:43.598753: Epoch 32
281
+ 2023-11-01 15:33:43.598849: Current learning rate: 0.00971
282
+ 2023-11-01 15:39:20.273665: train_loss -0.8356
283
+ 2023-11-01 15:39:20.273837: val_loss -0.8276
284
+ 2023-11-01 15:39:20.273914: Pseudo dice [0.8389]
285
+ 2023-11-01 15:39:20.273996: Epoch time: 336.68 s
286
+ 2023-11-01 15:39:20.274065: Yayy! New best EMA pseudo Dice: 0.8067
287
+ 2023-11-01 15:39:23.273413:
288
+ 2023-11-01 15:39:23.273538: Epoch 33
289
+ 2023-11-01 15:39:23.273642: Current learning rate: 0.0097
290
+ 2023-11-01 15:44:59.943406: train_loss -0.8428
291
+ 2023-11-01 15:44:59.943554: val_loss -0.8367
292
+ 2023-11-01 15:44:59.943645: Pseudo dice [0.8548]
293
+ 2023-11-01 15:44:59.943728: Epoch time: 336.67 s
294
+ 2023-11-01 15:44:59.943798: Yayy! New best EMA pseudo Dice: 0.8115
295
+ 2023-11-01 15:45:02.978674:
296
+ 2023-11-01 15:45:02.978841: Epoch 34
297
+ 2023-11-01 15:45:02.979010: Current learning rate: 0.00969
298
+ 2023-11-01 15:50:41.035711: train_loss -0.8423
299
+ 2023-11-01 15:50:41.035971: val_loss -0.8491
300
+ 2023-11-01 15:50:41.036084: Pseudo dice [0.8673]
301
+ 2023-11-01 15:50:41.036215: Epoch time: 338.06 s
302
+ 2023-11-01 15:50:41.036318: Yayy! New best EMA pseudo Dice: 0.8171
303
+ 2023-11-01 15:50:45.144126:
304
+ 2023-11-01 15:50:45.144455: Epoch 35
305
+ 2023-11-01 15:50:45.144589: Current learning rate: 0.00968
306
+ 2023-11-01 15:57:39.695797: train_loss -0.8435
307
+ 2023-11-01 15:57:39.713022: val_loss -0.8347
308
+ 2023-11-01 15:57:39.713110: Pseudo dice [0.8515]
309
+ 2023-11-01 15:57:39.713194: Epoch time: 414.55 s
310
+ 2023-11-01 15:57:39.713266: Yayy! New best EMA pseudo Dice: 0.8206
311
+ 2023-11-01 15:57:42.775419:
312
+ 2023-11-01 15:57:42.775573: Epoch 36
313
+ 2023-11-01 15:57:42.775699: Current learning rate: 0.00968
314
+ 2023-11-01 16:05:08.612645: train_loss -0.8462
315
+ 2023-11-01 16:05:08.612795: val_loss -0.8348
316
+ 2023-11-01 16:05:08.612872: Pseudo dice [0.8487]
317
+ 2023-11-01 16:05:08.612952: Epoch time: 445.84 s
318
+ 2023-11-01 16:05:08.613021: Yayy! New best EMA pseudo Dice: 0.8234
319
+ 2023-11-01 16:05:11.535625:
320
+ 2023-11-01 16:05:11.535743: Epoch 37
321
+ 2023-11-01 16:05:11.535845: Current learning rate: 0.00967
322
+ 2023-11-01 16:12:37.882628: train_loss -0.8489
323
+ 2023-11-01 16:12:37.882780: val_loss -0.8288
324
+ 2023-11-01 16:12:37.882856: Pseudo dice [0.8396]
325
+ 2023-11-01 16:12:37.882939: Epoch time: 446.35 s
326
+ 2023-11-01 16:12:37.883009: Yayy! New best EMA pseudo Dice: 0.825
327
+ 2023-11-01 16:12:41.060149:
328
+ 2023-11-01 16:12:41.060333: Epoch 38
329
+ 2023-11-01 16:12:41.060438: Current learning rate: 0.00966
330
+ 2023-11-01 16:20:06.728216: train_loss -0.848
331
+ 2023-11-01 16:20:06.728388: val_loss -0.8475
332
+ 2023-11-01 16:20:06.728465: Pseudo dice [0.8638]
333
+ 2023-11-01 16:20:06.728546: Epoch time: 445.67 s
334
+ 2023-11-01 16:20:06.728616: Yayy! New best EMA pseudo Dice: 0.8289
335
+ 2023-11-01 16:20:10.258702:
336
+ 2023-11-01 16:20:10.258880: Epoch 39
337
+ 2023-11-01 16:20:10.259030: Current learning rate: 0.00965
338
+ 2023-11-01 16:27:38.011129: train_loss -0.8533
339
+ 2023-11-01 16:27:38.011296: val_loss -0.8365
340
+ 2023-11-01 16:27:38.011374: Pseudo dice [0.8482]
341
+ 2023-11-01 16:27:38.011454: Epoch time: 447.75 s
342
+ 2023-11-01 16:27:38.011524: Yayy! New best EMA pseudo Dice: 0.8308
343
+ 2023-11-01 16:27:41.063936:
344
+ 2023-11-01 16:27:41.064081: Epoch 40
345
+ 2023-11-01 16:27:41.064181: Current learning rate: 0.00964
346
+ 2023-11-01 16:35:06.424573: train_loss -0.8501
347
+ 2023-11-01 16:35:06.424753: val_loss -0.8421
348
+ 2023-11-01 16:35:06.424830: Pseudo dice [0.8629]
349
+ 2023-11-01 16:35:06.424915: Epoch time: 445.36 s
350
+ 2023-11-01 16:35:06.424985: Yayy! New best EMA pseudo Dice: 0.834
351
+ 2023-11-01 16:35:09.722006:
352
+ 2023-11-01 16:35:09.722123: Epoch 41
353
+ 2023-11-01 16:35:09.722227: Current learning rate: 0.00963
354
+ 2023-11-01 16:42:39.942986: train_loss -0.8524
355
+ 2023-11-01 16:42:39.943140: val_loss -0.8517
356
+ 2023-11-01 16:42:39.943217: Pseudo dice [0.8677]
357
+ 2023-11-01 16:42:39.943298: Epoch time: 450.22 s
358
+ 2023-11-01 16:42:39.943368: Yayy! New best EMA pseudo Dice: 0.8374
359
+ 2023-11-01 16:42:42.956915:
360
+ 2023-11-01 16:42:42.957030: Epoch 42
361
+ 2023-11-01 16:42:42.957135: Current learning rate: 0.00962
362
+ 2023-11-01 16:50:11.197622: train_loss -0.854
363
+ 2023-11-01 16:50:11.197785: val_loss -0.8326
364
+ 2023-11-01 16:50:11.197862: Pseudo dice [0.8488]
365
+ 2023-11-01 16:50:11.197942: Epoch time: 448.24 s
366
+ 2023-11-01 16:50:11.198013: Yayy! New best EMA pseudo Dice: 0.8385
367
+ 2023-11-01 16:50:14.224224:
368
+ 2023-11-01 16:50:14.224344: Epoch 43
369
+ 2023-11-01 16:50:14.224468: Current learning rate: 0.00961
370
+ 2023-11-01 16:57:40.080140: train_loss -0.8538
371
+ 2023-11-01 16:57:40.080308: val_loss -0.8373
372
+ 2023-11-01 16:57:40.080385: Pseudo dice [0.8554]
373
+ 2023-11-01 16:57:40.080467: Epoch time: 445.86 s
374
+ 2023-11-01 16:57:40.080536: Yayy! New best EMA pseudo Dice: 0.8402
375
+ 2023-11-01 16:57:43.056427:
376
+ 2023-11-01 16:57:43.056543: Epoch 44
377
+ 2023-11-01 16:57:43.056653: Current learning rate: 0.0096
378
+ 2023-11-01 17:05:12.675642: train_loss -0.8589
379
+ 2023-11-01 17:05:12.675794: val_loss -0.8389
380
+ 2023-11-01 17:05:12.675871: Pseudo dice [0.8584]
381
+ 2023-11-01 17:05:12.675954: Epoch time: 449.62 s
382
+ 2023-11-01 17:05:12.676023: Yayy! New best EMA pseudo Dice: 0.842
383
+ 2023-11-01 17:05:15.697622:
384
+ 2023-11-01 17:05:15.697821: Epoch 45
385
+ 2023-11-01 17:05:15.697938: Current learning rate: 0.00959
386
+ 2023-11-01 17:12:43.766387: train_loss -0.8544
387
+ 2023-11-01 17:12:43.766549: val_loss -0.8438
388
+ 2023-11-01 17:12:43.766625: Pseudo dice [0.8578]
389
+ 2023-11-01 17:12:43.766706: Epoch time: 448.07 s
390
+ 2023-11-01 17:12:43.766776: Yayy! New best EMA pseudo Dice: 0.8436
391
+ 2023-11-01 17:12:46.757169:
392
+ 2023-11-01 17:12:46.757293: Epoch 46
393
+ 2023-11-01 17:12:46.757416: Current learning rate: 0.00959
394
+ 2023-11-01 17:20:12.075120: train_loss -0.8558
395
+ 2023-11-01 17:20:12.075302: val_loss -0.7968
396
+ 2023-11-01 17:20:12.075379: Pseudo dice [0.8141]
397
+ 2023-11-01 17:20:12.075462: Epoch time: 445.32 s
398
+ 2023-11-01 17:20:13.323754:
399
+ 2023-11-01 17:20:13.323933: Epoch 47
400
+ 2023-11-01 17:20:13.324040: Current learning rate: 0.00958
401
+ 2023-11-01 17:27:38.902053: train_loss -0.8482
402
+ 2023-11-01 17:27:38.902219: val_loss -0.8417
403
+ 2023-11-01 17:27:38.902297: Pseudo dice [0.8614]
404
+ 2023-11-01 17:27:38.902385: Epoch time: 445.58 s
405
+ 2023-11-01 17:27:40.520520:
406
+ 2023-11-01 17:27:40.520679: Epoch 48
407
+ 2023-11-01 17:27:40.520787: Current learning rate: 0.00957
408
+ 2023-11-01 17:35:07.209738: train_loss -0.8507
409
+ 2023-11-01 17:35:07.209899: val_loss -0.8082
410
+ 2023-11-01 17:35:07.209974: Pseudo dice [0.8215]
411
+ 2023-11-01 17:35:07.210057: Epoch time: 446.69 s
412
+ 2023-11-01 17:35:08.436496:
413
+ 2023-11-01 17:35:08.436598: Epoch 49
414
+ 2023-11-01 17:35:08.436752: Current learning rate: 0.00956
415
+ 2023-11-01 17:42:35.965141: train_loss -0.8425
416
+ 2023-11-01 17:42:35.965289: val_loss -0.8205
417
+ 2023-11-01 17:42:35.965364: Pseudo dice [0.8438]
418
+ 2023-11-01 17:42:35.965443: Epoch time: 447.53 s
419
+ 2023-11-01 17:42:37.541196:
420
+ 2023-11-01 17:42:37.541528: Epoch 50
421
+ 2023-11-01 17:42:37.541683: Current learning rate: 0.00955
422
+ 2023-11-01 17:50:02.805960: train_loss -0.8562
423
+ 2023-11-01 17:50:02.806122: val_loss -0.8345
424
+ 2023-11-01 17:50:02.806199: Pseudo dice [0.8461]
425
+ 2023-11-01 17:50:02.806281: Epoch time: 445.27 s
426
+ 2023-11-01 17:50:04.146918:
427
+ 2023-11-01 17:50:04.147065: Epoch 51
428
+ 2023-11-01 17:50:04.147166: Current learning rate: 0.00954
429
+ 2023-11-01 17:57:29.427134: train_loss -0.8483
430
+ 2023-11-01 17:57:29.427290: val_loss -0.8394
431
+ 2023-11-01 17:57:29.427366: Pseudo dice [0.8555]
432
+ 2023-11-01 17:57:29.427447: Epoch time: 445.28 s
433
+ 2023-11-01 17:57:30.945216:
434
+ 2023-11-01 17:57:30.945407: Epoch 52
435
+ 2023-11-01 17:57:30.945521: Current learning rate: 0.00953
436
+ 2023-11-01 18:04:59.921725: train_loss -0.8523
437
+ 2023-11-01 18:04:59.921882: val_loss -0.8258
438
+ 2023-11-01 18:04:59.921958: Pseudo dice [0.843]
439
+ 2023-11-01 18:04:59.922040: Epoch time: 448.98 s
440
+ 2023-11-01 18:05:01.229496:
441
+ 2023-11-01 18:05:01.229633: Epoch 53
442
+ 2023-11-01 18:05:01.229739: Current learning rate: 0.00952
443
+ 2023-11-01 18:12:26.623523: train_loss -0.8549
444
+ 2023-11-01 18:12:26.623710: val_loss -0.836
445
+ 2023-11-01 18:12:26.623787: Pseudo dice [0.8523]
446
+ 2023-11-01 18:12:26.623871: Epoch time: 445.39 s
447
+ 2023-11-01 18:12:26.623942: Yayy! New best EMA pseudo Dice: 0.8438
448
+ 2023-11-01 18:12:29.501647:
449
+ 2023-11-01 18:12:29.501828: Epoch 54
450
+ 2023-11-01 18:12:29.501939: Current learning rate: 0.00951
451
+ 2023-11-01 18:19:54.970378: train_loss -0.8519
452
+ 2023-11-01 18:19:54.970553: val_loss -0.8298
453
+ 2023-11-01 18:19:54.970630: Pseudo dice [0.8471]
454
+ 2023-11-01 18:19:54.970712: Epoch time: 445.47 s
455
+ 2023-11-01 18:19:54.970783: Yayy! New best EMA pseudo Dice: 0.8441
456
+ 2023-11-01 18:19:58.170483:
457
+ 2023-11-01 18:19:58.170775: Epoch 55
458
+ 2023-11-01 18:19:58.171014: Current learning rate: 0.0095
459
+ 2023-11-01 18:27:23.854812: train_loss -0.8497
460
+ 2023-11-01 18:27:23.854960: val_loss -0.8195
461
+ 2023-11-01 18:27:23.855038: Pseudo dice [0.8423]
462
+ 2023-11-01 18:27:23.855120: Epoch time: 445.69 s
463
+ 2023-11-01 18:27:25.084387:
464
+ 2023-11-01 18:27:25.084543: Epoch 56
465
+ 2023-11-01 18:27:25.084657: Current learning rate: 0.00949
466
+ 2023-11-01 18:33:12.627448: train_loss -0.8361
467
+ 2023-11-01 18:33:12.627676: val_loss -0.8221
468
+ 2023-11-01 18:33:12.627755: Pseudo dice [0.8399]
469
+ 2023-11-01 18:33:12.627836: Epoch time: 347.54 s
470
+ 2023-11-01 18:33:13.825053:
471
+ 2023-11-01 18:33:13.825153: Epoch 57
472
+ 2023-11-01 18:33:13.825267: Current learning rate: 0.00949
473
+ 2023-11-01 18:38:50.319673: train_loss -0.8453
474
+ 2023-11-01 18:38:50.319819: val_loss -0.8021
475
+ 2023-11-01 18:38:50.319909: Pseudo dice [0.8086]
476
+ 2023-11-01 18:38:50.319992: Epoch time: 336.5 s
477
+ 2023-11-01 18:38:51.520447:
478
+ 2023-11-01 18:38:51.520544: Epoch 58
479
+ 2023-11-01 18:38:51.520666: Current learning rate: 0.00948
480
+ 2023-11-01 18:44:28.636092: train_loss -0.8507
481
+ 2023-11-01 18:44:28.636245: val_loss -0.8383
482
+ 2023-11-01 18:44:28.636337: Pseudo dice [0.8568]
483
+ 2023-11-01 18:44:28.636418: Epoch time: 337.12 s
484
+ 2023-11-01 18:44:30.032076:
485
+ 2023-11-01 18:44:30.032265: Epoch 59
486
+ 2023-11-01 18:44:30.032413: Current learning rate: 0.00947
487
+ 2023-11-01 18:50:46.285170: train_loss -0.8447
488
+ 2023-11-01 18:50:46.285314: val_loss -0.8489
489
+ 2023-11-01 18:50:46.285390: Pseudo dice [0.8629]
490
+ 2023-11-01 18:50:46.285480: Epoch time: 376.25 s
491
+ 2023-11-01 18:50:47.655574:
492
+ 2023-11-01 18:50:47.655704: Epoch 60
493
+ 2023-11-01 18:50:47.655826: Current learning rate: 0.00946
494
+ 2023-11-01 18:57:23.430940: train_loss -0.8436
495
+ 2023-11-01 18:57:23.431118: val_loss -0.8312
496
+ 2023-11-01 18:57:23.431200: Pseudo dice [0.8411]
497
+ 2023-11-01 18:57:23.431286: Epoch time: 395.78 s
498
+ 2023-11-01 18:57:24.694742:
499
+ 2023-11-01 18:57:24.694942: Epoch 61
500
+ 2023-11-01 18:57:24.695078: Current learning rate: 0.00945
501
+ 2023-11-01 19:03:59.754192: train_loss -0.8472
502
+ 2023-11-01 19:03:59.754373: val_loss -0.8148
503
+ 2023-11-01 19:03:59.754452: Pseudo dice [0.8301]
504
+ 2023-11-01 19:03:59.754535: Epoch time: 395.06 s
505
+ 2023-11-01 19:04:01.027196:
506
+ 2023-11-01 19:04:01.027352: Epoch 62
507
+ 2023-11-01 19:04:01.027515: Current learning rate: 0.00944
508
+ 2023-11-01 19:10:36.589362: train_loss -0.8603
509
+ 2023-11-01 19:10:36.589518: val_loss -0.8392
510
+ 2023-11-01 19:10:36.589617: Pseudo dice [0.8541]
511
+ 2023-11-01 19:10:36.589709: Epoch time: 395.56 s
512
+ 2023-11-01 19:10:37.954148:
513
+ 2023-11-01 19:10:37.954282: Epoch 63
514
+ 2023-11-01 19:10:37.954410: Current learning rate: 0.00943
515
+ 2023-11-01 19:17:13.978469: train_loss -0.8618
516
+ 2023-11-01 19:17:13.978666: val_loss -0.8285
517
+ 2023-11-01 19:17:13.978747: Pseudo dice [0.8399]
518
+ 2023-11-01 19:17:13.978833: Epoch time: 396.03 s
519
+ 2023-11-01 19:17:15.527785:
520
+ 2023-11-01 19:17:15.528210: Epoch 64
521
+ 2023-11-01 19:17:15.528411: Current learning rate: 0.00942
522
+ 2023-11-01 19:23:52.847772: train_loss -0.8588
523
+ 2023-11-01 19:23:52.847927: val_loss -0.8411
524
+ 2023-11-01 19:23:52.848003: Pseudo dice [0.8574]
525
+ 2023-11-01 19:23:52.848083: Epoch time: 397.32 s
526
+ 2023-11-01 19:23:52.848154: Yayy! New best EMA pseudo Dice: 0.8445
527
+ 2023-11-01 19:23:55.809385:
528
+ 2023-11-01 19:23:55.809500: Epoch 65
529
+ 2023-11-01 19:23:55.809601: Current learning rate: 0.00941
530
+ 2023-11-01 19:30:31.975451: train_loss -0.8463
531
+ 2023-11-01 19:30:31.975610: val_loss -0.836
532
+ 2023-11-01 19:30:31.975701: Pseudo dice [0.8515]
533
+ 2023-11-01 19:30:31.975793: Epoch time: 396.17 s
534
+ 2023-11-01 19:30:31.975872: Yayy! New best EMA pseudo Dice: 0.8452
535
+ 2023-11-01 19:30:35.173520:
536
+ 2023-11-01 19:30:35.173799: Epoch 66
537
+ 2023-11-01 19:30:35.173909: Current learning rate: 0.0094
538
+ 2023-11-01 19:37:12.180462: train_loss -0.8593
539
+ 2023-11-01 19:37:12.180623: val_loss -0.8328
540
+ 2023-11-01 19:37:12.180711: Pseudo dice [0.8484]
541
+ 2023-11-01 19:37:12.180791: Epoch time: 397.01 s
542
+ 2023-11-01 19:37:12.180862: Yayy! New best EMA pseudo Dice: 0.8455
543
+ 2023-11-01 19:37:15.188655:
544
+ 2023-11-01 19:37:15.188773: Epoch 67
545
+ 2023-11-01 19:37:15.188884: Current learning rate: 0.00939
546
+ 2023-11-01 19:43:51.512940: train_loss -0.8624
547
+ 2023-11-01 19:43:51.513099: val_loss -0.8392
548
+ 2023-11-01 19:43:51.513189: Pseudo dice [0.8586]
549
+ 2023-11-01 19:43:51.513280: Epoch time: 396.33 s
550
+ 2023-11-01 19:43:51.513359: Yayy! New best EMA pseudo Dice: 0.8468
551
+ 2023-11-01 19:43:54.565564:
552
+ 2023-11-01 19:43:54.565837: Epoch 68
553
+ 2023-11-01 19:43:54.565979: Current learning rate: 0.00939
554
+ 2023-11-01 19:50:30.502129: train_loss -0.8697
555
+ 2023-11-01 19:50:30.502393: val_loss -0.8413
556
+ 2023-11-01 19:50:30.502517: Pseudo dice [0.8568]
557
+ 2023-11-01 19:50:30.502648: Epoch time: 395.94 s
558
+ 2023-11-01 19:50:30.502760: Yayy! New best EMA pseudo Dice: 0.8478
559
+ 2023-11-01 19:50:33.600526:
560
+ 2023-11-01 19:50:33.600672: Epoch 69
561
+ 2023-11-01 19:50:33.600781: Current learning rate: 0.00938
562
+ 2023-11-01 19:56:12.777473: train_loss -0.8664
563
+ 2023-11-01 19:56:12.777643: val_loss -0.8462
564
+ 2023-11-01 19:56:12.777736: Pseudo dice [0.8589]
565
+ 2023-11-01 19:56:12.777829: Epoch time: 339.18 s
566
+ 2023-11-01 19:56:12.777909: Yayy! New best EMA pseudo Dice: 0.8489
567
+ 2023-11-01 19:56:15.909597:
568
+ 2023-11-01 19:56:15.909701: Epoch 70
569
+ 2023-11-01 19:56:15.909818: Current learning rate: 0.00937
570
+ 2023-11-01 20:01:52.535678: train_loss -0.8686
571
+ 2023-11-01 20:01:52.535833: val_loss -0.8386
572
+ 2023-11-01 20:01:52.535922: Pseudo dice [0.8573]
573
+ 2023-11-01 20:01:52.536009: Epoch time: 336.63 s
574
+ 2023-11-01 20:01:52.536083: Yayy! New best EMA pseudo Dice: 0.8498
575
+ 2023-11-01 20:01:55.612131:
576
+ 2023-11-01 20:01:55.612319: Epoch 71
577
+ 2023-11-01 20:01:55.612489: Current learning rate: 0.00936
578
+ 2023-11-01 20:07:37.594163: train_loss -0.8619
579
+ 2023-11-01 20:07:37.594302: val_loss -0.8419
580
+ 2023-11-01 20:07:37.594393: Pseudo dice [0.8585]
581
+ 2023-11-01 20:07:37.594490: Epoch time: 341.98 s
582
+ 2023-11-01 20:07:37.594563: Yayy! New best EMA pseudo Dice: 0.8507
583
+ 2023-11-01 20:07:40.613120:
584
+ 2023-11-01 20:07:40.613220: Epoch 72
585
+ 2023-11-01 20:07:40.613333: Current learning rate: 0.00935
586
+ 2023-11-01 20:13:17.532016: train_loss -0.8648
587
+ 2023-11-01 20:13:17.532188: val_loss -0.8387
588
+ 2023-11-01 20:13:17.532264: Pseudo dice [0.8576]
589
+ 2023-11-01 20:13:17.532357: Epoch time: 336.92 s
590
+ 2023-11-01 20:13:17.532428: Yayy! New best EMA pseudo Dice: 0.8513
591
+ 2023-11-01 20:13:20.803119:
592
+ 2023-11-01 20:13:20.803227: Epoch 73
593
+ 2023-11-01 20:13:20.803341: Current learning rate: 0.00934
594
+ 2023-11-01 20:19:22.396014: train_loss -0.8598
595
+ 2023-11-01 20:19:22.396177: val_loss -0.8338
596
+ 2023-11-01 20:19:22.396282: Pseudo dice [0.8521]
597
+ 2023-11-01 20:19:22.396372: Epoch time: 361.59 s
598
+ 2023-11-01 20:19:22.396451: Yayy! New best EMA pseudo Dice: 0.8514
599
+ 2023-11-01 20:19:25.370741:
600
+ 2023-11-01 20:19:25.370912: Epoch 74
601
+ 2023-11-01 20:19:25.371034: Current learning rate: 0.00933
602
+ 2023-11-01 20:25:02.431605: train_loss -0.8651
603
+ 2023-11-01 20:25:02.431752: val_loss -0.8238
604
+ 2023-11-01 20:25:02.431839: Pseudo dice [0.8455]
605
+ 2023-11-01 20:25:02.431925: Epoch time: 337.06 s
606
+ 2023-11-01 20:25:03.680130:
607
+ 2023-11-01 20:25:03.680347: Epoch 75
608
+ 2023-11-01 20:25:03.680523: Current learning rate: 0.00932
609
+ 2023-11-01 20:31:06.875317: train_loss -0.8627
610
+ 2023-11-01 20:31:06.875459: val_loss -0.8485
611
+ 2023-11-01 20:31:06.875535: Pseudo dice [0.8619]
612
+ 2023-11-01 20:31:06.875614: Epoch time: 363.2 s
613
+ 2023-11-01 20:31:06.875684: Yayy! New best EMA pseudo Dice: 0.8519
614
+ 2023-11-01 20:31:09.852216:
615
+ 2023-11-01 20:31:09.852440: Epoch 76
616
+ 2023-11-01 20:31:09.852593: Current learning rate: 0.00931
617
+ 2023-11-01 20:37:29.877795: train_loss -0.8684
618
+ 2023-11-01 20:37:29.877959: val_loss -0.8405
619
+ 2023-11-01 20:37:29.878036: Pseudo dice [0.8572]
620
+ 2023-11-01 20:37:29.878119: Epoch time: 380.03 s
621
+ 2023-11-01 20:37:29.878189: Yayy! New best EMA pseudo Dice: 0.8525
622
+ 2023-11-01 20:37:32.771809:
623
+ 2023-11-01 20:37:32.771988: Epoch 77
624
+ 2023-11-01 20:37:32.772133: Current learning rate: 0.0093
625
+ 2023-11-01 20:43:09.663389: train_loss -0.8643
626
+ 2023-11-01 20:43:09.663603: val_loss -0.8492
627
+ 2023-11-01 20:43:09.663705: Pseudo dice [0.8661]
628
+ 2023-11-01 20:43:09.663815: Epoch time: 336.89 s
629
+ 2023-11-01 20:43:09.663908: Yayy! New best EMA pseudo Dice: 0.8538
630
+ 2023-11-01 20:43:13.853995:
631
+ 2023-11-01 20:43:13.854182: Epoch 78
632
+ 2023-11-01 20:43:13.854326: Current learning rate: 0.0093
633
+ 2023-11-01 20:49:45.208534: train_loss -0.8714
634
+ 2023-11-01 20:49:45.208710: val_loss -0.8546
635
+ 2023-11-01 20:49:45.208788: Pseudo dice [0.8691]
636
+ 2023-11-01 20:49:45.208872: Epoch time: 391.36 s
637
+ 2023-11-01 20:49:45.208949: Yayy! New best EMA pseudo Dice: 0.8554
638
+ 2023-11-01 20:49:48.659945:
639
+ 2023-11-01 20:49:48.660187: Epoch 79
640
+ 2023-11-01 20:49:48.660336: Current learning rate: 0.00929
641
+ 2023-11-01 20:56:27.244738: train_loss -0.8729
642
+ 2023-11-01 20:56:27.244885: val_loss -0.823
643
+ 2023-11-01 20:56:27.244961: Pseudo dice [0.8431]
644
+ 2023-11-01 20:56:27.245062: Epoch time: 398.59 s
645
+ 2023-11-01 20:56:28.606276:
646
+ 2023-11-01 20:56:28.606544: Epoch 80
647
+ 2023-11-01 20:56:28.606683: Current learning rate: 0.00928
648
+ 2023-11-01 21:02:13.477636: train_loss -0.8697
649
+ 2023-11-01 21:02:13.477855: val_loss -0.8491
650
+ 2023-11-01 21:02:13.477940: Pseudo dice [0.8644]
651
+ 2023-11-01 21:02:13.478032: Epoch time: 344.87 s
652
+ 2023-11-01 21:02:15.423834:
653
+ 2023-11-01 21:02:15.423977: Epoch 81
654
+ 2023-11-01 21:02:15.424082: Current learning rate: 0.00927
655
+ 2023-11-01 21:09:02.808094: train_loss -0.8647
656
+ 2023-11-01 21:09:02.808241: val_loss -0.8452
657
+ 2023-11-01 21:09:02.808317: Pseudo dice [0.8589]
658
+ 2023-11-01 21:09:02.808407: Epoch time: 407.39 s
659
+ 2023-11-01 21:09:02.808476: Yayy! New best EMA pseudo Dice: 0.8555
660
+ 2023-11-01 21:09:05.703171:
661
+ 2023-11-01 21:09:05.703421: Epoch 82
662
+ 2023-11-01 21:09:05.703530: Current learning rate: 0.00926
663
+ 2023-11-01 21:16:06.424918: train_loss -0.8716
664
+ 2023-11-01 21:16:06.425142: val_loss -0.8495
665
+ 2023-11-01 21:16:06.425221: Pseudo dice [0.8594]
666
+ 2023-11-01 21:16:06.425308: Epoch time: 420.72 s
667
+ 2023-11-01 21:16:06.425380: Yayy! New best EMA pseudo Dice: 0.8559
668
+ 2023-11-01 21:16:09.840999:
669
+ 2023-11-01 21:16:09.841176: Epoch 83
670
+ 2023-11-01 21:16:09.841352: Current learning rate: 0.00925
671
+ 2023-11-01 21:23:24.319916: train_loss -0.868
672
+ 2023-11-01 21:23:24.320112: val_loss -0.8406
673
+ 2023-11-01 21:23:24.320209: Pseudo dice [0.8574]
674
+ 2023-11-01 21:23:24.320314: Epoch time: 434.48 s
675
+ 2023-11-01 21:23:24.320405: Yayy! New best EMA pseudo Dice: 0.8561
676
+ 2023-11-01 21:23:27.298709:
677
+ 2023-11-01 21:23:27.298864: Epoch 84
678
+ 2023-11-01 21:23:27.298973: Current learning rate: 0.00924
679
+ 2023-11-01 21:30:43.546995: train_loss -0.8701
680
+ 2023-11-01 21:30:43.547157: val_loss -0.8253
681
+ 2023-11-01 21:30:43.547235: Pseudo dice [0.8417]
682
+ 2023-11-01 21:30:43.547317: Epoch time: 436.25 s
683
+ 2023-11-01 21:30:44.786345:
684
+ 2023-11-01 21:30:44.786538: Epoch 85
685
+ 2023-11-01 21:30:44.786641: Current learning rate: 0.00923
686
+ 2023-11-01 21:37:58.413231: train_loss -0.8723
687
+ 2023-11-01 21:37:58.413386: val_loss -0.8552
688
+ 2023-11-01 21:37:58.413463: Pseudo dice [0.8701]
689
+ 2023-11-01 21:37:58.413544: Epoch time: 433.63 s
690
+ 2023-11-01 21:37:58.413614: Yayy! New best EMA pseudo Dice: 0.8562
691
+ 2023-11-01 21:38:01.865586:
692
+ 2023-11-01 21:38:01.865703: Epoch 86
693
+ 2023-11-01 21:38:01.865823: Current learning rate: 0.00922
694
+ 2023-11-01 21:45:18.620329: train_loss -0.8744
695
+ 2023-11-01 21:45:18.620496: val_loss -0.8438
696
+ 2023-11-01 21:45:18.620577: Pseudo dice [0.8632]
697
+ 2023-11-01 21:45:18.620671: Epoch time: 436.76 s
698
+ 2023-11-01 21:45:18.620749: Yayy! New best EMA pseudo Dice: 0.8569
699
+ 2023-11-01 21:45:21.499573:
700
+ 2023-11-01 21:45:21.499693: Epoch 87
701
+ 2023-11-01 21:45:21.499795: Current learning rate: 0.00921
702
+ 2023-11-01 21:52:36.339342: train_loss -0.8717
703
+ 2023-11-01 21:52:36.339495: val_loss -0.8511
704
+ 2023-11-01 21:52:36.339571: Pseudo dice [0.8621]
705
+ 2023-11-01 21:52:36.339651: Epoch time: 434.84 s
706
+ 2023-11-01 21:52:36.339720: Yayy! New best EMA pseudo Dice: 0.8574
707
+ 2023-11-01 21:52:39.453624:
708
+ 2023-11-01 21:52:39.453743: Epoch 88
709
+ 2023-11-01 21:52:39.453866: Current learning rate: 0.0092
710
+ 2023-11-01 21:59:52.945682: train_loss -0.8619
711
+ 2023-11-01 21:59:52.945839: val_loss -0.8467
712
+ 2023-11-01 21:59:52.945930: Pseudo dice [0.8622]
713
+ 2023-11-01 21:59:52.946022: Epoch time: 433.49 s
714
+ 2023-11-01 21:59:52.946101: Yayy! New best EMA pseudo Dice: 0.8579
715
+ 2023-11-01 21:59:56.453643:
716
+ 2023-11-01 21:59:56.453880: Epoch 89
717
+ 2023-11-01 21:59:56.454071: Current learning rate: 0.0092
718
+ 2023-11-01 22:07:12.722747: train_loss -0.8726
719
+ 2023-11-01 22:07:12.722909: val_loss -0.8409
720
+ 2023-11-01 22:07:12.723001: Pseudo dice [0.8568]
721
+ 2023-11-01 22:07:12.723093: Epoch time: 436.27 s
722
+ 2023-11-01 22:07:14.056156:
723
+ 2023-11-01 22:07:14.056432: Epoch 90
724
+ 2023-11-01 22:07:14.056579: Current learning rate: 0.00919
725
+ 2023-11-01 22:14:27.353832: train_loss -0.8711
726
+ 2023-11-01 22:14:27.354097: val_loss -0.8432
727
+ 2023-11-01 22:14:27.354194: Pseudo dice [0.8589]
728
+ 2023-11-01 22:14:27.354291: Epoch time: 433.3 s
729
+ 2023-11-01 22:14:27.354371: Yayy! New best EMA pseudo Dice: 0.8579
730
+ 2023-11-01 22:14:30.763898:
731
+ 2023-11-01 22:14:30.764036: Epoch 91
732
+ 2023-11-01 22:14:30.764157: Current learning rate: 0.00918
733
+ 2023-11-01 22:21:46.157187: train_loss -0.872
734
+ 2023-11-01 22:21:46.157347: val_loss -0.8283
735
+ 2023-11-01 22:21:46.157444: Pseudo dice [0.8374]
736
+ 2023-11-01 22:21:46.157536: Epoch time: 435.39 s
737
+ 2023-11-01 22:21:47.466944:
738
+ 2023-11-01 22:21:47.467058: Epoch 92
739
+ 2023-11-01 22:21:47.467180: Current learning rate: 0.00917
740
+ 2023-11-01 22:29:04.056901: train_loss -0.8696
741
+ 2023-11-01 22:29:04.057065: val_loss -0.8485
742
+ 2023-11-01 22:29:04.057159: Pseudo dice [0.8634]
743
+ 2023-11-01 22:29:04.057250: Epoch time: 436.59 s
744
+ 2023-11-01 22:29:05.497777:
745
+ 2023-11-01 22:29:05.497897: Epoch 93
746
+ 2023-11-01 22:29:05.498011: Current learning rate: 0.00916
747
+ 2023-11-01 22:36:18.757636: train_loss -0.8747
748
+ 2023-11-01 22:36:18.757812: val_loss -0.8561
749
+ 2023-11-01 22:36:18.757903: Pseudo dice [0.8692]
750
+ 2023-11-01 22:36:18.757994: Epoch time: 433.26 s
751
+ 2023-11-01 22:36:19.993176:
752
+ 2023-11-01 22:36:19.993371: Epoch 94
753
+ 2023-11-01 22:36:19.993494: Current learning rate: 0.00915
754
+ 2023-11-01 22:43:36.651664: train_loss -0.863
755
+ 2023-11-01 22:43:36.651837: val_loss -0.8016
756
+ 2023-11-01 22:43:36.651915: Pseudo dice [0.8001]
757
+ 2023-11-01 22:43:36.651997: Epoch time: 436.66 s
758
+ 2023-11-01 22:43:37.899960:
759
+ 2023-11-01 22:43:37.900075: Epoch 95
760
+ 2023-11-01 22:43:37.900195: Current learning rate: 0.00914
761
+ 2023-11-01 22:50:51.145122: train_loss -0.8657
762
+ 2023-11-01 22:50:51.145286: val_loss -0.8316
763
+ 2023-11-01 22:50:51.145362: Pseudo dice [0.8498]
764
+ 2023-11-01 22:50:51.145445: Epoch time: 433.25 s
765
+ 2023-11-01 22:50:52.408865:
766
+ 2023-11-01 22:50:52.409089: Epoch 96
767
+ 2023-11-01 22:50:52.409196: Current learning rate: 0.00913
768
+ 2023-11-01 22:58:09.094585: train_loss -0.8729
769
+ 2023-11-01 22:58:09.094731: val_loss -0.856
770
+ 2023-11-01 22:58:09.094809: Pseudo dice [0.8692]
771
+ 2023-11-01 22:58:09.094890: Epoch time: 436.69 s
772
+ 2023-11-01 22:58:10.360434:
773
+ 2023-11-01 22:58:10.360551: Epoch 97
774
+ 2023-11-01 22:58:10.360657: Current learning rate: 0.00912
775
+ 2023-11-01 23:05:23.430537: train_loss -0.8789
776
+ 2023-11-01 23:05:23.430861: val_loss -0.8264
777
+ 2023-11-01 23:05:23.430969: Pseudo dice [0.8379]
778
+ 2023-11-01 23:05:23.431085: Epoch time: 433.07 s
779
+ 2023-11-01 23:05:24.732380:
780
+ 2023-11-01 23:05:24.732483: Epoch 98
781
+ 2023-11-01 23:05:24.732599: Current learning rate: 0.00911
782
+ 2023-11-01 23:12:41.139498: train_loss -0.8794
783
+ 2023-11-01 23:12:41.139642: val_loss -0.84
784
+ 2023-11-01 23:12:41.139719: Pseudo dice [0.855]
785
+ 2023-11-01 23:12:41.139799: Epoch time: 436.41 s
786
+ 2023-11-01 23:12:42.402315:
787
+ 2023-11-01 23:12:42.402425: Epoch 99
788
+ 2023-11-01 23:12:42.402545: Current learning rate: 0.0091
789
+ 2023-11-01 23:19:57.647396: train_loss -0.8791
790
+ 2023-11-01 23:19:57.647616: val_loss -0.8442
791
+ 2023-11-01 23:19:57.647699: Pseudo dice [0.8561]
792
+ 2023-11-01 23:19:57.647796: Epoch time: 435.25 s
793
+ 2023-11-01 23:20:00.754984:
794
+ 2023-11-01 23:20:00.755117: Epoch 100
795
+ 2023-11-01 23:20:00.755220: Current learning rate: 0.0091
796
+ 2023-11-01 23:27:14.175089: train_loss -0.8769
797
+ 2023-11-01 23:27:14.175245: val_loss -0.8408
798
+ 2023-11-01 23:27:14.175321: Pseudo dice [0.8521]
799
+ 2023-11-01 23:27:14.175401: Epoch time: 433.42 s
800
+ 2023-11-01 23:27:15.540580:
801
+ 2023-11-01 23:27:15.540734: Epoch 101
802
+ 2023-11-01 23:27:15.540843: Current learning rate: 0.00909
803
+ 2023-11-01 23:34:31.044508: train_loss -0.874
804
+ 2023-11-01 23:34:31.044677: val_loss -0.8427
805
+ 2023-11-01 23:34:31.044759: Pseudo dice [0.8609]
806
+ 2023-11-01 23:34:31.044841: Epoch time: 435.5 s
807
+ 2023-11-01 23:34:32.272377:
808
+ 2023-11-01 23:34:32.272567: Epoch 102
809
+ 2023-11-01 23:34:32.272746: Current learning rate: 0.00908
810
+ 2023-11-01 23:41:45.649644: train_loss -0.8798
811
+ 2023-11-01 23:41:45.649800: val_loss -0.8595
812
+ 2023-11-01 23:41:45.649878: Pseudo dice [0.8681]
813
+ 2023-11-01 23:41:45.649960: Epoch time: 433.38 s
814
+ 2023-11-01 23:41:46.995403:
815
+ 2023-11-01 23:41:46.995526: Epoch 103
816
+ 2023-11-01 23:41:46.995647: Current learning rate: 0.00907
817
+ 2023-11-01 23:49:03.479175: train_loss -0.8759
818
+ 2023-11-01 23:49:03.479380: val_loss -0.8465
819
+ 2023-11-01 23:49:03.479460: Pseudo dice [0.8616]
820
+ 2023-11-01 23:49:03.479548: Epoch time: 436.48 s
821
+ 2023-11-01 23:49:04.767704:
822
+ 2023-11-01 23:49:04.767817: Epoch 104
823
+ 2023-11-01 23:49:04.767917: Current learning rate: 0.00906
824
+ 2023-11-01 23:56:17.898960: train_loss -0.8801
825
+ 2023-11-01 23:56:17.899181: val_loss -0.8407
826
+ 2023-11-01 23:56:17.899263: Pseudo dice [0.8542]
827
+ 2023-11-01 23:56:17.899352: Epoch time: 433.13 s
828
+ 2023-11-01 23:56:19.399012:
829
+ 2023-11-01 23:56:19.399129: Epoch 105
830
+ 2023-11-01 23:56:19.399232: Current learning rate: 0.00905
831
+ 2023-11-02 00:03:35.962226: train_loss -0.8822
832
+ 2023-11-02 00:03:35.962387: val_loss -0.8505
833
+ 2023-11-02 00:03:35.962470: Pseudo dice [0.8659]
834
+ 2023-11-02 00:03:35.962558: Epoch time: 436.56 s
835
+ 2023-11-02 00:03:37.239174:
836
+ 2023-11-02 00:03:37.239378: Epoch 106
837
+ 2023-11-02 00:03:37.239521: Current learning rate: 0.00904
838
+ 2023-11-02 00:10:52.053462: train_loss -0.8729
839
+ 2023-11-02 00:10:52.053623: val_loss -0.8452
840
+ 2023-11-02 00:10:52.053720: Pseudo dice [0.8592]
841
+ 2023-11-02 00:10:52.053812: Epoch time: 434.82 s
842
+ 2023-11-02 00:10:53.288189:
843
+ 2023-11-02 00:10:53.288384: Epoch 107
844
+ 2023-11-02 00:10:53.288568: Current learning rate: 0.00903
845
+ 2023-11-02 00:18:07.516624: train_loss -0.8753
846
+ 2023-11-02 00:18:07.516794: val_loss -0.8403
847
+ 2023-11-02 00:18:07.516875: Pseudo dice [0.862]
848
+ 2023-11-02 00:18:07.516957: Epoch time: 434.23 s
849
+ 2023-11-02 00:18:09.066262:
850
+ 2023-11-02 00:18:09.066387: Epoch 108
851
+ 2023-11-02 00:18:09.066493: Current learning rate: 0.00902
852
+ 2023-11-02 00:25:25.093628: train_loss -0.8646
853
+ 2023-11-02 00:25:25.093786: val_loss -0.8304
854
+ 2023-11-02 00:25:25.093868: Pseudo dice [0.844]
855
+ 2023-11-02 00:25:25.093954: Epoch time: 436.03 s
856
+ 2023-11-02 00:25:26.343967:
857
+ 2023-11-02 00:25:26.344111: Epoch 109
858
+ 2023-11-02 00:25:26.344217: Current learning rate: 0.00901
859
+ 2023-11-02 00:32:39.425357: train_loss -0.8646
860
+ 2023-11-02 00:32:39.425514: val_loss -0.846
861
+ 2023-11-02 00:32:39.425593: Pseudo dice [0.8602]
862
+ 2023-11-02 00:32:39.425676: Epoch time: 433.08 s
863
+ 2023-11-02 00:32:40.771884:
864
+ 2023-11-02 00:32:40.772107: Epoch 110
865
+ 2023-11-02 00:32:40.772216: Current learning rate: 0.009
866
+ 2023-11-02 00:39:55.921150: train_loss -0.8712
867
+ 2023-11-02 00:39:55.921306: val_loss -0.8452
868
+ 2023-11-02 00:39:55.921383: Pseudo dice [0.8602]
869
+ 2023-11-02 00:39:55.921463: Epoch time: 435.15 s
870
+ 2023-11-02 00:39:57.206603:
871
+ 2023-11-02 00:39:57.206715: Epoch 111
872
+ 2023-11-02 00:39:57.206824: Current learning rate: 0.009
873
+ 2023-11-02 00:47:10.277395: train_loss -0.8717
874
+ 2023-11-02 00:47:10.277572: val_loss -0.8421
875
+ 2023-11-02 00:47:10.277649: Pseudo dice [0.8545]
876
+ 2023-11-02 00:47:10.277732: Epoch time: 433.07 s
877
+ 2023-11-02 00:47:11.563618:
878
+ 2023-11-02 00:47:11.563894: Epoch 112
879
+ 2023-11-02 00:47:11.564039: Current learning rate: 0.00899
880
+ 2023-11-02 00:54:27.575353: train_loss -0.8729
881
+ 2023-11-02 00:54:27.575510: val_loss -0.8355
882
+ 2023-11-02 00:54:27.575592: Pseudo dice [0.8421]
883
+ 2023-11-02 00:54:27.575674: Epoch time: 436.01 s
884
+ 2023-11-02 00:54:28.860415:
885
+ 2023-11-02 00:54:28.860530: Epoch 113
886
+ 2023-11-02 00:54:28.860644: Current learning rate: 0.00898
887
+ 2023-11-02 01:01:41.733814: train_loss -0.8769
888
+ 2023-11-02 01:01:41.733976: val_loss -0.8443
889
+ 2023-11-02 01:01:41.734058: Pseudo dice [0.8499]
890
+ 2023-11-02 01:01:41.734144: Epoch time: 432.87 s
891
+ 2023-11-02 01:01:43.076969:
892
+ 2023-11-02 01:01:43.077088: Epoch 114
893
+ 2023-11-02 01:01:43.077198: Current learning rate: 0.00897
894
+ 2023-11-02 01:08:58.539639: train_loss -0.8788
895
+ 2023-11-02 01:08:58.539793: val_loss -0.8487
896
+ 2023-11-02 01:08:58.539922: Pseudo dice [0.8583]
897
+ 2023-11-02 01:08:58.540055: Epoch time: 435.46 s
898
+ 2023-11-02 01:08:59.975466:
899
+ 2023-11-02 01:08:59.975611: Epoch 115
900
+ 2023-11-02 01:08:59.975716: Current learning rate: 0.00896
901
+ 2023-11-02 01:16:13.366565: train_loss -0.8828
902
+ 2023-11-02 01:16:13.366741: val_loss -0.8511
903
+ 2023-11-02 01:16:13.366820: Pseudo dice [0.8594]
904
+ 2023-11-02 01:16:13.366902: Epoch time: 433.39 s
905
+ 2023-11-02 01:16:14.676200:
906
+ 2023-11-02 01:16:14.676552: Epoch 116
907
+ 2023-11-02 01:16:14.676724: Current learning rate: 0.00895
908
+ 2023-11-02 01:23:31.428191: train_loss -0.8836
909
+ 2023-11-02 01:23:31.428341: val_loss -0.8506
910
+ 2023-11-02 01:23:31.428421: Pseudo dice [0.862]
911
+ 2023-11-02 01:23:31.428503: Epoch time: 436.75 s
912
+ 2023-11-02 01:23:32.719769:
913
+ 2023-11-02 01:23:32.719933: Epoch 117
914
+ 2023-11-02 01:23:32.720038: Current learning rate: 0.00894
915
+ 2023-11-02 01:30:46.037320: train_loss -0.8722
916
+ 2023-11-02 01:30:46.037478: val_loss -0.8261
917
+ 2023-11-02 01:30:46.037570: Pseudo dice [0.8424]
918
+ 2023-11-02 01:30:46.037661: Epoch time: 433.32 s
919
+ 2023-11-02 01:30:47.285925:
920
+ 2023-11-02 01:30:47.286037: Epoch 118
921
+ 2023-11-02 01:30:47.286166: Current learning rate: 0.00893
922
+ 2023-11-02 01:38:02.800852: train_loss -0.8761
923
+ 2023-11-02 01:38:02.801153: val_loss -0.8434
924
+ 2023-11-02 01:38:02.801300: Pseudo dice [0.853]
925
+ 2023-11-02 01:38:02.801437: Epoch time: 435.52 s
926
+ 2023-11-02 01:38:04.119251:
927
+ 2023-11-02 01:38:04.119370: Epoch 119
928
+ 2023-11-02 01:38:04.119471: Current learning rate: 0.00892
929
+ 2023-11-02 01:45:20.250262: train_loss -0.8768
930
+ 2023-11-02 01:45:20.250430: val_loss -0.8447
931
+ 2023-11-02 01:45:20.250508: Pseudo dice [0.8596]
932
+ 2023-11-02 01:45:20.250591: Epoch time: 436.13 s
933
+ 2023-11-02 01:45:21.521047:
934
+ 2023-11-02 01:45:21.521251: Epoch 120
935
+ 2023-11-02 01:45:21.521351: Current learning rate: 0.00891
936
+ 2023-11-02 01:52:34.959630: train_loss -0.881
937
+ 2023-11-02 01:52:34.959801: val_loss -0.8285
938
+ 2023-11-02 01:52:34.959894: Pseudo dice [0.8438]
939
+ 2023-11-02 01:52:34.959987: Epoch time: 433.44 s
940
+ 2023-11-02 01:52:36.346380:
941
+ 2023-11-02 01:52:36.346596: Epoch 121
942
+ 2023-11-02 01:52:36.346726: Current learning rate: 0.0089
943
+ 2023-11-02 01:59:53.028419: train_loss -0.8879
944
+ 2023-11-02 01:59:53.028565: val_loss -0.8463
945
+ 2023-11-02 01:59:53.028667: Pseudo dice [0.8649]
946
+ 2023-11-02 01:59:53.028763: Epoch time: 436.68 s
947
+ 2023-11-02 01:59:54.622047:
948
+ 2023-11-02 01:59:54.622311: Epoch 122
949
+ 2023-11-02 01:59:54.622450: Current learning rate: 0.00889
950
+ 2023-11-02 02:07:07.666163: train_loss -0.8842
951
+ 2023-11-02 02:07:07.666328: val_loss -0.8464
952
+ 2023-11-02 02:07:07.666414: Pseudo dice [0.8556]
953
+ 2023-11-02 02:07:07.666498: Epoch time: 433.05 s
954
+ 2023-11-02 02:07:09.125690:
955
+ 2023-11-02 02:07:09.125872: Epoch 123
956
+ 2023-11-02 02:07:09.125980: Current learning rate: 0.00889
957
+ 2023-11-02 02:14:24.177333: train_loss -0.8826
958
+ 2023-11-02 02:14:24.177678: val_loss -0.8315
959
+ 2023-11-02 02:14:24.177774: Pseudo dice [0.8463]
960
+ 2023-11-02 02:14:24.177869: Epoch time: 435.05 s
961
+ 2023-11-02 02:14:25.468835:
962
+ 2023-11-02 02:14:25.468954: Epoch 124
963
+ 2023-11-02 02:14:25.469055: Current learning rate: 0.00888
964
+ 2023-11-02 02:21:39.608838: train_loss -0.8741
965
+ 2023-11-02 02:21:39.609026: val_loss -0.8357
966
+ 2023-11-02 02:21:39.609122: Pseudo dice [0.8502]
967
+ 2023-11-02 02:21:39.609216: Epoch time: 434.14 s
968
+ 2023-11-02 02:21:40.857705:
969
+ 2023-11-02 02:21:40.857822: Epoch 125
970
+ 2023-11-02 02:21:40.857941: Current learning rate: 0.00887
971
+ 2023-11-02 02:28:55.585155: train_loss -0.8775
972
+ 2023-11-02 02:28:55.585331: val_loss -0.842
973
+ 2023-11-02 02:28:55.585410: Pseudo dice [0.8557]
974
+ 2023-11-02 02:28:55.585504: Epoch time: 434.73 s
975
+ 2023-11-02 02:28:56.891179:
976
+ 2023-11-02 02:28:56.891394: Epoch 126
977
+ 2023-11-02 02:28:56.891504: Current learning rate: 0.00886
978
+ 2023-11-02 02:36:12.705953: train_loss -0.8815
979
+ 2023-11-02 02:36:12.706116: val_loss -0.8367
980
+ 2023-11-02 02:36:12.706193: Pseudo dice [0.8525]
981
+ 2023-11-02 02:36:12.706274: Epoch time: 435.82 s
982
+ 2023-11-02 02:36:13.979010:
983
+ 2023-11-02 02:36:13.979129: Epoch 127
984
+ 2023-11-02 02:36:13.979249: Current learning rate: 0.00885
985
+ 2023-11-02 02:43:27.211055: train_loss -0.8716
986
+ 2023-11-02 02:43:27.211225: val_loss -0.8437
987
+ 2023-11-02 02:43:27.211321: Pseudo dice [0.857]
988
+ 2023-11-02 02:43:27.211414: Epoch time: 433.23 s
989
+ 2023-11-02 02:43:28.510709:
990
+ 2023-11-02 02:43:28.510821: Epoch 128
991
+ 2023-11-02 02:43:28.510940: Current learning rate: 0.00884
992
+ 2023-11-02 02:50:45.658164: train_loss -0.8811
993
+ 2023-11-02 02:50:45.658325: val_loss -0.8458
994
+ 2023-11-02 02:50:45.658402: Pseudo dice [0.862]
995
+ 2023-11-02 02:50:45.658485: Epoch time: 437.15 s
996
+ 2023-11-02 02:50:46.936824:
997
+ 2023-11-02 02:50:46.937009: Epoch 129
998
+ 2023-11-02 02:50:46.937130: Current learning rate: 0.00883
999
+ 2023-11-02 02:58:01.780977: train_loss -0.8781
1000
+ 2023-11-02 02:58:01.781137: val_loss -0.8446
1001
+ 2023-11-02 02:58:01.781215: Pseudo dice [0.8583]
1002
+ 2023-11-02 02:58:01.781296: Epoch time: 434.84 s
1003
+ 2023-11-02 02:58:03.227188:
1004
+ 2023-11-02 02:58:03.227302: Epoch 130
1005
+ 2023-11-02 02:58:03.227420: Current learning rate: 0.00882
1006
+ 2023-11-02 03:05:16.962477: train_loss -0.8732
1007
+ 2023-11-02 03:05:16.962643: val_loss -0.8536
1008
+ 2023-11-02 03:05:16.962735: Pseudo dice [0.8692]
1009
+ 2023-11-02 03:05:16.962826: Epoch time: 433.74 s
1010
+ 2023-11-02 03:05:18.442439:
1011
+ 2023-11-02 03:05:18.442561: Epoch 131
1012
+ 2023-11-02 03:05:18.442682: Current learning rate: 0.00881
1013
+ 2023-11-02 03:12:34.690634: train_loss -0.8739
1014
+ 2023-11-02 03:12:34.690775: val_loss -0.8037
1015
+ 2023-11-02 03:12:34.690854: Pseudo dice [0.8269]
1016
+ 2023-11-02 03:12:34.690934: Epoch time: 436.25 s
1017
+ 2023-11-02 03:12:35.997582:
1018
+ 2023-11-02 03:12:35.997698: Epoch 132
1019
+ 2023-11-02 03:12:35.997820: Current learning rate: 0.0088
1020
+ 2023-11-02 03:19:48.986389: train_loss -0.8611
1021
+ 2023-11-02 03:19:48.986553: val_loss -0.8278
1022
+ 2023-11-02 03:19:48.986631: Pseudo dice [0.845]
1023
+ 2023-11-02 03:19:48.986713: Epoch time: 432.99 s
1024
+ 2023-11-02 03:19:50.388693:
1025
+ 2023-11-02 03:19:50.388926: Epoch 133
1026
+ 2023-11-02 03:19:50.389046: Current learning rate: 0.00879
1027
+ 2023-11-02 03:27:05.413781: train_loss -0.8635
1028
+ 2023-11-02 03:27:05.413942: val_loss -0.8314
1029
+ 2023-11-02 03:27:05.414019: Pseudo dice [0.8389]
1030
+ 2023-11-02 03:27:05.414100: Epoch time: 435.03 s
1031
+ 2023-11-02 03:27:06.669495:
1032
+ 2023-11-02 03:27:06.669669: Epoch 134
1033
+ 2023-11-02 03:27:06.669823: Current learning rate: 0.00879
1034
+ 2023-11-02 03:34:20.001205: train_loss -0.8802
1035
+ 2023-11-02 03:34:20.001367: val_loss -0.8463
1036
+ 2023-11-02 03:34:20.001444: Pseudo dice [0.8567]
1037
+ 2023-11-02 03:34:20.001527: Epoch time: 433.33 s
1038
+ 2023-11-02 03:34:21.361528:
1039
+ 2023-11-02 03:34:21.361763: Epoch 135
1040
+ 2023-11-02 03:34:21.361914: Current learning rate: 0.00878
1041
+ 2023-11-02 03:41:38.087406: train_loss -0.8792
1042
+ 2023-11-02 03:41:38.087581: val_loss -0.8227
1043
+ 2023-11-02 03:41:38.087660: Pseudo dice [0.8348]
1044
+ 2023-11-02 03:41:38.087747: Epoch time: 436.73 s
1045
+ 2023-11-02 03:41:39.419953:
1046
+ 2023-11-02 03:41:39.420061: Epoch 136
1047
+ 2023-11-02 03:41:39.420182: Current learning rate: 0.00877
1048
+ 2023-11-02 03:47:22.058982: train_loss -0.877
1049
+ 2023-11-02 03:47:22.059133: val_loss -0.8447
1050
+ 2023-11-02 03:47:22.059223: Pseudo dice [0.8583]
1051
+ 2023-11-02 03:47:22.059306: Epoch time: 342.64 s
1052
+ 2023-11-02 03:47:23.484094:
1053
+ 2023-11-02 03:47:23.484206: Epoch 137
1054
+ 2023-11-02 03:47:23.484329: Current learning rate: 0.00876
1055
+ 2023-11-02 03:52:59.822257: train_loss -0.8816
1056
+ 2023-11-02 03:52:59.822436: val_loss -0.8488
1057
+ 2023-11-02 03:52:59.822528: Pseudo dice [0.8607]
1058
+ 2023-11-02 03:52:59.822621: Epoch time: 336.34 s
1059
+ 2023-11-02 03:53:01.066985:
1060
+ 2023-11-02 03:53:01.067096: Epoch 138
1061
+ 2023-11-02 03:53:01.067222: Current learning rate: 0.00875
1062
+ 2023-11-02 03:58:37.340702: train_loss -0.8786
1063
+ 2023-11-02 03:58:37.340869: val_loss -0.8334
1064
+ 2023-11-02 03:58:37.340978: Pseudo dice [0.8491]
1065
+ 2023-11-02 03:58:37.341065: Epoch time: 336.27 s
1066
+ 2023-11-02 03:58:38.584766:
1067
+ 2023-11-02 03:58:38.584876: Epoch 139
1068
+ 2023-11-02 03:58:38.585009: Current learning rate: 0.00874
1069
+ 2023-11-02 04:04:14.928399: train_loss -0.8734
1070
+ 2023-11-02 04:04:14.928562: val_loss -0.8192
1071
+ 2023-11-02 04:04:14.928667: Pseudo dice [0.8315]
1072
+ 2023-11-02 04:04:14.928761: Epoch time: 336.34 s
1073
+ 2023-11-02 04:04:16.184735:
1074
+ 2023-11-02 04:04:16.184844: Epoch 140
1075
+ 2023-11-02 04:04:16.184954: Current learning rate: 0.00873
1076
+ 2023-11-02 04:09:52.789436: train_loss -0.8695
1077
+ 2023-11-02 04:09:52.789584: val_loss -0.8375
1078
+ 2023-11-02 04:09:52.789672: Pseudo dice [0.846]
1079
+ 2023-11-02 04:09:52.789758: Epoch time: 336.61 s
1080
+ 2023-11-02 04:09:54.048096:
1081
+ 2023-11-02 04:09:54.048350: Epoch 141
1082
+ 2023-11-02 04:09:54.048554: Current learning rate: 0.00872
1083
+ 2023-11-02 04:15:30.607340: train_loss -0.8483
1084
+ 2023-11-02 04:15:30.607504: val_loss -0.8207
1085
+ 2023-11-02 04:15:30.607612: Pseudo dice [0.8285]
1086
+ 2023-11-02 04:15:30.607722: Epoch time: 336.56 s
1087
+ 2023-11-02 04:15:31.864546:
1088
+ 2023-11-02 04:15:31.864675: Epoch 142
1089
+ 2023-11-02 04:15:31.864780: Current learning rate: 0.00871
1090
+ 2023-11-02 04:21:08.416313: train_loss -0.8506
1091
+ 2023-11-02 04:21:08.416520: val_loss -0.8325
1092
+ 2023-11-02 04:21:08.416610: Pseudo dice [0.847]
1093
+ 2023-11-02 04:21:08.416725: Epoch time: 336.55 s
1094
+ 2023-11-02 04:21:09.661004:
1095
+ 2023-11-02 04:21:09.661105: Epoch 143
1096
+ 2023-11-02 04:21:09.661219: Current learning rate: 0.0087
1097
+ 2023-11-02 04:26:46.112775: train_loss -0.8572
1098
+ 2023-11-02 04:26:46.112962: val_loss -0.8359
1099
+ 2023-11-02 04:26:46.113055: Pseudo dice [0.8427]
1100
+ 2023-11-02 04:26:46.113138: Epoch time: 336.45 s
1101
+ 2023-11-02 04:26:47.568575:
1102
+ 2023-11-02 04:26:47.568710: Epoch 144
1103
+ 2023-11-02 04:26:47.568815: Current learning rate: 0.00869
1104
+ 2023-11-02 04:32:24.044533: train_loss -0.8674
1105
+ 2023-11-02 04:32:24.044705: val_loss -0.843
1106
+ 2023-11-02 04:32:24.044802: Pseudo dice [0.8592]
1107
+ 2023-11-02 04:32:24.044895: Epoch time: 336.48 s
1108
+ 2023-11-02 04:32:25.292179:
1109
+ 2023-11-02 04:32:25.292291: Epoch 145
1110
+ 2023-11-02 04:32:25.292401: Current learning rate: 0.00868
1111
+ 2023-11-02 04:38:01.858025: train_loss -0.8732
1112
+ 2023-11-02 04:38:01.858180: val_loss -0.8342
1113
+ 2023-11-02 04:38:01.858269: Pseudo dice [0.8463]
1114
+ 2023-11-02 04:38:01.858351: Epoch time: 336.57 s
1115
+ 2023-11-02 04:38:03.105375:
1116
+ 2023-11-02 04:38:03.105484: Epoch 146
1117
+ 2023-11-02 04:38:03.105588: Current learning rate: 0.00868
1118
+ 2023-11-02 04:43:39.547341: train_loss -0.8819
1119
+ 2023-11-02 04:43:39.547496: val_loss -0.8269
1120
+ 2023-11-02 04:43:39.547586: Pseudo dice [0.8381]
1121
+ 2023-11-02 04:43:39.547668: Epoch time: 336.44 s
1122
+ 2023-11-02 04:43:40.791470:
1123
+ 2023-11-02 04:43:40.791583: Epoch 147
1124
+ 2023-11-02 04:43:40.791706: Current learning rate: 0.00867
1125
+ 2023-11-02 04:49:17.225561: train_loss -0.8822
1126
+ 2023-11-02 04:49:17.225704: val_loss -0.8577
1127
+ 2023-11-02 04:49:17.225790: Pseudo dice [0.8733]
1128
+ 2023-11-02 04:49:17.225877: Epoch time: 336.43 s
1129
+ 2023-11-02 04:49:18.477748:
1130
+ 2023-11-02 04:49:18.477936: Epoch 148
1131
+ 2023-11-02 04:49:18.478073: Current learning rate: 0.00866
1132
+ 2023-11-02 04:54:54.951773: train_loss -0.879
1133
+ 2023-11-02 04:54:54.951910: val_loss -0.8505
1134
+ 2023-11-02 04:54:54.952002: Pseudo dice [0.8654]
1135
+ 2023-11-02 04:54:54.952085: Epoch time: 336.47 s
1136
+ 2023-11-02 04:54:56.200920:
1137
+ 2023-11-02 04:54:56.201056: Epoch 149
1138
+ 2023-11-02 04:54:56.201190: Current learning rate: 0.00865
1139
+ 2023-11-02 05:00:32.740407: train_loss -0.8802
1140
+ 2023-11-02 05:00:32.740562: val_loss -0.8509
1141
+ 2023-11-02 05:00:32.740682: Pseudo dice [0.8636]
1142
+ 2023-11-02 05:00:32.740778: Epoch time: 336.54 s
1143
+ 2023-11-02 05:00:35.910440:
1144
+ 2023-11-02 05:00:35.910661: Epoch 150
1145
+ 2023-11-02 05:00:35.910805: Current learning rate: 0.00864
1146
+ 2023-11-02 05:06:12.417440: train_loss -0.8801
1147
+ 2023-11-02 05:06:12.417613: val_loss -0.8467
1148
+ 2023-11-02 05:06:12.417691: Pseudo dice [0.8549]
1149
+ 2023-11-02 05:06:12.417774: Epoch time: 336.51 s
1150
+ 2023-11-02 05:06:13.857074:
1151
+ 2023-11-02 05:06:13.857330: Epoch 151
1152
+ 2023-11-02 05:06:13.857524: Current learning rate: 0.00863
1153
+ 2023-11-02 05:11:50.545561: train_loss -0.8858
1154
+ 2023-11-02 05:11:50.545707: val_loss -0.8448
1155
+ 2023-11-02 05:11:50.545797: Pseudo dice [0.8561]
1156
+ 2023-11-02 05:11:50.545882: Epoch time: 336.69 s
1157
+ 2023-11-02 05:11:51.791621:
1158
+ 2023-11-02 05:11:51.791767: Epoch 152
1159
+ 2023-11-02 05:11:51.791880: Current learning rate: 0.00862
1160
+ 2023-11-02 05:17:28.392731: train_loss -0.887
1161
+ 2023-11-02 05:17:28.392887: val_loss -0.8431
1162
+ 2023-11-02 05:17:28.392982: Pseudo dice [0.8568]
1163
+ 2023-11-02 05:17:28.393075: Epoch time: 336.6 s
1164
+ 2023-11-02 05:17:29.649857:
1165
+ 2023-11-02 05:17:29.650160: Epoch 153
1166
+ 2023-11-02 05:17:29.650368: Current learning rate: 0.00861
1167
+ 2023-11-02 05:23:06.169423: train_loss -0.8871
1168
+ 2023-11-02 05:23:06.169602: val_loss -0.8486
1169
+ 2023-11-02 05:23:06.169693: Pseudo dice [0.8623]
1170
+ 2023-11-02 05:23:06.169779: Epoch time: 336.52 s
1171
+ 2023-11-02 05:23:07.469091:
1172
+ 2023-11-02 05:23:07.469342: Epoch 154
1173
+ 2023-11-02 05:23:07.469500: Current learning rate: 0.0086
1174
+ 2023-11-02 05:28:43.954341: train_loss -0.8896
1175
+ 2023-11-02 05:28:43.954503: val_loss -0.8496
1176
+ 2023-11-02 05:28:43.954581: Pseudo dice [0.8585]
1177
+ 2023-11-02 05:28:43.954662: Epoch time: 336.49 s
1178
+ 2023-11-02 05:28:45.225295:
1179
+ 2023-11-02 05:28:45.225418: Epoch 155
1180
+ 2023-11-02 05:28:45.225519: Current learning rate: 0.00859
1181
+ 2023-11-02 05:34:21.617697: train_loss -0.8883
1182
+ 2023-11-02 05:34:21.617910: val_loss -0.8367
1183
+ 2023-11-02 05:34:21.618057: Pseudo dice [0.8467]
1184
+ 2023-11-02 05:34:21.618217: Epoch time: 336.39 s
1185
+ 2023-11-02 05:34:22.886313:
1186
+ 2023-11-02 05:34:22.886415: Epoch 156
1187
+ 2023-11-02 05:34:22.886538: Current learning rate: 0.00858
1188
+ 2023-11-02 05:39:59.368596: train_loss -0.8862
1189
+ 2023-11-02 05:39:59.368779: val_loss -0.8521
1190
+ 2023-11-02 05:39:59.368874: Pseudo dice [0.8607]
1191
+ 2023-11-02 05:39:59.368967: Epoch time: 336.48 s
1192
+ 2023-11-02 05:40:00.643381:
1193
+ 2023-11-02 05:40:00.643499: Epoch 157
1194
+ 2023-11-02 05:40:00.643624: Current learning rate: 0.00858
1195
+ 2023-11-02 05:45:37.103672: train_loss -0.886
1196
+ 2023-11-02 05:45:37.103821: val_loss -0.8465
1197
+ 2023-11-02 05:45:37.103907: Pseudo dice [0.8623]
1198
+ 2023-11-02 05:45:37.103994: Epoch time: 336.46 s
1199
+ 2023-11-02 05:45:38.569716:
1200
+ 2023-11-02 05:45:38.569824: Epoch 158
1201
+ 2023-11-02 05:45:38.569945: Current learning rate: 0.00857
1202
+ 2023-11-02 05:51:15.123668: train_loss -0.8856
1203
+ 2023-11-02 05:51:15.123841: val_loss -0.8449
1204
+ 2023-11-02 05:51:15.123917: Pseudo dice [0.8559]
1205
+ 2023-11-02 05:51:15.124001: Epoch time: 336.55 s
1206
+ 2023-11-02 05:51:16.393089:
1207
+ 2023-11-02 05:51:16.393203: Epoch 159
1208
+ 2023-11-02 05:51:16.393325: Current learning rate: 0.00856
1209
+ 2023-11-02 05:56:52.995335: train_loss -0.8778
1210
+ 2023-11-02 05:56:52.995498: val_loss -0.8454
1211
+ 2023-11-02 05:56:52.995585: Pseudo dice [0.8585]
1212
+ 2023-11-02 05:56:52.995668: Epoch time: 336.6 s
1213
+ 2023-11-02 05:56:54.264992:
1214
+ 2023-11-02 05:56:54.265221: Epoch 160
1215
+ 2023-11-02 05:56:54.265382: Current learning rate: 0.00855
1216
+ 2023-11-02 06:02:30.761387: train_loss -0.8732
1217
+ 2023-11-02 06:02:30.761570: val_loss -0.8527
1218
+ 2023-11-02 06:02:30.761666: Pseudo dice [0.8614]
1219
+ 2023-11-02 06:02:30.761760: Epoch time: 336.5 s
1220
+ 2023-11-02 06:02:32.029355:
1221
+ 2023-11-02 06:02:32.029547: Epoch 161
1222
+ 2023-11-02 06:02:32.029713: Current learning rate: 0.00854
1223
+ 2023-11-02 06:08:08.447033: train_loss -0.8827
1224
+ 2023-11-02 06:08:08.447202: val_loss -0.8413
1225
+ 2023-11-02 06:08:08.447297: Pseudo dice [0.8586]
1226
+ 2023-11-02 06:08:08.447391: Epoch time: 336.42 s
1227
+ 2023-11-02 06:08:09.725256:
1228
+ 2023-11-02 06:08:09.725421: Epoch 162
1229
+ 2023-11-02 06:08:09.725579: Current learning rate: 0.00853
1230
+ 2023-11-02 06:13:46.159787: train_loss -0.8868
1231
+ 2023-11-02 06:13:46.159935: val_loss -0.8427
1232
+ 2023-11-02 06:13:46.160023: Pseudo dice [0.8553]
1233
+ 2023-11-02 06:13:46.160109: Epoch time: 336.44 s
1234
+ 2023-11-02 06:13:47.428374:
1235
+ 2023-11-02 06:13:47.428583: Epoch 163
1236
+ 2023-11-02 06:13:47.428732: Current learning rate: 0.00852
1237
+ 2023-11-02 06:19:23.879079: train_loss -0.8858
1238
+ 2023-11-02 06:19:23.879317: val_loss -0.8312
1239
+ 2023-11-02 06:19:23.879421: Pseudo dice [0.8406]
1240
+ 2023-11-02 06:19:23.879503: Epoch time: 336.45 s
1241
+ 2023-11-02 06:19:25.144032:
1242
+ 2023-11-02 06:19:25.144132: Epoch 164
1243
+ 2023-11-02 06:19:25.144244: Current learning rate: 0.00851
1244
+ 2023-11-02 06:25:01.559911: train_loss -0.8807
1245
+ 2023-11-02 06:25:01.560055: val_loss -0.8419
1246
+ 2023-11-02 06:25:01.560145: Pseudo dice [0.8512]
1247
+ 2023-11-02 06:25:01.560227: Epoch time: 336.42 s
1248
+ 2023-11-02 06:25:02.969034:
1249
+ 2023-11-02 06:25:02.969216: Epoch 165
1250
+ 2023-11-02 06:25:02.969375: Current learning rate: 0.0085
1251
+ 2023-11-02 06:30:39.490213: train_loss -0.8821
1252
+ 2023-11-02 06:30:39.490362: val_loss -0.8389
1253
+ 2023-11-02 06:30:39.490455: Pseudo dice [0.8524]
1254
+ 2023-11-02 06:30:39.490537: Epoch time: 336.52 s
1255
+ 2023-11-02 06:30:40.715593:
1256
+ 2023-11-02 06:30:40.715716: Epoch 166
1257
+ 2023-11-02 06:30:40.715817: Current learning rate: 0.00849
1258
+ 2023-11-02 06:36:17.175295: train_loss -0.8832
1259
+ 2023-11-02 06:36:17.175459: val_loss -0.8593
1260
+ 2023-11-02 06:36:17.175535: Pseudo dice [0.8721]
1261
+ 2023-11-02 06:36:17.175618: Epoch time: 336.46 s
1262
+ 2023-11-02 06:36:18.399064:
1263
+ 2023-11-02 06:36:18.399180: Epoch 167
1264
+ 2023-11-02 06:36:18.399296: Current learning rate: 0.00848
1265
+ 2023-11-02 06:41:54.923880: train_loss -0.8846
1266
+ 2023-11-02 06:41:54.924026: val_loss -0.8387
1267
+ 2023-11-02 06:41:54.924097: Pseudo dice [0.8489]
1268
+ 2023-11-02 06:41:54.924182: Epoch time: 336.53 s
1269
+ 2023-11-02 06:41:56.176949:
1270
+ 2023-11-02 06:41:56.177210: Epoch 168
1271
+ 2023-11-02 06:41:56.177419: Current learning rate: 0.00847
1272
+ 2023-11-02 06:47:32.692571: train_loss -0.8689
1273
+ 2023-11-02 06:47:32.692750: val_loss -0.8447
1274
+ 2023-11-02 06:47:32.692827: Pseudo dice [0.8529]
1275
+ 2023-11-02 06:47:32.692909: Epoch time: 336.52 s
1276
+ 2023-11-02 06:47:33.942803:
1277
+ 2023-11-02 06:47:33.942922: Epoch 169
1278
+ 2023-11-02 06:47:33.943023: Current learning rate: 0.00847
1279
+ 2023-11-02 06:53:10.495537: train_loss -0.8698
1280
+ 2023-11-02 06:53:10.495706: val_loss -0.8395
1281
+ 2023-11-02 06:53:10.495784: Pseudo dice [0.8497]
1282
+ 2023-11-02 06:53:10.495866: Epoch time: 336.55 s
1283
+ 2023-11-02 06:53:11.747387:
1284
+ 2023-11-02 06:53:11.747509: Epoch 170
1285
+ 2023-11-02 06:53:11.747622: Current learning rate: 0.00846
1286
+ 2023-11-02 06:58:48.228090: train_loss -0.8739
1287
+ 2023-11-02 06:58:48.228245: val_loss -0.8433
1288
+ 2023-11-02 06:58:48.228341: Pseudo dice [0.8626]
1289
+ 2023-11-02 06:58:48.228447: Epoch time: 336.48 s
1290
+ 2023-11-02 06:58:49.485036:
1291
+ 2023-11-02 06:58:49.485212: Epoch 171
1292
+ 2023-11-02 06:58:49.485367: Current learning rate: 0.00845
1293
+ 2023-11-02 07:04:25.989341: train_loss -0.8819
1294
+ 2023-11-02 07:04:25.989492: val_loss -0.8472
1295
+ 2023-11-02 07:04:25.989589: Pseudo dice [0.8549]
1296
+ 2023-11-02 07:04:25.989682: Epoch time: 336.51 s
1297
+ 2023-11-02 07:04:27.425611:
1298
+ 2023-11-02 07:04:27.425783: Epoch 172
1299
+ 2023-11-02 07:04:27.425948: Current learning rate: 0.00844
1300
+ 2023-11-02 07:10:03.930588: train_loss -0.8866
1301
+ 2023-11-02 07:10:03.930730: val_loss -0.8289
1302
+ 2023-11-02 07:10:03.930820: Pseudo dice [0.839]
1303
+ 2023-11-02 07:10:03.930925: Epoch time: 336.51 s
1304
+ 2023-11-02 07:10:05.210962:
1305
+ 2023-11-02 07:10:05.211163: Epoch 173
1306
+ 2023-11-02 07:10:05.211333: Current learning rate: 0.00843
1307
+ 2023-11-02 07:15:41.824708: train_loss -0.8902
1308
+ 2023-11-02 07:15:41.824867: val_loss -0.8471
1309
+ 2023-11-02 07:15:41.824956: Pseudo dice [0.8567]
1310
+ 2023-11-02 07:15:41.825038: Epoch time: 336.61 s
1311
+ 2023-11-02 07:15:43.081784:
1312
+ 2023-11-02 07:15:43.082023: Epoch 174
1313
+ 2023-11-02 07:15:43.082184: Current learning rate: 0.00842
1314
+ 2023-11-02 07:21:19.457677: train_loss -0.8905
1315
+ 2023-11-02 07:21:19.457830: val_loss -0.8435
1316
+ 2023-11-02 07:21:19.457908: Pseudo dice [0.8522]
1317
+ 2023-11-02 07:21:19.457990: Epoch time: 336.38 s
1318
+ 2023-11-02 07:21:20.711805:
1319
+ 2023-11-02 07:21:20.711915: Epoch 175
1320
+ 2023-11-02 07:21:20.712030: Current learning rate: 0.00841
1321
+ 2023-11-02 07:26:57.132309: train_loss -0.89
1322
+ 2023-11-02 07:26:57.132491: val_loss -0.8507
1323
+ 2023-11-02 07:26:57.132582: Pseudo dice [0.8639]
1324
+ 2023-11-02 07:26:57.132693: Epoch time: 336.42 s
1325
+ 2023-11-02 07:26:58.386932:
1326
+ 2023-11-02 07:26:58.387110: Epoch 176
1327
+ 2023-11-02 07:26:58.387258: Current learning rate: 0.0084
1328
+ 2023-11-02 07:32:34.745917: train_loss -0.8906
1329
+ 2023-11-02 07:32:34.746079: val_loss -0.8578
1330
+ 2023-11-02 07:32:34.746187: Pseudo dice [0.8703]
1331
+ 2023-11-02 07:32:34.746269: Epoch time: 336.36 s
1332
+ 2023-11-02 07:32:36.028688:
1333
+ 2023-11-02 07:32:36.028808: Epoch 177
1334
+ 2023-11-02 07:32:36.028922: Current learning rate: 0.00839
1335
+ 2023-11-02 07:38:12.473143: train_loss -0.894
1336
+ 2023-11-02 07:38:12.473288: val_loss -0.8439
1337
+ 2023-11-02 07:38:12.473376: Pseudo dice [0.8564]
1338
+ 2023-11-02 07:38:12.473461: Epoch time: 336.45 s
1339
+ 2023-11-02 07:38:13.728982:
1340
+ 2023-11-02 07:38:13.729083: Epoch 178
1341
+ 2023-11-02 07:38:13.729206: Current learning rate: 0.00838
1342
+ 2023-11-02 07:43:50.374558: train_loss -0.8942
1343
+ 2023-11-02 07:43:50.374717: val_loss -0.8489
1344
+ 2023-11-02 07:43:50.374814: Pseudo dice [0.8571]
1345
+ 2023-11-02 07:43:50.374906: Epoch time: 336.65 s
1346
+ 2023-11-02 07:43:51.806198:
1347
+ 2023-11-02 07:43:51.806307: Epoch 179
1348
+ 2023-11-02 07:43:51.806439: Current learning rate: 0.00837
1349
+ 2023-11-02 07:49:28.426750: train_loss -0.8963
1350
+ 2023-11-02 07:49:28.426895: val_loss -0.8629
1351
+ 2023-11-02 07:49:28.426983: Pseudo dice [0.8739]
1352
+ 2023-11-02 07:49:28.427080: Epoch time: 336.62 s
1353
+ 2023-11-02 07:49:28.427154: Yayy! New best EMA pseudo Dice: 0.8582
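The "Yayy! New best EMA pseudo Dice" lines report an exponential moving average of the per-epoch validation pseudo Dice rather than the raw value. A minimal sketch of that smoothing, assuming nnU-Net's default decay factor of 0.9 (an assumption, not something printed in this log):

# Sketch of EMA-smoothed pseudo Dice tracking; decay=0.9 is an assumed nnU-Net default.
def update_ema_dice(ema, current_dice, decay=0.9):
    """Blend the new per-epoch pseudo Dice into the running EMA."""
    return current_dice if ema is None else decay * ema + (1.0 - decay) * current_dice

ema = None
for dice in (0.8427, 0.8592, 0.8463, 0.8381):  # pseudo Dice of epochs 143-146 from the log above
    ema = update_ema_dice(ema, dice)
    print(f"pseudo dice {dice:.4f} -> EMA {ema:.4f}")

Smoothing this way keeps a single noisy validation epoch from immediately replacing the best checkpoint; a "Yayy!" line only appears when the smoothed value exceeds its previous best.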
1354
+ 2023-11-02 07:49:31.521547:
1355
+ 2023-11-02 07:49:31.521681: Epoch 180
1356
+ 2023-11-02 07:49:31.521807: Current learning rate: 0.00836
1357
+ 2023-11-02 07:55:08.137821: train_loss -0.8944
1358
+ 2023-11-02 07:55:08.137968: val_loss -0.84
1359
+ 2023-11-02 07:55:08.138054: Pseudo dice [0.8545]
1360
+ 2023-11-02 07:55:08.138141: Epoch time: 336.62 s
1361
+ 2023-11-02 07:55:09.390080:
1362
+ 2023-11-02 07:55:09.390270: Epoch 181
1363
+ 2023-11-02 07:55:09.390459: Current learning rate: 0.00836
1364
+ 2023-11-02 08:00:46.176044: train_loss -0.8959
1365
+ 2023-11-02 08:00:46.176225: val_loss -0.8497
1366
+ 2023-11-02 08:00:46.176321: Pseudo dice [0.8631]
1367
+ 2023-11-02 08:00:46.176415: Epoch time: 336.79 s
1368
+ 2023-11-02 08:00:46.176495: Yayy! New best EMA pseudo Dice: 0.8583
1369
+ 2023-11-02 08:00:49.176341:
1370
+ 2023-11-02 08:00:49.176475: Epoch 182
1371
+ 2023-11-02 08:00:49.176578: Current learning rate: 0.00835
1372
+ 2023-11-02 08:06:25.699219: train_loss -0.8882
1373
+ 2023-11-02 08:06:25.699363: val_loss -0.8308
1374
+ 2023-11-02 08:06:25.699455: Pseudo dice [0.8419]
1375
+ 2023-11-02 08:06:25.699539: Epoch time: 336.52 s
1376
+ 2023-11-02 08:06:26.944408:
1377
+ 2023-11-02 08:06:26.944624: Epoch 183
1378
+ 2023-11-02 08:06:26.944795: Current learning rate: 0.00834
1379
+ 2023-11-02 08:12:03.480973: train_loss -0.8865
1380
+ 2023-11-02 08:12:03.481128: val_loss -0.8447
1381
+ 2023-11-02 08:12:03.481220: Pseudo dice [0.8512]
1382
+ 2023-11-02 08:12:03.481302: Epoch time: 336.54 s
1383
+ 2023-11-02 08:12:04.722124:
1384
+ 2023-11-02 08:12:04.722226: Epoch 184
1385
+ 2023-11-02 08:12:04.722338: Current learning rate: 0.00833
1386
+ 2023-11-02 08:17:41.305548: train_loss -0.8913
1387
+ 2023-11-02 08:17:41.305701: val_loss -0.8584
1388
+ 2023-11-02 08:17:41.305792: Pseudo dice [0.8679]
1389
+ 2023-11-02 08:17:41.305874: Epoch time: 336.58 s
1390
+ 2023-11-02 08:17:42.558653:
1391
+ 2023-11-02 08:17:42.558839: Epoch 185
1392
+ 2023-11-02 08:17:42.558981: Current learning rate: 0.00832
1393
+ 2023-11-02 08:23:19.212474: train_loss -0.8822
1394
+ 2023-11-02 08:23:19.212619: val_loss -0.851
1395
+ 2023-11-02 08:23:19.212720: Pseudo dice [0.8662]
1396
+ 2023-11-02 08:23:19.212803: Epoch time: 336.65 s
1397
+ 2023-11-02 08:23:20.641562:
1398
+ 2023-11-02 08:23:20.641672: Epoch 186
1399
+ 2023-11-02 08:23:20.641794: Current learning rate: 0.00831
1400
+ 2023-11-02 08:28:57.252976: train_loss -0.8894
1401
+ 2023-11-02 08:28:57.253133: val_loss -0.8533
1402
+ 2023-11-02 08:28:57.253225: Pseudo dice [0.8642]
1403
+ 2023-11-02 08:28:57.253307: Epoch time: 336.61 s
1404
+ 2023-11-02 08:28:57.253377: Yayy! New best EMA pseudo Dice: 0.8588
1405
+ 2023-11-02 08:29:00.265479:
1406
+ 2023-11-02 08:29:00.265587: Epoch 187
1407
+ 2023-11-02 08:29:00.265700: Current learning rate: 0.0083
1408
+ 2023-11-02 08:34:36.875356: train_loss -0.8913
1409
+ 2023-11-02 08:34:36.875510: val_loss -0.8305
1410
+ 2023-11-02 08:34:36.875602: Pseudo dice [0.8445]
1411
+ 2023-11-02 08:34:36.875686: Epoch time: 336.61 s
1412
+ 2023-11-02 08:34:38.124214:
1413
+ 2023-11-02 08:34:38.124323: Epoch 188
1414
+ 2023-11-02 08:34:38.124439: Current learning rate: 0.00829
1415
+ 2023-11-02 08:40:14.696687: train_loss -0.8921
1416
+ 2023-11-02 08:40:14.696843: val_loss -0.8541
1417
+ 2023-11-02 08:40:14.696919: Pseudo dice [0.8693]
1418
+ 2023-11-02 08:40:14.697002: Epoch time: 336.57 s
1419
+ 2023-11-02 08:40:16.154643:
1420
+ 2023-11-02 08:40:16.154743: Epoch 189
1421
+ 2023-11-02 08:40:16.154857: Current learning rate: 0.00828
1422
+ 2023-11-02 08:45:52.751851: train_loss -0.8918
1423
+ 2023-11-02 08:45:52.752047: val_loss -0.8356
1424
+ 2023-11-02 08:45:52.752205: Pseudo dice [0.8476]
1425
+ 2023-11-02 08:45:52.752317: Epoch time: 336.6 s
1426
+ 2023-11-02 08:45:54.004484:
1427
+ 2023-11-02 08:45:54.004593: Epoch 190
1428
+ 2023-11-02 08:45:54.004703: Current learning rate: 0.00827
1429
+ 2023-11-02 08:51:30.573787: train_loss -0.8952
1430
+ 2023-11-02 08:51:30.573962: val_loss -0.8454
1431
+ 2023-11-02 08:51:30.574049: Pseudo dice [0.8647]
1432
+ 2023-11-02 08:51:30.574131: Epoch time: 336.57 s
1433
+ 2023-11-02 08:51:31.823194:
1434
+ 2023-11-02 08:51:31.823426: Epoch 191
1435
+ 2023-11-02 08:51:31.823625: Current learning rate: 0.00826
1436
+ 2023-11-02 08:57:08.351663: train_loss -0.8934
1437
+ 2023-11-02 08:57:08.351806: val_loss -0.8598
1438
+ 2023-11-02 08:57:08.351899: Pseudo dice [0.873]
1439
+ 2023-11-02 08:57:08.351992: Epoch time: 336.53 s
1440
+ 2023-11-02 08:57:08.352064: Yayy! New best EMA pseudo Dice: 0.8597
1441
+ 2023-11-02 08:57:11.603368:
1442
+ 2023-11-02 08:57:11.603498: Epoch 192
1443
+ 2023-11-02 08:57:11.603601: Current learning rate: 0.00825
1444
+ 2023-11-02 09:02:48.177767: train_loss -0.8953
1445
+ 2023-11-02 09:02:48.177918: val_loss -0.8491
1446
+ 2023-11-02 09:02:48.178014: Pseudo dice [0.8635]
1447
+ 2023-11-02 09:02:48.178108: Epoch time: 336.58 s
1448
+ 2023-11-02 09:02:48.178187: Yayy! New best EMA pseudo Dice: 0.8601
1449
+ 2023-11-02 09:02:51.311234:
1450
+ 2023-11-02 09:02:51.311342: Epoch 193
1451
+ 2023-11-02 09:02:51.311458: Current learning rate: 0.00824
1452
+ 2023-11-02 09:08:27.934379: train_loss -0.8908
1453
+ 2023-11-02 09:08:27.934547: val_loss -0.8162
1454
+ 2023-11-02 09:08:27.934641: Pseudo dice [0.8274]
1455
+ 2023-11-02 09:08:27.934734: Epoch time: 336.62 s
1456
+ 2023-11-02 09:08:29.201547:
1457
+ 2023-11-02 09:08:29.201812: Epoch 194
1458
+ 2023-11-02 09:08:29.201998: Current learning rate: 0.00824
1459
+ 2023-11-02 09:14:05.718349: train_loss -0.8931
1460
+ 2023-11-02 09:14:05.718520: val_loss -0.8568
1461
+ 2023-11-02 09:14:05.718597: Pseudo dice [0.866]
1462
+ 2023-11-02 09:14:05.718680: Epoch time: 336.52 s
1463
+ 2023-11-02 09:14:06.988049:
1464
+ 2023-11-02 09:14:06.988153: Epoch 195
1465
+ 2023-11-02 09:14:06.988266: Current learning rate: 0.00823
1466
+ 2023-11-02 09:19:43.490846: train_loss -0.8935
1467
+ 2023-11-02 09:19:43.491001: val_loss -0.8465
1468
+ 2023-11-02 09:19:43.491088: Pseudo dice [0.8612]
1469
+ 2023-11-02 09:19:43.491171: Epoch time: 336.5 s
1470
+ 2023-11-02 09:19:44.759670:
1471
+ 2023-11-02 09:19:44.759772: Epoch 196
1472
+ 2023-11-02 09:19:44.759888: Current learning rate: 0.00822
1473
+ 2023-11-02 09:25:21.303237: train_loss -0.8944
1474
+ 2023-11-02 09:25:21.303416: val_loss -0.8461
1475
+ 2023-11-02 09:25:21.303519: Pseudo dice [0.859]
1476
+ 2023-11-02 09:25:21.303631: Epoch time: 336.54 s
1477
+ 2023-11-02 09:25:22.576945:
1478
+ 2023-11-02 09:25:22.577114: Epoch 197
1479
+ 2023-11-02 09:25:22.577304: Current learning rate: 0.00821
1480
+ 2023-11-02 09:30:58.909439: train_loss -0.8947
1481
+ 2023-11-02 09:30:58.909585: val_loss -0.8494
1482
+ 2023-11-02 09:30:58.909674: Pseudo dice [0.8637]
1483
+ 2023-11-02 09:30:58.909755: Epoch time: 336.33 s
1484
+ 2023-11-02 09:31:00.176786:
1485
+ 2023-11-02 09:31:00.176992: Epoch 198
1486
+ 2023-11-02 09:31:00.177112: Current learning rate: 0.0082
1487
+ 2023-11-02 09:36:36.456280: train_loss -0.8998
1488
+ 2023-11-02 09:36:36.456426: val_loss -0.8313
1489
+ 2023-11-02 09:36:36.456531: Pseudo dice [0.8493]
1490
+ 2023-11-02 09:36:36.456623: Epoch time: 336.28 s
1491
+ 2023-11-02 09:36:37.915556:
1492
+ 2023-11-02 09:36:37.915673: Epoch 199
1493
+ 2023-11-02 09:36:37.915790: Current learning rate: 0.00819
1494
+ 2023-11-02 09:42:14.325220: train_loss -0.8968
1495
+ 2023-11-02 09:42:14.325391: val_loss -0.8369
1496
+ 2023-11-02 09:42:14.325536: Pseudo dice [0.8481]
1497
+ 2023-11-02 09:42:14.325662: Epoch time: 336.41 s
1498
+ 2023-11-02 09:42:17.267833:
1499
+ 2023-11-02 09:42:17.267948: Epoch 200
1500
+ 2023-11-02 09:42:17.268070: Current learning rate: 0.00818
1501
+ 2023-11-02 09:47:53.758134: train_loss -0.8889
1502
+ 2023-11-02 09:47:53.758301: val_loss -0.8427
1503
+ 2023-11-02 09:47:53.758378: Pseudo dice [0.8549]
1504
+ 2023-11-02 09:47:53.758462: Epoch time: 336.49 s
1505
+ 2023-11-02 09:47:55.025453:
1506
+ 2023-11-02 09:47:55.025568: Epoch 201
1507
+ 2023-11-02 09:47:55.025690: Current learning rate: 0.00817
1508
+ 2023-11-02 09:53:31.463142: train_loss -0.8922
1509
+ 2023-11-02 09:53:31.463310: val_loss -0.8458
1510
+ 2023-11-02 09:53:31.463404: Pseudo dice [0.8546]
1511
+ 2023-11-02 09:53:31.463498: Epoch time: 336.44 s
1512
+ 2023-11-02 09:53:32.745644:
1513
+ 2023-11-02 09:53:32.745833: Epoch 202
1514
+ 2023-11-02 09:53:32.746027: Current learning rate: 0.00816
1515
+ 2023-11-02 09:59:09.004851: train_loss -0.8903
1516
+ 2023-11-02 09:59:09.005041: val_loss -0.8347
1517
+ 2023-11-02 09:59:09.005147: Pseudo dice [0.8454]
1518
+ 2023-11-02 09:59:09.005234: Epoch time: 336.26 s
1519
+ 2023-11-02 09:59:10.298931:
1520
+ 2023-11-02 09:59:10.299056: Epoch 203
1521
+ 2023-11-02 09:59:10.299160: Current learning rate: 0.00815
1522
+ 2023-11-02 10:04:46.671175: train_loss -0.8876
1523
+ 2023-11-02 10:04:46.671387: val_loss -0.847
1524
+ 2023-11-02 10:04:46.671477: Pseudo dice [0.8547]
1525
+ 2023-11-02 10:04:46.671565: Epoch time: 336.37 s
1526
+ 2023-11-02 10:04:47.951658:
1527
+ 2023-11-02 10:04:47.951768: Epoch 204
1528
+ 2023-11-02 10:04:47.951892: Current learning rate: 0.00814
1529
+ 2023-11-02 10:10:24.179533: train_loss -0.8935
1530
+ 2023-11-02 10:10:24.179698: val_loss -0.8542
1531
+ 2023-11-02 10:10:24.179775: Pseudo dice [0.8641]
1532
+ 2023-11-02 10:10:24.179858: Epoch time: 336.23 s
1533
+ 2023-11-02 10:10:25.455756:
1534
+ 2023-11-02 10:10:25.455969: Epoch 205
1535
+ 2023-11-02 10:10:25.456186: Current learning rate: 0.00813
1536
+ 2023-11-02 10:16:01.701132: train_loss -0.8938
1537
+ 2023-11-02 10:16:01.701276: val_loss -0.8524
1538
+ 2023-11-02 10:16:01.701367: Pseudo dice [0.8621]
1539
+ 2023-11-02 10:16:01.701451: Epoch time: 336.25 s
1540
+ 2023-11-02 10:16:03.086668:
1541
+ 2023-11-02 10:16:03.086829: Epoch 206
1542
+ 2023-11-02 10:16:03.087015: Current learning rate: 0.00813
1543
+ 2023-11-02 10:21:39.263720: train_loss -0.8959
1544
+ 2023-11-02 10:21:39.263888: val_loss -0.8483
1545
+ 2023-11-02 10:21:39.263988: Pseudo dice [0.8613]
1546
+ 2023-11-02 10:21:39.264080: Epoch time: 336.18 s
1547
+ 2023-11-02 10:21:40.449980:
1548
+ 2023-11-02 10:21:40.450174: Epoch 207
1549
+ 2023-11-02 10:21:40.450378: Current learning rate: 0.00812
1550
+ 2023-11-02 10:27:16.753075: train_loss -0.8949
1551
+ 2023-11-02 10:27:16.753220: val_loss -0.8543
1552
+ 2023-11-02 10:27:16.753330: Pseudo dice [0.8661]
1553
+ 2023-11-02 10:27:16.753423: Epoch time: 336.3 s
1554
+ 2023-11-02 10:27:17.940347:
1555
+ 2023-11-02 10:27:17.940459: Epoch 208
1556
+ 2023-11-02 10:27:17.940582: Current learning rate: 0.00811
1557
+ 2023-11-02 10:32:54.347863: train_loss -0.8944
1558
+ 2023-11-02 10:32:54.348014: val_loss -0.8491
1559
+ 2023-11-02 10:32:54.348108: Pseudo dice [0.862]
1560
+ 2023-11-02 10:32:54.348190: Epoch time: 336.41 s
1561
+ 2023-11-02 10:32:55.533081:
1562
+ 2023-11-02 10:32:55.533195: Epoch 209
1563
+ 2023-11-02 10:32:55.533320: Current learning rate: 0.0081
1564
+ 2023-11-02 10:38:32.022746: train_loss -0.9001
1565
+ 2023-11-02 10:38:32.022900: val_loss -0.853
1566
+ 2023-11-02 10:38:32.022987: Pseudo dice [0.8624]
1567
+ 2023-11-02 10:38:32.023073: Epoch time: 336.49 s
1568
+ 2023-11-02 10:38:33.212431:
1569
+ 2023-11-02 10:38:33.212542: Epoch 210
1570
+ 2023-11-02 10:38:33.212663: Current learning rate: 0.00809
1571
+ 2023-11-02 10:44:09.689469: train_loss -0.899
1572
+ 2023-11-02 10:44:09.689613: val_loss -0.8474
1573
+ 2023-11-02 10:44:09.689705: Pseudo dice [0.8615]
1574
+ 2023-11-02 10:44:09.689787: Epoch time: 336.48 s
1575
+ 2023-11-02 10:44:10.879737:
1576
+ 2023-11-02 10:44:10.879846: Epoch 211
1577
+ 2023-11-02 10:44:10.879959: Current learning rate: 0.00808
1578
+ 2023-11-02 10:49:47.306702: train_loss -0.9003
1579
+ 2023-11-02 10:49:47.306857: val_loss -0.852
1580
+ 2023-11-02 10:49:47.306948: Pseudo dice [0.859]
1581
+ 2023-11-02 10:49:47.307032: Epoch time: 336.43 s
1582
+ 2023-11-02 10:49:48.499423:
1583
+ 2023-11-02 10:49:48.499641: Epoch 212
1584
+ 2023-11-02 10:49:48.499768: Current learning rate: 0.00807
1585
+ 2023-11-02 10:55:24.934619: train_loss -0.8958
1586
+ 2023-11-02 10:55:24.934766: val_loss -0.8389
1587
+ 2023-11-02 10:55:24.934855: Pseudo dice [0.8532]
1588
+ 2023-11-02 10:55:24.934941: Epoch time: 336.44 s
1589
+ 2023-11-02 10:55:26.320691:
1590
+ 2023-11-02 10:55:26.320805: Epoch 213
1591
+ 2023-11-02 10:55:26.320918: Current learning rate: 0.00806
1592
+ 2023-11-02 11:01:02.805871: train_loss -0.8949
1593
+ 2023-11-02 11:01:02.806040: val_loss -0.8391
1594
+ 2023-11-02 11:01:02.806135: Pseudo dice [0.8479]
1595
+ 2023-11-02 11:01:02.806229: Epoch time: 336.49 s
1596
+ 2023-11-02 11:01:03.997950:
1597
+ 2023-11-02 11:01:03.998064: Epoch 214
1598
+ 2023-11-02 11:01:03.998189: Current learning rate: 0.00805
1599
+ 2023-11-02 11:06:40.464194: train_loss -0.9005
1600
+ 2023-11-02 11:06:40.464359: val_loss -0.8434
1601
+ 2023-11-02 11:06:40.464460: Pseudo dice [0.8529]
1602
+ 2023-11-02 11:06:40.464553: Epoch time: 336.47 s
1603
+ 2023-11-02 11:06:41.661540:
1604
+ 2023-11-02 11:06:41.661709: Epoch 215
1605
+ 2023-11-02 11:06:41.661873: Current learning rate: 0.00804
1606
+ 2023-11-02 11:12:18.132850: train_loss -0.8947
1607
+ 2023-11-02 11:12:18.133024: val_loss -0.8542
1608
+ 2023-11-02 11:12:18.133123: Pseudo dice [0.8631]
1609
+ 2023-11-02 11:12:18.133206: Epoch time: 336.47 s
1610
+ 2023-11-02 11:12:19.325226:
1611
+ 2023-11-02 11:12:19.325339: Epoch 216
1612
+ 2023-11-02 11:12:19.325455: Current learning rate: 0.00803
1613
+ 2023-11-02 11:17:55.845577: train_loss -0.8905
1614
+ 2023-11-02 11:17:55.845737: val_loss -0.8323
1615
+ 2023-11-02 11:17:55.845835: Pseudo dice [0.8476]
1616
+ 2023-11-02 11:17:55.845950: Epoch time: 336.52 s
1617
+ 2023-11-02 11:17:57.045592:
1618
+ 2023-11-02 11:17:57.045803: Epoch 217
1619
+ 2023-11-02 11:17:57.045985: Current learning rate: 0.00802
1620
+ 2023-11-02 11:23:33.697549: train_loss -0.8816
1621
+ 2023-11-02 11:23:33.697695: val_loss -0.8438
1622
+ 2023-11-02 11:23:33.697783: Pseudo dice [0.8588]
1623
+ 2023-11-02 11:23:33.697869: Epoch time: 336.65 s
1624
+ 2023-11-02 11:23:34.888982:
1625
+ 2023-11-02 11:23:34.889183: Epoch 218
1626
+ 2023-11-02 11:23:34.889362: Current learning rate: 0.00801
1627
+ 2023-11-02 11:29:11.418984: train_loss -0.8918
1628
+ 2023-11-02 11:29:11.419157: val_loss -0.8533
1629
+ 2023-11-02 11:29:11.419235: Pseudo dice [0.8662]
1630
+ 2023-11-02 11:29:11.419317: Epoch time: 336.53 s
1631
+ 2023-11-02 11:29:12.616125:
1632
+ 2023-11-02 11:29:12.616228: Epoch 219
1633
+ 2023-11-02 11:29:12.616343: Current learning rate: 0.00801
1634
+ 2023-11-02 11:34:49.231112: train_loss -0.8922
1635
+ 2023-11-02 11:34:49.231257: val_loss -0.8528
1636
+ 2023-11-02 11:34:49.231345: Pseudo dice [0.8644]
1637
+ 2023-11-02 11:34:49.231432: Epoch time: 336.62 s
1638
+ 2023-11-02 11:34:50.425376:
1639
+ 2023-11-02 11:34:50.425475: Epoch 220
1640
+ 2023-11-02 11:34:50.425598: Current learning rate: 0.008
1641
+ 2023-11-02 11:40:27.207713: train_loss -0.8793
1642
+ 2023-11-02 11:40:27.207882: val_loss -0.8487
1643
+ 2023-11-02 11:40:27.207980: Pseudo dice [0.8642]
1644
+ 2023-11-02 11:40:27.208075: Epoch time: 336.78 s
1645
+ 2023-11-02 11:40:28.585015:
1646
+ 2023-11-02 11:40:28.585288: Epoch 221
1647
+ 2023-11-02 11:40:28.585511: Current learning rate: 0.00799
1648
+ 2023-11-02 11:46:05.252433: train_loss -0.8714
1649
+ 2023-11-02 11:46:05.252622: val_loss -0.8014
1650
+ 2023-11-02 11:46:05.252711: Pseudo dice [0.8061]
1651
+ 2023-11-02 11:46:05.252794: Epoch time: 336.67 s
1652
+ 2023-11-02 11:46:06.469026:
1653
+ 2023-11-02 11:46:06.469152: Epoch 222
1654
+ 2023-11-02 11:46:06.469255: Current learning rate: 0.00798
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_3_11_49_25.txt ADDED
@@ -0,0 +1,75 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_fullres
10
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}
14
+
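The plan printed above fixes patch_size [80, 192, 160] together with six encoder stages and the listed pool_op_kernel_sizes. A small consistency check with the values copied from that configuration (plain Python, not nnU-Net code) shows why that patch size is compatible with the pooling scheme:

# Sketch: verify the logged patch size is divisible by the cumulative pooling per axis.
from math import prod

patch_size = [80, 192, 160]
pool_op_kernel_sizes = [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]]

# Total downsampling per axis is the product of the pooling strides along that axis.
total_stride = [prod(k[axis] for k in pool_op_kernel_sizes) for axis in range(3)]
print(total_stride)                                            # [16, 32, 32] -> 2**4, 2**5, 2**5, matching num_pool_per_axis [4, 5, 5]
print([p % s == 0 for p, s in zip(patch_size, total_stride)])  # [True, True, True]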
15
+ 2023-11-03 11:49:28.963863: unpacking dataset...
16
+ 2023-11-03 11:49:32.980564: unpacking done...
17
+ 2023-11-03 11:49:32.981157: do_dummy_2d_data_aug: False
18
+ 2023-11-03 11:49:32.981657: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/splits_final.json
19
+ 2023-11-03 11:49:32.989707: The split file contains 5 splits.
20
+ 2023-11-03 11:49:32.989854: Desired fold for training: 0
21
+ 2023-11-03 11:49:32.989978: This split has 48 training and 12 validation cases.
22
+ 2023-11-03 11:49:54.554838: Unable to plot network architecture:
23
+ 2023-11-03 11:49:54.554923: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-11-03 11:49:54.637707:
25
+ 2023-11-03 11:49:54.637788: Epoch 200
26
+ 2023-11-03 11:49:54.637891: Current learning rate: 0.00818
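The "Current learning rate" values are consistent with polynomial decay from an initial rate of 0.01 over 1000 epochs with exponent 0.9; those three numbers are assumed nnU-Net defaults rather than values printed in this log. A worked sketch that reproduces the logged rates:

# Sketch of the poly LR schedule (initial_lr=0.01, max_epochs=1000, exponent=0.9 are assumptions).
def poly_lr(epoch, initial_lr=0.01, max_epochs=1000, exponent=0.9):
    return initial_lr * (1 - epoch / max_epochs) ** exponent

for epoch in (200, 201, 207):
    print(epoch, round(poly_lr(epoch), 5))
# 200 0.00818, 201 0.00817, 207 0.00812 -- matching the epoch headers in this log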
27
+ 2023-11-03 11:57:23.834412: train_loss -0.8976
28
+ 2023-11-03 11:57:23.834646: val_loss -0.8482
29
+ 2023-11-03 11:57:23.834730: Pseudo dice [0.8567]
30
+ 2023-11-03 11:57:23.834828: Epoch time: 449.2 s
31
+ 2023-11-03 11:57:25.181556:
32
+ 2023-11-03 11:57:25.181662: Epoch 201
33
+ 2023-11-03 11:57:25.181778: Current learning rate: 0.00817
34
+ 2023-11-03 12:03:04.355047: train_loss -0.8953
35
+ 2023-11-03 12:03:04.355210: val_loss -0.8447
36
+ 2023-11-03 12:03:04.355286: Pseudo dice [0.8579]
37
+ 2023-11-03 12:03:04.355367: Epoch time: 339.17 s
38
+ 2023-11-03 12:03:05.596945:
39
+ 2023-11-03 12:03:05.597065: Epoch 202
40
+ 2023-11-03 12:03:05.597179: Current learning rate: 0.00816
41
+ 2023-11-03 12:08:44.887986: train_loss -0.8925
42
+ 2023-11-03 12:08:44.888149: val_loss -0.8507
43
+ 2023-11-03 12:08:44.888225: Pseudo dice [0.8639]
44
+ 2023-11-03 12:08:44.888306: Epoch time: 339.29 s
45
+ 2023-11-03 12:08:46.136761:
46
+ 2023-11-03 12:08:46.136868: Epoch 203
47
+ 2023-11-03 12:08:46.136971: Current learning rate: 0.00815
48
+ 2023-11-03 12:14:25.321250: train_loss -0.8671
49
+ 2023-11-03 12:14:25.321414: val_loss -0.8431
50
+ 2023-11-03 12:14:25.321506: Pseudo dice [0.8539]
51
+ 2023-11-03 12:14:25.321588: Epoch time: 339.19 s
52
+ 2023-11-03 12:14:26.733307:
53
+ 2023-11-03 12:14:26.733414: Epoch 204
54
+ 2023-11-03 12:14:26.733532: Current learning rate: 0.00814
55
+ 2023-11-03 12:20:05.979846: train_loss -0.8744
56
+ 2023-11-03 12:20:05.980002: val_loss -0.8338
57
+ 2023-11-03 12:20:05.980094: Pseudo dice [0.8453]
58
+ 2023-11-03 12:20:05.980177: Epoch time: 339.25 s
59
+ 2023-11-03 12:20:07.216573:
60
+ 2023-11-03 12:20:07.216707: Epoch 205
61
+ 2023-11-03 12:20:07.216811: Current learning rate: 0.00813
62
+ 2023-11-03 12:25:46.333008: train_loss -0.878
63
+ 2023-11-03 12:25:46.333180: val_loss -0.8381
64
+ 2023-11-03 12:25:46.333258: Pseudo dice [0.8513]
65
+ 2023-11-03 12:25:46.333339: Epoch time: 339.12 s
66
+ 2023-11-03 12:25:47.491110:
67
+ 2023-11-03 12:25:47.491219: Epoch 206
68
+ 2023-11-03 12:25:47.491321: Current learning rate: 0.00813
69
+ 2023-11-03 12:31:26.593431: train_loss -0.8848
70
+ 2023-11-03 12:31:26.593635: val_loss -0.8376
71
+ 2023-11-03 12:31:26.593712: Pseudo dice [0.8533]
72
+ 2023-11-03 12:31:26.593794: Epoch time: 339.1 s
73
+ 2023-11-03 12:31:27.758358:
74
+ 2023-11-03 12:31:27.758461: Epoch 207
75
+ 2023-11-03 12:31:27.758577: Current learning rate: 0.00812
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_5_04_09_40.txt ADDED
@@ -0,0 +1,665 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_fullres
10
+ {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}
14
+
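CTNormalization, as listed in the plan above, typically means clipping each image to the foreground 0.5/99.5 percentiles and then z-scoring with the global foreground mean and std; that behaviour is stated here as an assumption about the standard nnU-Net CT scheme, not read from this repository. A minimal sketch using the statistics printed above:

import numpy as np

# Foreground intensity statistics copied from the plan printed above.
props = {"mean": 45.35713577270508, "std": 26.203161239624023,
         "percentile_00_5": -48.0, "percentile_99_5": 103.0}

def ct_normalize(image):
    """Clip to the foreground 0.5/99.5 percentiles, then z-score (assumed nnU-Net-style CT normalization)."""
    clipped = np.clip(image, props["percentile_00_5"], props["percentile_99_5"])
    return (clipped - props["mean"]) / max(props["std"], 1e-8)

# Example on a random HU-valued patch of the configured patch size.
patch = np.random.randint(-1000, 1000, size=(80, 192, 160)).astype(np.float32)
norm = ct_normalize(patch)
print(norm.min(), norm.max())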
15
+ 2023-11-05 04:09:44.851187: unpacking dataset...
16
+ 2023-11-05 04:09:48.933566: unpacking done...
17
+ 2023-11-05 04:09:48.934142: do_dummy_2d_data_aug: False
18
+ 2023-11-05 04:09:48.934667: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/splits_final.json
19
+ 2023-11-05 04:09:48.945231: The split file contains 5 splits.
20
+ 2023-11-05 04:09:48.945391: Desired fold for training: 0
21
+ 2023-11-05 04:09:48.945482: This split has 48 training and 12 validation cases.
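The split file referenced above holds 5 folds, and fold 0 uses 48 training and 12 validation cases, i.e. a plain 5-fold split over 60 cases. A hedged sketch of how such a splits_final.json could be produced; the case identifiers below are hypothetical placeholders, not the real dataset names:

# Sketch: build a 5-fold splits_final.json with 48 train / 12 val cases per fold.
import json
from sklearn.model_selection import KFold

cases = [f"case_{i:03d}" for i in range(60)]  # placeholder IDs
splits = []
for train_idx, val_idx in KFold(n_splits=5, shuffle=True, random_state=12345).split(cases):
    splits.append({"train": [cases[i] for i in train_idx],
                   "val": [cases[i] for i in val_idx]})

with open("splits_final.json", "w") as f:
    json.dump(splits, f, indent=2)
print(len(splits[0]["train"]), len(splits[0]["val"]))  # 48 12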
22
+ 2023-11-05 04:10:11.625145: Unable to plot network architecture:
23
+ 2023-11-05 04:10:11.625230: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-11-05 04:10:11.719700:
25
+ 2023-11-05 04:10:11.719759: Epoch 200
26
+ 2023-11-05 04:10:11.719874: Current learning rate: 0.00818
27
+ 2023-11-05 04:18:29.378557: train_loss -0.8963
28
+ 2023-11-05 04:18:29.378763: val_loss -0.8535
29
+ 2023-11-05 04:18:29.378841: Pseudo dice [0.8645]
30
+ 2023-11-05 04:18:29.378945: Epoch time: 497.66 s
31
+ 2023-11-05 04:18:31.231425:
32
+ 2023-11-05 04:18:31.231653: Epoch 201
33
+ 2023-11-05 04:18:31.231818: Current learning rate: 0.00817
34
+ 2023-11-05 04:24:07.317888: train_loss -0.8954
35
+ 2023-11-05 04:24:07.318080: val_loss -0.8598
36
+ 2023-11-05 04:24:07.318157: Pseudo dice [0.869]
37
+ 2023-11-05 04:24:07.318239: Epoch time: 336.09 s
38
+ 2023-11-05 04:24:08.571152:
39
+ 2023-11-05 04:24:08.571258: Epoch 202
40
+ 2023-11-05 04:24:08.571376: Current learning rate: 0.00816
41
+ 2023-11-05 04:29:45.164048: train_loss -0.8944
42
+ 2023-11-05 04:29:45.164199: val_loss -0.8532
43
+ 2023-11-05 04:29:45.164302: Pseudo dice [0.8664]
44
+ 2023-11-05 04:29:45.164385: Epoch time: 336.59 s
45
+ 2023-11-05 04:29:46.411228:
46
+ 2023-11-05 04:29:46.411331: Epoch 203
47
+ 2023-11-05 04:29:46.411446: Current learning rate: 0.00815
48
+ 2023-11-05 04:35:23.083548: train_loss -0.8977
49
+ 2023-11-05 04:35:23.083705: val_loss -0.8498
50
+ 2023-11-05 04:35:23.083799: Pseudo dice [0.8619]
51
+ 2023-11-05 04:35:23.083886: Epoch time: 336.67 s
52
+ 2023-11-05 04:35:24.505950:
53
+ 2023-11-05 04:35:24.506143: Epoch 204
54
+ 2023-11-05 04:35:24.506316: Current learning rate: 0.00814
55
+ 2023-11-05 04:41:01.172172: train_loss -0.8969
56
+ 2023-11-05 04:41:01.172347: val_loss -0.8439
57
+ 2023-11-05 04:41:01.172426: Pseudo dice [0.8573]
58
+ 2023-11-05 04:41:01.172509: Epoch time: 336.67 s
59
+ 2023-11-05 04:41:02.423545:
60
+ 2023-11-05 04:41:02.423648: Epoch 205
61
+ 2023-11-05 04:41:02.423764: Current learning rate: 0.00813
62
+ 2023-11-05 04:46:39.185043: train_loss -0.899
63
+ 2023-11-05 04:46:39.185201: val_loss -0.8306
64
+ 2023-11-05 04:46:39.185294: Pseudo dice [0.845]
65
+ 2023-11-05 04:46:39.185378: Epoch time: 336.76 s
66
+ 2023-11-05 04:46:40.351708:
67
+ 2023-11-05 04:46:40.351811: Epoch 206
68
+ 2023-11-05 04:46:40.351925: Current learning rate: 0.00813
69
+ 2023-11-05 04:52:17.119716: train_loss -0.8889
70
+ 2023-11-05 04:52:17.119879: val_loss -0.8471
71
+ 2023-11-05 04:52:17.119971: Pseudo dice [0.8619]
72
+ 2023-11-05 04:52:17.120055: Epoch time: 336.77 s
73
+ 2023-11-05 04:52:18.291655:
74
+ 2023-11-05 04:52:18.291777: Epoch 207
75
+ 2023-11-05 04:52:18.291880: Current learning rate: 0.00812
76
+ 2023-11-05 04:57:55.088584: train_loss -0.8904
77
+ 2023-11-05 04:57:55.088741: val_loss -0.8199
78
+ 2023-11-05 04:57:55.088833: Pseudo dice [0.8312]
79
+ 2023-11-05 04:57:55.088917: Epoch time: 336.8 s
80
+ 2023-11-05 04:57:56.259826:
81
+ 2023-11-05 04:57:56.259928: Epoch 208
82
+ 2023-11-05 04:57:56.260041: Current learning rate: 0.00811
83
+ 2023-11-05 05:03:33.023086: train_loss -0.8776
84
+ 2023-11-05 05:03:33.023243: val_loss -0.8344
85
+ 2023-11-05 05:03:33.023333: Pseudo dice [0.8361]
86
+ 2023-11-05 05:03:33.023416: Epoch time: 336.76 s
87
+ 2023-11-05 05:03:34.194674:
88
+ 2023-11-05 05:03:34.194867: Epoch 209
89
+ 2023-11-05 05:03:34.195046: Current learning rate: 0.0081
90
+ 2023-11-05 05:09:10.916037: train_loss -0.8811
91
+ 2023-11-05 05:09:10.916196: val_loss -0.8567
92
+ 2023-11-05 05:09:10.916289: Pseudo dice [0.8693]
93
+ 2023-11-05 05:09:10.916372: Epoch time: 336.72 s
94
+ 2023-11-05 05:09:12.086027:
95
+ 2023-11-05 05:09:12.086130: Epoch 210
96
+ 2023-11-05 05:09:12.086244: Current learning rate: 0.00809
97
+ 2023-11-05 05:14:48.883498: train_loss -0.8665
98
+ 2023-11-05 05:14:48.883665: val_loss -0.8349
99
+ 2023-11-05 05:14:48.883741: Pseudo dice [0.8525]
100
+ 2023-11-05 05:14:48.883825: Epoch time: 336.8 s
101
+ 2023-11-05 05:14:50.310884:
102
+ 2023-11-05 05:14:50.311005: Epoch 211
103
+ 2023-11-05 05:14:50.311118: Current learning rate: 0.00808
104
+ 2023-11-05 05:20:27.010229: train_loss -0.8751
105
+ 2023-11-05 05:20:27.010414: val_loss -0.8415
106
+ 2023-11-05 05:20:27.010491: Pseudo dice [0.8564]
107
+ 2023-11-05 05:20:27.010574: Epoch time: 336.7 s
108
+ 2023-11-05 05:20:28.378409:
109
+ 2023-11-05 05:20:28.378535: Epoch 212
110
+ 2023-11-05 05:20:28.378637: Current learning rate: 0.00807
111
+ 2023-11-05 05:26:04.943592: train_loss -0.8826
112
+ 2023-11-05 05:26:04.943742: val_loss -0.8552
113
+ 2023-11-05 05:26:04.943833: Pseudo dice [0.8716]
114
+ 2023-11-05 05:26:04.943917: Epoch time: 336.57 s
115
+ 2023-11-05 05:26:06.119517:
116
+ 2023-11-05 05:26:06.119691: Epoch 213
117
+ 2023-11-05 05:26:06.119853: Current learning rate: 0.00806
118
+ 2023-11-05 05:31:42.810622: train_loss -0.8754
119
+ 2023-11-05 05:31:42.810811: val_loss -0.8427
120
+ 2023-11-05 05:31:42.810890: Pseudo dice [0.8527]
121
+ 2023-11-05 05:31:42.810975: Epoch time: 336.69 s
122
+ 2023-11-05 05:31:43.975534:
123
+ 2023-11-05 05:31:43.975637: Epoch 214
124
+ 2023-11-05 05:31:43.975755: Current learning rate: 0.00805
125
+ 2023-11-05 05:37:20.672652: train_loss -0.8855
126
+ 2023-11-05 05:37:20.672799: val_loss -0.8492
127
+ 2023-11-05 05:37:20.672890: Pseudo dice [0.8599]
128
+ 2023-11-05 05:37:20.672974: Epoch time: 336.7 s
129
+ 2023-11-05 05:37:21.850584:
130
+ 2023-11-05 05:37:21.850711: Epoch 215
131
+ 2023-11-05 05:37:21.850815: Current learning rate: 0.00804
132
+ 2023-11-05 05:42:58.389951: train_loss -0.8904
133
+ 2023-11-05 05:42:58.390139: val_loss -0.8411
134
+ 2023-11-05 05:42:58.390217: Pseudo dice [0.8592]
135
+ 2023-11-05 05:42:58.390302: Epoch time: 336.54 s
136
+ 2023-11-05 05:42:59.559914:
137
+ 2023-11-05 05:42:59.560022: Epoch 216
138
+ 2023-11-05 05:42:59.560137: Current learning rate: 0.00803
139
+ 2023-11-05 05:48:36.191868: train_loss -0.8891
140
+ 2023-11-05 05:48:36.192027: val_loss -0.8393
141
+ 2023-11-05 05:48:36.192119: Pseudo dice [0.8509]
142
+ 2023-11-05 05:48:36.192202: Epoch time: 336.63 s
143
+ 2023-11-05 05:48:37.363318:
144
+ 2023-11-05 05:48:37.363509: Epoch 217
145
+ 2023-11-05 05:48:37.363667: Current learning rate: 0.00802
146
+ 2023-11-05 05:54:14.058111: train_loss -0.887
147
+ 2023-11-05 05:54:14.058285: val_loss -0.8508
148
+ 2023-11-05 05:54:14.058363: Pseudo dice [0.8579]
149
+ 2023-11-05 05:54:14.058446: Epoch time: 336.7 s
150
+ 2023-11-05 05:54:15.391879:
151
+ 2023-11-05 05:54:15.391991: Epoch 218
152
+ 2023-11-05 05:54:15.392103: Current learning rate: 0.00801
153
+ 2023-11-05 05:59:51.944949: train_loss -0.8894
154
+ 2023-11-05 05:59:51.945105: val_loss -0.8573
155
+ 2023-11-05 05:59:51.945179: Pseudo dice [0.8683]
156
+ 2023-11-05 05:59:51.945259: Epoch time: 336.55 s
157
+ 2023-11-05 05:59:53.114332:
158
+ 2023-11-05 05:59:53.114552: Epoch 219
159
+ 2023-11-05 05:59:53.114659: Current learning rate: 0.00801
160
+ 2023-11-05 06:05:29.647128: train_loss -0.8943
161
+ 2023-11-05 06:05:29.647278: val_loss -0.8488
162
+ 2023-11-05 06:05:29.647370: Pseudo dice [0.8591]
163
+ 2023-11-05 06:05:29.647453: Epoch time: 336.53 s
164
+ 2023-11-05 06:05:30.829235:
165
+ 2023-11-05 06:05:30.829424: Epoch 220
166
+ 2023-11-05 06:05:30.829572: Current learning rate: 0.008
167
+ 2023-11-05 06:11:07.309388: train_loss -0.8938
168
+ 2023-11-05 06:11:07.309563: val_loss -0.8485
169
+ 2023-11-05 06:11:07.309640: Pseudo dice [0.859]
170
+ 2023-11-05 06:11:07.309725: Epoch time: 336.48 s
171
+ 2023-11-05 06:11:08.530596:
172
+ 2023-11-05 06:11:08.530813: Epoch 221
173
+ 2023-11-05 06:11:08.530957: Current learning rate: 0.00799
174
+ 2023-11-05 06:16:45.176777: train_loss -0.8938
175
+ 2023-11-05 06:16:45.176978: val_loss -0.8469
176
+ 2023-11-05 06:16:45.177070: Pseudo dice [0.8582]
177
+ 2023-11-05 06:16:45.177155: Epoch time: 336.65 s
178
+ 2023-11-05 06:16:46.397594:
179
+ 2023-11-05 06:16:46.397794: Epoch 222
180
+ 2023-11-05 06:16:46.398000: Current learning rate: 0.00798
181
+ 2023-11-05 06:22:22.964367: train_loss -0.8907
182
+ 2023-11-05 06:22:22.964521: val_loss -0.8509
183
+ 2023-11-05 06:22:22.964615: Pseudo dice [0.8607]
184
+ 2023-11-05 06:22:22.964696: Epoch time: 336.57 s
185
+ 2023-11-05 06:22:24.137230:
186
+ 2023-11-05 06:22:24.137419: Epoch 223
187
+ 2023-11-05 06:22:24.137586: Current learning rate: 0.00797
188
+ 2023-11-05 06:28:00.715508: train_loss -0.8963
189
+ 2023-11-05 06:28:00.715684: val_loss -0.8449
190
+ 2023-11-05 06:28:00.715761: Pseudo dice [0.8574]
191
+ 2023-11-05 06:28:00.715844: Epoch time: 336.58 s
192
+ 2023-11-05 06:28:01.887793:
193
+ 2023-11-05 06:28:01.887892: Epoch 224
194
+ 2023-11-05 06:28:01.888010: Current learning rate: 0.00796
195
+ 2023-11-05 06:33:38.709820: train_loss -0.8928
196
+ 2023-11-05 06:33:38.709995: val_loss -0.8478
197
+ 2023-11-05 06:33:38.710072: Pseudo dice [0.8599]
198
+ 2023-11-05 06:33:38.710154: Epoch time: 336.82 s
199
+ 2023-11-05 06:33:40.050272:
200
+ 2023-11-05 06:33:40.050496: Epoch 225
201
+ 2023-11-05 06:33:40.050603: Current learning rate: 0.00795
202
+ 2023-11-05 06:39:16.764840: train_loss -0.8949
203
+ 2023-11-05 06:39:16.765005: val_loss -0.8527
204
+ 2023-11-05 06:39:16.765084: Pseudo dice [0.864]
205
+ 2023-11-05 06:39:16.765167: Epoch time: 336.72 s
206
+ 2023-11-05 06:39:17.923813:
207
+ 2023-11-05 06:39:17.924005: Epoch 226
208
+ 2023-11-05 06:39:17.924152: Current learning rate: 0.00794
209
+ 2023-11-05 06:44:54.716879: train_loss -0.8955
210
+ 2023-11-05 06:44:54.717056: val_loss -0.8467
211
+ 2023-11-05 06:44:54.717134: Pseudo dice [0.8562]
212
+ 2023-11-05 06:44:54.717217: Epoch time: 336.79 s
213
+ 2023-11-05 06:44:55.883506:
214
+ 2023-11-05 06:44:55.883704: Epoch 227
215
+ 2023-11-05 06:44:55.883838: Current learning rate: 0.00793
216
+ 2023-11-05 06:50:32.721366: train_loss -0.8918
217
+ 2023-11-05 06:50:32.721525: val_loss -0.8536
218
+ 2023-11-05 06:50:32.721616: Pseudo dice [0.8653]
219
+ 2023-11-05 06:50:32.721699: Epoch time: 336.84 s
220
+ 2023-11-05 06:50:33.879340:
221
+ 2023-11-05 06:50:33.879519: Epoch 228
222
+ 2023-11-05 06:50:33.879700: Current learning rate: 0.00792
223
+ 2023-11-05 06:56:10.660731: train_loss -0.8945
224
+ 2023-11-05 06:56:10.660886: val_loss -0.8418
225
+ 2023-11-05 06:56:10.660968: Pseudo dice [0.8522]
226
+ 2023-11-05 06:56:10.661047: Epoch time: 336.78 s
227
+ 2023-11-05 06:56:11.819569:
228
+ 2023-11-05 06:56:11.819760: Epoch 229
229
+ 2023-11-05 06:56:11.819919: Current learning rate: 0.00791
230
+ 2023-11-05 07:01:48.471485: train_loss -0.8982
231
+ 2023-11-05 07:01:48.471736: val_loss -0.8247
232
+ 2023-11-05 07:01:48.471851: Pseudo dice [0.8308]
233
+ 2023-11-05 07:01:48.471960: Epoch time: 336.65 s
234
+ 2023-11-05 07:01:49.629322:
235
+ 2023-11-05 07:01:49.629491: Epoch 230
236
+ 2023-11-05 07:01:49.629657: Current learning rate: 0.0079
237
+ 2023-11-05 07:07:26.327863: train_loss -0.8949
238
+ 2023-11-05 07:07:26.328000: val_loss -0.8473
239
+ 2023-11-05 07:07:26.328154: Pseudo dice [0.8586]
240
+ 2023-11-05 07:07:26.328295: Epoch time: 336.7 s
241
+ 2023-11-05 07:07:27.485150:
242
+ 2023-11-05 07:07:27.485252: Epoch 231
243
+ 2023-11-05 07:07:27.485374: Current learning rate: 0.00789
244
+ 2023-11-05 07:13:04.086531: train_loss -0.8969
245
+ 2023-11-05 07:13:04.086713: val_loss -0.8552
246
+ 2023-11-05 07:13:04.086794: Pseudo dice [0.8625]
247
+ 2023-11-05 07:13:04.086879: Epoch time: 336.6 s
248
+ 2023-11-05 07:13:05.244104:
249
+ 2023-11-05 07:13:05.244329: Epoch 232
250
+ 2023-11-05 07:13:05.244478: Current learning rate: 0.00789
251
+ 2023-11-05 07:18:41.862534: train_loss -0.8963
252
+ 2023-11-05 07:18:41.862682: val_loss -0.85
253
+ 2023-11-05 07:18:41.862778: Pseudo dice [0.8598]
254
+ 2023-11-05 07:18:41.862863: Epoch time: 336.62 s
255
+ 2023-11-05 07:18:43.199996:
256
+ 2023-11-05 07:18:43.200215: Epoch 233
257
+ 2023-11-05 07:18:43.200398: Current learning rate: 0.00788
258
+ 2023-11-05 07:24:19.884622: train_loss -0.8957
259
+ 2023-11-05 07:24:19.884774: val_loss -0.847
260
+ 2023-11-05 07:24:19.884866: Pseudo dice [0.8597]
261
+ 2023-11-05 07:24:19.884951: Epoch time: 336.69 s
262
+ 2023-11-05 07:24:21.045713:
263
+ 2023-11-05 07:24:21.045908: Epoch 234
264
+ 2023-11-05 07:24:21.046064: Current learning rate: 0.00787
265
+ 2023-11-05 07:29:57.564746: train_loss -0.8997
266
+ 2023-11-05 07:29:57.564903: val_loss -0.8527
267
+ 2023-11-05 07:29:57.564996: Pseudo dice [0.8638]
268
+ 2023-11-05 07:29:57.565081: Epoch time: 336.52 s
269
+ 2023-11-05 07:29:58.725250:
270
+ 2023-11-05 07:29:58.725450: Epoch 235
271
+ 2023-11-05 07:29:58.725583: Current learning rate: 0.00786
272
+ 2023-11-05 07:35:35.281318: train_loss -0.8976
273
+ 2023-11-05 07:35:35.281476: val_loss -0.8511
274
+ 2023-11-05 07:35:35.281568: Pseudo dice [0.863]
275
+ 2023-11-05 07:35:35.281652: Epoch time: 336.56 s
276
+ 2023-11-05 07:35:36.444170:
277
+ 2023-11-05 07:35:36.444278: Epoch 236
278
+ 2023-11-05 07:35:36.444392: Current learning rate: 0.00785
279
+ 2023-11-05 07:41:13.052216: train_loss -0.9018
280
+ 2023-11-05 07:41:13.052391: val_loss -0.8605
281
+ 2023-11-05 07:41:13.052469: Pseudo dice [0.8727]
282
+ 2023-11-05 07:41:13.052554: Epoch time: 336.61 s
283
+ 2023-11-05 07:41:14.214841:
284
+ 2023-11-05 07:41:14.214948: Epoch 237
285
+ 2023-11-05 07:41:14.215052: Current learning rate: 0.00784
286
+ 2023-11-05 07:46:50.888771: train_loss -0.8999
287
+ 2023-11-05 07:46:50.888922: val_loss -0.8556
288
+ 2023-11-05 07:46:50.889014: Pseudo dice [0.8647]
289
+ 2023-11-05 07:46:50.889097: Epoch time: 336.67 s
290
+ 2023-11-05 07:46:50.889173: Yayy! New best EMA pseudo Dice: 0.8604
291
+ 2023-11-05 07:46:53.658867:
292
+ 2023-11-05 07:46:53.659136: Epoch 238
293
+ 2023-11-05 07:46:53.659337: Current learning rate: 0.00783
294
+ 2023-11-05 07:52:30.299002: train_loss -0.8995
295
+ 2023-11-05 07:52:30.299174: val_loss -0.8553
296
+ 2023-11-05 07:52:30.299266: Pseudo dice [0.8676]
297
+ 2023-11-05 07:52:30.299351: Epoch time: 336.64 s
298
+ 2023-11-05 07:52:30.299423: Yayy! New best EMA pseudo Dice: 0.8611
299
+ 2023-11-05 07:52:33.148313:
300
+ 2023-11-05 07:52:33.148435: Epoch 239
301
+ 2023-11-05 07:52:33.148538: Current learning rate: 0.00782
302
+ 2023-11-05 07:58:09.940793: train_loss -0.8989
303
+ 2023-11-05 07:58:09.940950: val_loss -0.8464
304
+ 2023-11-05 07:58:09.941041: Pseudo dice [0.8558]
305
+ 2023-11-05 07:58:09.941123: Epoch time: 336.79 s
306
+ 2023-11-05 07:58:11.332861:
307
+ 2023-11-05 07:58:11.332995: Epoch 240
308
+ 2023-11-05 07:58:11.333112: Current learning rate: 0.00781
309
+ 2023-11-05 08:03:48.039989: train_loss -0.9005
310
+ 2023-11-05 08:03:48.040145: val_loss -0.8514
311
+ 2023-11-05 08:03:48.040237: Pseudo dice [0.8624]
312
+ 2023-11-05 08:03:48.040322: Epoch time: 336.71 s
313
+ 2023-11-05 08:03:49.225978:
314
+ 2023-11-05 08:03:49.226168: Epoch 241
315
+ 2023-11-05 08:03:49.226308: Current learning rate: 0.0078
316
+ 2023-11-05 08:09:26.076155: train_loss -0.8987
317
+ 2023-11-05 08:09:26.076329: val_loss -0.8441
318
+ 2023-11-05 08:09:26.076405: Pseudo dice [0.8561]
319
+ 2023-11-05 08:09:26.076488: Epoch time: 336.85 s
320
+ 2023-11-05 08:09:27.251160:
321
+ 2023-11-05 08:09:27.251344: Epoch 242
322
+ 2023-11-05 08:09:27.251527: Current learning rate: 0.00779
323
+ 2023-11-05 08:15:03.983733: train_loss -0.8981
324
+ 2023-11-05 08:15:03.983889: val_loss -0.8605
325
+ 2023-11-05 08:15:03.983980: Pseudo dice [0.8703]
326
+ 2023-11-05 08:15:03.984065: Epoch time: 336.73 s
327
+ 2023-11-05 08:15:03.984137: Yayy! New best EMA pseudo Dice: 0.8613
328
+ 2023-11-05 08:15:06.856555:
329
+ 2023-11-05 08:15:06.856679: Epoch 243
330
+ 2023-11-05 08:15:06.856850: Current learning rate: 0.00778
331
+ 2023-11-05 08:20:43.705241: train_loss -0.9041
332
+ 2023-11-05 08:20:43.705400: val_loss -0.8509
333
+ 2023-11-05 08:20:43.705504: Pseudo dice [0.8656]
334
+ 2023-11-05 08:20:43.705587: Epoch time: 336.85 s
335
+ 2023-11-05 08:20:43.705657: Yayy! New best EMA pseudo Dice: 0.8617
336
+ 2023-11-05 08:20:46.451456:
337
+ 2023-11-05 08:20:46.451562: Epoch 244
338
+ 2023-11-05 08:20:46.451676: Current learning rate: 0.00777
339
+ 2023-11-05 08:26:23.336855: train_loss -0.9052
340
+ 2023-11-05 08:26:23.337011: val_loss -0.8634
341
+ 2023-11-05 08:26:23.337102: Pseudo dice [0.8769]
342
+ 2023-11-05 08:26:23.337185: Epoch time: 336.89 s
343
+ 2023-11-05 08:26:23.337255: Yayy! New best EMA pseudo Dice: 0.8632
344
+ 2023-11-05 08:26:26.481184:
345
+ 2023-11-05 08:26:26.481369: Epoch 245
346
+ 2023-11-05 08:26:26.481506: Current learning rate: 0.00777
347
+ 2023-11-05 08:32:03.379523: train_loss -0.8974
348
+ 2023-11-05 08:32:03.379673: val_loss -0.8559
349
+ 2023-11-05 08:32:03.379766: Pseudo dice [0.8705]
350
+ 2023-11-05 08:32:03.379849: Epoch time: 336.9 s
351
+ 2023-11-05 08:32:03.379918: Yayy! New best EMA pseudo Dice: 0.864
352
+ 2023-11-05 08:32:06.210111:
353
+ 2023-11-05 08:32:06.210321: Epoch 246
354
+ 2023-11-05 08:32:06.210461: Current learning rate: 0.00776
355
+ 2023-11-05 08:37:43.058265: train_loss -0.8949
356
+ 2023-11-05 08:37:43.058422: val_loss -0.8561
357
+ 2023-11-05 08:37:43.058514: Pseudo dice [0.8695]
358
+ 2023-11-05 08:37:43.058599: Epoch time: 336.85 s
359
+ 2023-11-05 08:37:43.058670: Yayy! New best EMA pseudo Dice: 0.8645
360
+ 2023-11-05 08:37:46.060923:
361
+ 2023-11-05 08:37:46.061033: Epoch 247
362
+ 2023-11-05 08:37:46.061150: Current learning rate: 0.00775
363
+ 2023-11-05 08:43:22.803843: train_loss -0.8978
364
+ 2023-11-05 08:43:22.804000: val_loss -0.8523
365
+ 2023-11-05 08:43:22.804091: Pseudo dice [0.8633]
366
+ 2023-11-05 08:43:22.804175: Epoch time: 336.74 s
367
+ 2023-11-05 08:43:23.988153:
368
+ 2023-11-05 08:43:23.988261: Epoch 248
369
+ 2023-11-05 08:43:23.988377: Current learning rate: 0.00774
370
+ 2023-11-05 08:49:00.653266: train_loss -0.8973
371
+ 2023-11-05 08:49:00.653416: val_loss -0.8588
372
+ 2023-11-05 08:49:00.653509: Pseudo dice [0.8694]
373
+ 2023-11-05 08:49:00.653594: Epoch time: 336.67 s
374
+ 2023-11-05 08:49:00.653666: Yayy! New best EMA pseudo Dice: 0.8649
375
+ 2023-11-05 08:49:03.604050:
376
+ 2023-11-05 08:49:03.604238: Epoch 249
377
+ 2023-11-05 08:49:03.604391: Current learning rate: 0.00773
378
+ 2023-11-05 08:54:40.151062: train_loss -0.9015
379
+ 2023-11-05 08:54:40.151213: val_loss -0.8587
380
+ 2023-11-05 08:54:40.151305: Pseudo dice [0.8722]
381
+ 2023-11-05 08:54:40.151388: Epoch time: 336.55 s
382
+ 2023-11-05 08:54:41.623696: Yayy! New best EMA pseudo Dice: 0.8656
383
+ 2023-11-05 08:54:44.572229:
384
+ 2023-11-05 08:54:44.572338: Epoch 250
385
+ 2023-11-05 08:54:44.572453: Current learning rate: 0.00772
386
+ 2023-11-05 09:00:21.093516: train_loss -0.8954
387
+ 2023-11-05 09:00:21.093673: val_loss -0.8498
388
+ 2023-11-05 09:00:21.093765: Pseudo dice [0.8571]
389
+ 2023-11-05 09:00:21.093847: Epoch time: 336.52 s
390
+ 2023-11-05 09:00:22.271945:
391
+ 2023-11-05 09:00:22.272053: Epoch 251
392
+ 2023-11-05 09:00:22.272167: Current learning rate: 0.00771
393
+ 2023-11-05 09:05:58.928969: train_loss -0.8932
394
+ 2023-11-05 09:05:58.929129: val_loss -0.8503
395
+ 2023-11-05 09:05:58.929219: Pseudo dice [0.8621]
396
+ 2023-11-05 09:05:58.929303: Epoch time: 336.66 s
397
+ 2023-11-05 09:06:00.116171:
398
+ 2023-11-05 09:06:00.116339: Epoch 252
399
+ 2023-11-05 09:06:00.116512: Current learning rate: 0.0077
400
+ 2023-11-05 09:11:36.520906: train_loss -0.8981
401
+ 2023-11-05 09:11:36.521079: val_loss -0.8612
402
+ 2023-11-05 09:11:36.521157: Pseudo dice [0.8724]
403
+ 2023-11-05 09:11:36.521240: Epoch time: 336.41 s
404
+ 2023-11-05 09:11:37.699404:
405
+ 2023-11-05 09:11:37.699508: Epoch 253
406
+ 2023-11-05 09:11:37.699622: Current learning rate: 0.00769
407
+ 2023-11-05 09:17:14.168388: train_loss -0.8982
408
+ 2023-11-05 09:17:14.168558: val_loss -0.8514
409
+ 2023-11-05 09:17:14.168636: Pseudo dice [0.8627]
410
+ 2023-11-05 09:17:14.168720: Epoch time: 336.47 s
411
+ 2023-11-05 09:17:15.524000:
412
+ 2023-11-05 09:17:15.524110: Epoch 254
413
+ 2023-11-05 09:17:15.524224: Current learning rate: 0.00768
414
+ 2023-11-05 09:22:51.950844: train_loss -0.9009
415
+ 2023-11-05 09:22:51.951087: val_loss -0.8348
416
+ 2023-11-05 09:22:51.951214: Pseudo dice [0.8452]
417
+ 2023-11-05 09:22:51.951299: Epoch time: 336.43 s
418
+ 2023-11-05 09:22:53.126521:
419
+ 2023-11-05 09:22:53.126629: Epoch 255
420
+ 2023-11-05 09:22:53.126750: Current learning rate: 0.00767
421
+ 2023-11-05 09:28:29.604919: train_loss -0.9005
422
+ 2023-11-05 09:28:29.605067: val_loss -0.8427
423
+ 2023-11-05 09:28:29.605160: Pseudo dice [0.8563]
424
+ 2023-11-05 09:28:29.605243: Epoch time: 336.48 s
425
+ 2023-11-05 09:28:30.788095:
426
+ 2023-11-05 09:28:30.788343: Epoch 256
427
+ 2023-11-05 09:28:30.788530: Current learning rate: 0.00766
428
+ 2023-11-05 09:34:07.254575: train_loss -0.8984
429
+ 2023-11-05 09:34:07.254762: val_loss -0.8477
430
+ 2023-11-05 09:34:07.254842: Pseudo dice [0.8586]
431
+ 2023-11-05 09:34:07.254927: Epoch time: 336.47 s
432
+ 2023-11-05 09:34:08.440460:
433
+ 2023-11-05 09:34:08.440657: Epoch 257
434
+ 2023-11-05 09:34:08.440790: Current learning rate: 0.00765
435
+ 2023-11-05 09:39:44.971086: train_loss -0.8882
436
+ 2023-11-05 09:39:44.971243: val_loss -0.8307
437
+ 2023-11-05 09:39:44.971334: Pseudo dice [0.8528]
438
+ 2023-11-05 09:39:44.971418: Epoch time: 336.53 s
439
+ 2023-11-05 09:39:46.153480:
440
+ 2023-11-05 09:39:46.153658: Epoch 258
441
+ 2023-11-05 09:39:46.153792: Current learning rate: 0.00764
442
+ 2023-11-05 09:45:22.513571: train_loss -0.887
443
+ 2023-11-05 09:45:22.513754: val_loss -0.8519
444
+ 2023-11-05 09:45:22.513832: Pseudo dice [0.8675]
445
+ 2023-11-05 09:45:22.513916: Epoch time: 336.36 s
446
+ 2023-11-05 09:45:23.693793:
447
+ 2023-11-05 09:45:23.693897: Epoch 259
448
+ 2023-11-05 09:45:23.694010: Current learning rate: 0.00764
449
+ 2023-11-05 09:50:59.927114: train_loss -0.8952
450
+ 2023-11-05 09:50:59.927264: val_loss -0.8551
451
+ 2023-11-05 09:50:59.927355: Pseudo dice [0.8657]
452
+ 2023-11-05 09:50:59.927438: Epoch time: 336.23 s
453
+ 2023-11-05 09:51:01.110373:
454
+ 2023-11-05 09:51:01.110478: Epoch 260
455
+ 2023-11-05 09:51:01.110596: Current learning rate: 0.00763
456
+ 2023-11-05 09:56:37.471954: train_loss -0.8993
457
+ 2023-11-05 09:56:37.472148: val_loss -0.8364
458
+ 2023-11-05 09:56:37.472227: Pseudo dice [0.8511]
459
+ 2023-11-05 09:56:37.472312: Epoch time: 336.36 s
460
+ 2023-11-05 09:56:38.820428:
461
+ 2023-11-05 09:56:38.820723: Epoch 261
462
+ 2023-11-05 09:56:38.820928: Current learning rate: 0.00762
463
+ 2023-11-05 10:02:15.153871: train_loss -0.8993
464
+ 2023-11-05 10:02:15.154031: val_loss -0.8439
465
+ 2023-11-05 10:02:15.154112: Pseudo dice [0.8546]
466
+ 2023-11-05 10:02:15.154196: Epoch time: 336.33 s
467
+ 2023-11-05 10:02:16.353188:
468
+ 2023-11-05 10:02:16.353391: Epoch 262
469
+ 2023-11-05 10:02:16.353563: Current learning rate: 0.00761
470
+ 2023-11-05 10:07:52.822573: train_loss -0.8939
471
+ 2023-11-05 10:07:52.822755: val_loss -0.8468
472
+ 2023-11-05 10:07:52.822835: Pseudo dice [0.8615]
473
+ 2023-11-05 10:07:52.822918: Epoch time: 336.47 s
474
+ 2023-11-05 10:07:54.006406:
475
+ 2023-11-05 10:07:54.006590: Epoch 263
476
+ 2023-11-05 10:07:54.006749: Current learning rate: 0.0076
477
+ 2023-11-05 10:13:30.490350: train_loss -0.8981
478
+ 2023-11-05 10:13:30.490521: val_loss -0.8648
479
+ 2023-11-05 10:13:30.490600: Pseudo dice [0.88]
480
+ 2023-11-05 10:13:30.490690: Epoch time: 336.48 s
481
+ 2023-11-05 10:13:31.673057:
482
+ 2023-11-05 10:13:31.673234: Epoch 264
483
+ 2023-11-05 10:13:31.673396: Current learning rate: 0.00759
484
+ 2023-11-05 10:19:08.141053: train_loss -0.8919
485
+ 2023-11-05 10:19:08.141204: val_loss -0.8418
486
+ 2023-11-05 10:19:08.141295: Pseudo dice [0.8495]
487
+ 2023-11-05 10:19:08.141380: Epoch time: 336.47 s
488
+ 2023-11-05 10:19:09.324904:
489
+ 2023-11-05 10:19:09.325084: Epoch 265
490
+ 2023-11-05 10:19:09.325246: Current learning rate: 0.00758
491
+ 2023-11-05 10:24:45.671090: train_loss -0.8963
492
+ 2023-11-05 10:24:45.671249: val_loss -0.832
493
+ 2023-11-05 10:24:45.671340: Pseudo dice [0.8478]
494
+ 2023-11-05 10:24:45.671424: Epoch time: 336.35 s
495
+ 2023-11-05 10:24:46.860962:
496
+ 2023-11-05 10:24:46.861181: Epoch 266
497
+ 2023-11-05 10:24:46.861332: Current learning rate: 0.00757
498
+ 2023-11-05 10:30:23.222403: train_loss -0.8981
499
+ 2023-11-05 10:30:23.222560: val_loss -0.851
500
+ 2023-11-05 10:30:23.222651: Pseudo dice [0.8643]
501
+ 2023-11-05 10:30:23.222746: Epoch time: 336.36 s
502
+ 2023-11-05 10:30:24.402255:
503
+ 2023-11-05 10:30:24.402357: Epoch 267
504
+ 2023-11-05 10:30:24.402470: Current learning rate: 0.00756
505
+ 2023-11-05 10:36:00.893981: train_loss -0.8949
506
+ 2023-11-05 10:36:00.894129: val_loss -0.8552
507
+ 2023-11-05 10:36:00.894219: Pseudo dice [0.8656]
508
+ 2023-11-05 10:36:00.894303: Epoch time: 336.49 s
509
+ 2023-11-05 10:36:02.075371:
510
+ 2023-11-05 10:36:02.075579: Epoch 268
511
+ 2023-11-05 10:36:02.075734: Current learning rate: 0.00755
512
+ 2023-11-05 10:41:38.561182: train_loss -0.8932
513
+ 2023-11-05 10:41:38.561357: val_loss -0.8381
514
+ 2023-11-05 10:41:38.561446: Pseudo dice [0.8502]
515
+ 2023-11-05 10:41:38.561532: Epoch time: 336.49 s
516
+ 2023-11-05 10:41:39.935336:
517
+ 2023-11-05 10:41:39.935469: Epoch 269
518
+ 2023-11-05 10:41:39.935575: Current learning rate: 0.00754
519
+ 2023-11-05 10:47:16.351935: train_loss -0.8926
520
+ 2023-11-05 10:47:16.352205: val_loss -0.8325
521
+ 2023-11-05 10:47:16.352346: Pseudo dice [0.8418]
522
+ 2023-11-05 10:47:16.352437: Epoch time: 336.42 s
523
+ 2023-11-05 10:47:17.548752:
524
+ 2023-11-05 10:47:17.548880: Epoch 270
525
+ 2023-11-05 10:47:17.548986: Current learning rate: 0.00753
526
+ 2023-11-05 10:52:53.916208: train_loss -0.8945
527
+ 2023-11-05 10:52:53.916448: val_loss -0.8443
528
+ 2023-11-05 10:52:53.916532: Pseudo dice [0.8577]
529
+ 2023-11-05 10:52:53.916620: Epoch time: 336.37 s
530
+ 2023-11-05 10:52:55.105769:
531
+ 2023-11-05 10:52:55.105881: Epoch 271
532
+ 2023-11-05 10:52:55.106001: Current learning rate: 0.00752
533
+ 2023-11-05 10:58:31.352479: train_loss -0.899
534
+ 2023-11-05 10:58:31.352655: val_loss -0.8463
535
+ 2023-11-05 10:58:31.352748: Pseudo dice [0.8606]
536
+ 2023-11-05 10:58:31.352836: Epoch time: 336.25 s
537
+ 2023-11-05 10:58:32.543985:
538
+ 2023-11-05 10:58:32.544102: Epoch 272
539
+ 2023-11-05 10:58:32.544209: Current learning rate: 0.00751
540
+ 2023-11-05 11:04:08.830940: train_loss -0.9028
541
+ 2023-11-05 11:04:08.831125: val_loss -0.8518
542
+ 2023-11-05 11:04:08.831206: Pseudo dice [0.8601]
543
+ 2023-11-05 11:04:08.831294: Epoch time: 336.29 s
544
+ 2023-11-05 11:04:10.022593:
545
+ 2023-11-05 11:04:10.022724: Epoch 273
546
+ 2023-11-05 11:04:10.022831: Current learning rate: 0.00751
547
+ 2023-11-05 11:09:46.306644: train_loss -0.9026
548
+ 2023-11-05 11:09:46.306836: val_loss -0.8587
549
+ 2023-11-05 11:09:46.306919: Pseudo dice [0.8705]
550
+ 2023-11-05 11:09:46.307007: Epoch time: 336.28 s
551
+ 2023-11-05 11:09:47.492248:
552
+ 2023-11-05 11:09:47.492369: Epoch 274
553
+ 2023-11-05 11:09:47.492474: Current learning rate: 0.0075
554
+ 2023-11-05 11:15:23.910444: train_loss -0.9002
555
+ 2023-11-05 11:15:23.910617: val_loss -0.8319
556
+ 2023-11-05 11:15:23.910706: Pseudo dice [0.8487]
557
+ 2023-11-05 11:15:23.910794: Epoch time: 336.42 s
558
+ 2023-11-05 11:15:25.100207:
559
+ 2023-11-05 11:15:25.100328: Epoch 275
560
+ 2023-11-05 11:15:25.100434: Current learning rate: 0.00749
561
+ 2023-11-05 11:21:01.540155: train_loss -0.8917
562
+ 2023-11-05 11:21:01.540320: val_loss -0.8314
563
+ 2023-11-05 11:21:01.540402: Pseudo dice [0.8427]
564
+ 2023-11-05 11:21:01.540490: Epoch time: 336.44 s
565
+ 2023-11-05 11:21:02.904755:
566
+ 2023-11-05 11:21:02.904980: Epoch 276
567
+ 2023-11-05 11:21:02.905159: Current learning rate: 0.00748
568
+ 2023-11-05 11:26:39.332018: train_loss -0.8931
569
+ 2023-11-05 11:26:39.332191: val_loss -0.8619
570
+ 2023-11-05 11:26:39.332274: Pseudo dice [0.8738]
571
+ 2023-11-05 11:26:39.332361: Epoch time: 336.43 s
572
+ 2023-11-05 11:26:40.517618:
573
+ 2023-11-05 11:26:40.517750: Epoch 277
574
+ 2023-11-05 11:26:40.517855: Current learning rate: 0.00747
575
+ 2023-11-05 11:32:16.904585: train_loss -0.8965
576
+ 2023-11-05 11:32:16.904767: val_loss -0.8536
577
+ 2023-11-05 11:32:16.904848: Pseudo dice [0.866]
578
+ 2023-11-05 11:32:16.904935: Epoch time: 336.39 s
579
+ 2023-11-05 11:32:18.094621:
580
+ 2023-11-05 11:32:18.094757: Epoch 278
581
+ 2023-11-05 11:32:18.094863: Current learning rate: 0.00746
582
+ 2023-11-05 11:37:54.471582: train_loss -0.8956
583
+ 2023-11-05 11:37:54.471756: val_loss -0.8375
584
+ 2023-11-05 11:37:54.471838: Pseudo dice [0.8503]
585
+ 2023-11-05 11:37:54.471924: Epoch time: 336.38 s
586
+ 2023-11-05 11:37:55.659006:
587
+ 2023-11-05 11:37:55.659144: Epoch 279
588
+ 2023-11-05 11:37:55.659250: Current learning rate: 0.00745
589
+ 2023-11-05 11:43:32.030911: train_loss -0.8979
590
+ 2023-11-05 11:43:32.031115: val_loss -0.8502
591
+ 2023-11-05 11:43:32.031196: Pseudo dice [0.8613]
592
+ 2023-11-05 11:43:32.031282: Epoch time: 336.37 s
593
+ 2023-11-05 11:43:33.219712:
594
+ 2023-11-05 11:43:33.219906: Epoch 280
595
+ 2023-11-05 11:43:33.220042: Current learning rate: 0.00744
596
+ 2023-11-05 11:49:09.584111: train_loss -0.9026
597
+ 2023-11-05 11:49:09.584287: val_loss -0.852
598
+ 2023-11-05 11:49:09.584369: Pseudo dice [0.8654]
599
+ 2023-11-05 11:49:09.584456: Epoch time: 336.37 s
600
+ 2023-11-05 11:49:10.774769:
601
+ 2023-11-05 11:49:10.774882: Epoch 281
602
+ 2023-11-05 11:49:10.774988: Current learning rate: 0.00743
603
+ 2023-11-05 11:54:47.167208: train_loss -0.9039
604
+ 2023-11-05 11:54:47.167378: val_loss -0.8557
605
+ 2023-11-05 11:54:47.167460: Pseudo dice [0.8706]
606
+ 2023-11-05 11:54:47.167548: Epoch time: 336.39 s
607
+ 2023-11-05 11:54:48.355618:
608
+ 2023-11-05 11:54:48.355834: Epoch 282
609
+ 2023-11-05 11:54:48.355945: Current learning rate: 0.00742
610
+ 2023-11-05 12:00:24.700680: train_loss -0.9009
611
+ 2023-11-05 12:00:24.700852: val_loss -0.8545
612
+ 2023-11-05 12:00:24.700935: Pseudo dice [0.8624]
613
+ 2023-11-05 12:00:24.701021: Epoch time: 336.35 s
614
+ 2023-11-05 12:00:26.065410:
615
+ 2023-11-05 12:00:26.065542: Epoch 283
616
+ 2023-11-05 12:00:26.065648: Current learning rate: 0.00741
617
+ 2023-11-05 12:06:02.516538: train_loss -0.9025
618
+ 2023-11-05 12:06:02.516719: val_loss -0.8496
619
+ 2023-11-05 12:06:02.516802: Pseudo dice [0.86]
620
+ 2023-11-05 12:06:02.516889: Epoch time: 336.45 s
621
+ 2023-11-05 12:06:03.704304:
622
+ 2023-11-05 12:06:03.704428: Epoch 284
623
+ 2023-11-05 12:06:03.704533: Current learning rate: 0.0074
624
+ 2023-11-05 12:11:40.000469: train_loss -0.9052
625
+ 2023-11-05 12:11:40.000650: val_loss -0.8558
626
+ 2023-11-05 12:11:40.000813: Pseudo dice [0.865]
627
+ 2023-11-05 12:11:40.000901: Epoch time: 336.3 s
628
+ 2023-11-05 12:11:41.189816:
629
+ 2023-11-05 12:11:41.189943: Epoch 285
630
+ 2023-11-05 12:11:41.190048: Current learning rate: 0.00739
631
+ 2023-11-05 12:17:17.614229: train_loss -0.8993
632
+ 2023-11-05 12:17:17.614404: val_loss -0.8633
633
+ 2023-11-05 12:17:17.614494: Pseudo dice [0.8722]
634
+ 2023-11-05 12:17:17.614581: Epoch time: 336.43 s
635
+ 2023-11-05 12:17:18.806580:
636
+ 2023-11-05 12:17:18.806711: Epoch 286
637
+ 2023-11-05 12:17:18.806821: Current learning rate: 0.00738
638
+ 2023-11-05 12:22:55.308946: train_loss -0.904
639
+ 2023-11-05 12:22:55.309128: val_loss -0.8528
640
+ 2023-11-05 12:22:55.309210: Pseudo dice [0.8648]
641
+ 2023-11-05 12:22:55.309298: Epoch time: 336.5 s
642
+ 2023-11-05 12:22:56.522791:
643
+ 2023-11-05 12:22:56.522915: Epoch 287
644
+ 2023-11-05 12:22:56.523020: Current learning rate: 0.00738
645
+ 2023-11-05 12:28:33.033705: train_loss -0.9002
646
+ 2023-11-05 12:28:33.033885: val_loss -0.8433
647
+ 2023-11-05 12:28:33.033967: Pseudo dice [0.8583]
648
+ 2023-11-05 12:28:33.034055: Epoch time: 336.51 s
649
+ 2023-11-05 12:28:34.246769:
650
+ 2023-11-05 12:28:34.246976: Epoch 288
651
+ 2023-11-05 12:28:34.247135: Current learning rate: 0.00737
652
+ 2023-11-05 12:34:10.696105: train_loss -0.8819
653
+ 2023-11-05 12:34:10.696274: val_loss -0.8507
654
+ 2023-11-05 12:34:10.696357: Pseudo dice [0.8663]
655
+ 2023-11-05 12:34:10.696445: Epoch time: 336.45 s
656
+ 2023-11-05 12:34:11.906882:
657
+ 2023-11-05 12:34:11.907115: Epoch 289
658
+ 2023-11-05 12:34:11.907276: Current learning rate: 0.00736
659
+ 2023-11-05 12:39:48.418885: train_loss -0.876
660
+ 2023-11-05 12:39:48.419172: val_loss -0.8471
661
+ 2023-11-05 12:39:48.419317: Pseudo dice [0.8583]
662
+ 2023-11-05 12:39:48.419406: Epoch time: 336.51 s
663
+ 2023-11-05 12:39:49.816172:
664
+ 2023-11-05 12:39:49.816365: Epoch 290
665
+ 2023-11-05 12:39:49.816515: Current learning rate: 0.00735
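The log excerpt above follows a fixed per-epoch pattern: epoch number, current learning rate, train/val loss, pseudo Dice, epoch time, plus an occasional "Yayy! New best EMA pseudo Dice" marker. A minimal parsing sketch is given below; the log path and the regular expressions are assumptions based on this excerpt, not utilities shipped with the repository.

import re

def parse_pseudo_dice(log_path):
    # Hypothetical helper: collect (epoch, pseudo Dice) pairs from an nnUNet
    # training log with the line format shown above (single-label dataset).
    epoch_pattern = re.compile(r"Epoch (\d+)\s*$")
    dice_pattern = re.compile(r"Pseudo dice \[([0-9.]+)\]")
    epochs, dices = [], []
    current_epoch = None
    with open(log_path) as f:
        for line in f:
            m = epoch_pattern.search(line)
            if m:
                current_epoch = int(m.group(1))
                continue
            m = dice_pattern.search(line)
            if m and current_epoch is not None:
                epochs.append(current_epoch)
                dices.append(float(m.group(1)))
    return epochs, dices

# Example (placeholder path):
# epochs, dices = parse_pseudo_dice("training_log.txt")
# print(max(dices))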
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/plans.json ADDED
@@ -0,0 +1,454 @@
1
+ {
2
+ "dataset_name": "Dataset721_TSPrimeCTVP",
3
+ "plans_name": "nnUNetPlans",
4
+ "original_median_spacing_after_transp": [
5
+ 2.5,
6
+ 1.269531011581421,
7
+ 1.269531011581421
8
+ ],
9
+ "original_median_shape_after_transp": [
10
+ 241,
11
+ 512,
12
+ 512
13
+ ],
14
+ "image_reader_writer": "SimpleITKIO",
15
+ "transpose_forward": [
16
+ 0,
17
+ 1,
18
+ 2
19
+ ],
20
+ "transpose_backward": [
21
+ 0,
22
+ 1,
23
+ 2
24
+ ],
25
+ "configurations": {
26
+ "2d": {
27
+ "data_identifier": "nnUNetPlans_2d",
28
+ "preprocessor_name": "DefaultPreprocessor",
29
+ "batch_size": 12,
30
+ "patch_size": [
31
+ 512,
32
+ 512
33
+ ],
34
+ "median_image_size_in_voxels": [
35
+ 512.0,
36
+ 512.0
37
+ ],
38
+ "spacing": [
39
+ 1.269531011581421,
40
+ 1.269531011581421
41
+ ],
42
+ "normalization_schemes": [
43
+ "CTNormalization"
44
+ ],
45
+ "use_mask_for_norm": [
46
+ false
47
+ ],
48
+ "UNet_class_name": "PlainConvUNet",
49
+ "UNet_base_num_features": 32,
50
+ "n_conv_per_stage_encoder": [
51
+ 2,
52
+ 2,
53
+ 2,
54
+ 2,
55
+ 2,
56
+ 2,
57
+ 2,
58
+ 2
59
+ ],
60
+ "n_conv_per_stage_decoder": [
61
+ 2,
62
+ 2,
63
+ 2,
64
+ 2,
65
+ 2,
66
+ 2,
67
+ 2
68
+ ],
69
+ "num_pool_per_axis": [
70
+ 7,
71
+ 7
72
+ ],
73
+ "pool_op_kernel_sizes": [
74
+ [
75
+ 1,
76
+ 1
77
+ ],
78
+ [
79
+ 2,
80
+ 2
81
+ ],
82
+ [
83
+ 2,
84
+ 2
85
+ ],
86
+ [
87
+ 2,
88
+ 2
89
+ ],
90
+ [
91
+ 2,
92
+ 2
93
+ ],
94
+ [
95
+ 2,
96
+ 2
97
+ ],
98
+ [
99
+ 2,
100
+ 2
101
+ ],
102
+ [
103
+ 2,
104
+ 2
105
+ ]
106
+ ],
107
+ "conv_kernel_sizes": [
108
+ [
109
+ 3,
110
+ 3
111
+ ],
112
+ [
113
+ 3,
114
+ 3
115
+ ],
116
+ [
117
+ 3,
118
+ 3
119
+ ],
120
+ [
121
+ 3,
122
+ 3
123
+ ],
124
+ [
125
+ 3,
126
+ 3
127
+ ],
128
+ [
129
+ 3,
130
+ 3
131
+ ],
132
+ [
133
+ 3,
134
+ 3
135
+ ],
136
+ [
137
+ 3,
138
+ 3
139
+ ]
140
+ ],
141
+ "unet_max_num_features": 512,
142
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
143
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
144
+ "resampling_fn_data_kwargs": {
145
+ "is_seg": false,
146
+ "order": 3,
147
+ "order_z": 0,
148
+ "force_separate_z": null
149
+ },
150
+ "resampling_fn_seg_kwargs": {
151
+ "is_seg": true,
152
+ "order": 1,
153
+ "order_z": 0,
154
+ "force_separate_z": null
155
+ },
156
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
157
+ "resampling_fn_probabilities_kwargs": {
158
+ "is_seg": false,
159
+ "order": 1,
160
+ "order_z": 0,
161
+ "force_separate_z": null
162
+ },
163
+ "batch_dice": true
164
+ },
165
+ "3d_lowres": {
166
+ "data_identifier": "nnUNetPlans_3d_lowres",
167
+ "preprocessor_name": "DefaultPreprocessor",
168
+ "batch_size": 2,
169
+ "patch_size": [
170
+ 80,
171
+ 192,
172
+ 160
173
+ ],
174
+ "median_image_size_in_voxels": [
175
+ 130,
176
+ 275,
177
+ 275
178
+ ],
179
+ "spacing": [
180
+ 4.650736429273743,
181
+ 2.361701649461784,
182
+ 2.361701649461784
183
+ ],
184
+ "normalization_schemes": [
185
+ "CTNormalization"
186
+ ],
187
+ "use_mask_for_norm": [
188
+ false
189
+ ],
190
+ "UNet_class_name": "PlainConvUNet",
191
+ "UNet_base_num_features": 32,
192
+ "n_conv_per_stage_encoder": [
193
+ 2,
194
+ 2,
195
+ 2,
196
+ 2,
197
+ 2,
198
+ 2
199
+ ],
200
+ "n_conv_per_stage_decoder": [
201
+ 2,
202
+ 2,
203
+ 2,
204
+ 2,
205
+ 2
206
+ ],
207
+ "num_pool_per_axis": [
208
+ 4,
209
+ 5,
210
+ 5
211
+ ],
212
+ "pool_op_kernel_sizes": [
213
+ [
214
+ 1,
215
+ 1,
216
+ 1
217
+ ],
218
+ [
219
+ 2,
220
+ 2,
221
+ 2
222
+ ],
223
+ [
224
+ 2,
225
+ 2,
226
+ 2
227
+ ],
228
+ [
229
+ 2,
230
+ 2,
231
+ 2
232
+ ],
233
+ [
234
+ 2,
235
+ 2,
236
+ 2
237
+ ],
238
+ [
239
+ 1,
240
+ 2,
241
+ 2
242
+ ]
243
+ ],
244
+ "conv_kernel_sizes": [
245
+ [
246
+ 3,
247
+ 3,
248
+ 3
249
+ ],
250
+ [
251
+ 3,
252
+ 3,
253
+ 3
254
+ ],
255
+ [
256
+ 3,
257
+ 3,
258
+ 3
259
+ ],
260
+ [
261
+ 3,
262
+ 3,
263
+ 3
264
+ ],
265
+ [
266
+ 3,
267
+ 3,
268
+ 3
269
+ ],
270
+ [
271
+ 3,
272
+ 3,
273
+ 3
274
+ ]
275
+ ],
276
+ "unet_max_num_features": 320,
277
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
278
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
279
+ "resampling_fn_data_kwargs": {
280
+ "is_seg": false,
281
+ "order": 3,
282
+ "order_z": 0,
283
+ "force_separate_z": null
284
+ },
285
+ "resampling_fn_seg_kwargs": {
286
+ "is_seg": true,
287
+ "order": 1,
288
+ "order_z": 0,
289
+ "force_separate_z": null
290
+ },
291
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
292
+ "resampling_fn_probabilities_kwargs": {
293
+ "is_seg": false,
294
+ "order": 1,
295
+ "order_z": 0,
296
+ "force_separate_z": null
297
+ },
298
+ "batch_dice": false,
299
+ "next_stage": "3d_cascade_fullres"
300
+ },
301
+ "3d_fullres": {
302
+ "data_identifier": "nnUNetPlans_3d_fullres",
303
+ "preprocessor_name": "DefaultPreprocessor",
304
+ "batch_size": 2,
305
+ "patch_size": [
306
+ 80,
307
+ 192,
308
+ 160
309
+ ],
310
+ "median_image_size_in_voxels": [
311
+ 241.0,
312
+ 512.0,
313
+ 512.0
314
+ ],
315
+ "spacing": [
316
+ 2.5,
317
+ 1.269531011581421,
318
+ 1.269531011581421
319
+ ],
320
+ "normalization_schemes": [
321
+ "CTNormalization"
322
+ ],
323
+ "use_mask_for_norm": [
324
+ false
325
+ ],
326
+ "UNet_class_name": "PlainConvUNet",
327
+ "UNet_base_num_features": 32,
328
+ "n_conv_per_stage_encoder": [
329
+ 2,
330
+ 2,
331
+ 2,
332
+ 2,
333
+ 2,
334
+ 2
335
+ ],
336
+ "n_conv_per_stage_decoder": [
337
+ 2,
338
+ 2,
339
+ 2,
340
+ 2,
341
+ 2
342
+ ],
343
+ "num_pool_per_axis": [
344
+ 4,
345
+ 5,
346
+ 5
347
+ ],
348
+ "pool_op_kernel_sizes": [
349
+ [
350
+ 1,
351
+ 1,
352
+ 1
353
+ ],
354
+ [
355
+ 2,
356
+ 2,
357
+ 2
358
+ ],
359
+ [
360
+ 2,
361
+ 2,
362
+ 2
363
+ ],
364
+ [
365
+ 2,
366
+ 2,
367
+ 2
368
+ ],
369
+ [
370
+ 2,
371
+ 2,
372
+ 2
373
+ ],
374
+ [
375
+ 1,
376
+ 2,
377
+ 2
378
+ ]
379
+ ],
380
+ "conv_kernel_sizes": [
381
+ [
382
+ 3,
383
+ 3,
384
+ 3
385
+ ],
386
+ [
387
+ 3,
388
+ 3,
389
+ 3
390
+ ],
391
+ [
392
+ 3,
393
+ 3,
394
+ 3
395
+ ],
396
+ [
397
+ 3,
398
+ 3,
399
+ 3
400
+ ],
401
+ [
402
+ 3,
403
+ 3,
404
+ 3
405
+ ],
406
+ [
407
+ 3,
408
+ 3,
409
+ 3
410
+ ]
411
+ ],
412
+ "unet_max_num_features": 320,
413
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
414
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
415
+ "resampling_fn_data_kwargs": {
416
+ "is_seg": false,
417
+ "order": 3,
418
+ "order_z": 0,
419
+ "force_separate_z": null
420
+ },
421
+ "resampling_fn_seg_kwargs": {
422
+ "is_seg": true,
423
+ "order": 1,
424
+ "order_z": 0,
425
+ "force_separate_z": null
426
+ },
427
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
428
+ "resampling_fn_probabilities_kwargs": {
429
+ "is_seg": false,
430
+ "order": 1,
431
+ "order_z": 0,
432
+ "force_separate_z": null
433
+ },
434
+ "batch_dice": true
435
+ },
436
+ "3d_cascade_fullres": {
437
+ "inherits_from": "3d_fullres",
438
+ "previous_stage": "3d_lowres"
439
+ }
440
+ },
441
+ "experiment_planner_used": "ExperimentPlanner",
442
+ "label_manager": "LabelManager",
443
+ "foreground_intensity_properties_per_channel": {
444
+ "0": {
445
+ "max": 882.0,
446
+ "mean": 45.35713577270508,
447
+ "median": 48.0,
448
+ "min": -118.0,
449
+ "percentile_00_5": -48.0,
450
+ "percentile_99_5": 103.0,
451
+ "std": 26.203161239624023
452
+ }
453
+ }
454
+ }
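The plans.json above is the nnUNet experiment plan for Dataset721_TSPrimeCTVP; its "3d_fullres" entry records the patch size [80, 192, 160], target spacing [2.5, 1.2695, 1.2695] and batch size 2 used by the training run logged earlier. A minimal sketch for loading and inspecting it with the Python standard library follows; the relative path is an assumption based on this upload's directory layout.

import json

with open("Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_fullres/plans.json") as f:
    plans = json.load(f)

# Keys taken from the file contents shown above.
cfg = plans["configurations"]["3d_fullres"]
print(cfg["patch_size"])   # [80, 192, 160]
print(cfg["spacing"])      # [2.5, 1.269531011581421, 1.269531011581421]
print(cfg["batch_size"])   # 2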
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/dataset.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "channel_names": {
3
+ "0": "CT"
4
+ },
5
+ "labels": {
6
+ "background": 0,
7
+ "Ctvp": 1
8
+ },
9
+ "numTraining": 60,
10
+ "file_ending": ".nii.gz",
11
+ "numTest": 0
12
+ }
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/dataset_fingerprint.json ADDED
@@ -0,0 +1,618 @@
1
+ {
2
+ "foreground_intensity_properties_per_channel": {
3
+ "0": {
4
+ "max": 882.0,
5
+ "mean": 45.35713577270508,
6
+ "median": 48.0,
7
+ "min": -118.0,
8
+ "percentile_00_5": -48.0,
9
+ "percentile_99_5": 103.0,
10
+ "std": 26.203161239624023
11
+ }
12
+ },
13
+ "median_relative_size_after_cropping": 1.0,
14
+ "shapes_after_crop": [
15
+ [
16
+ 230,
17
+ 512,
18
+ 512
19
+ ],
20
+ [
21
+ 240,
22
+ 512,
23
+ 512
24
+ ],
25
+ [
26
+ 260,
27
+ 512,
28
+ 512
29
+ ],
30
+ [
31
+ 215,
32
+ 512,
33
+ 512
34
+ ],
35
+ [
36
+ 260,
37
+ 512,
38
+ 512
39
+ ],
40
+ [
41
+ 220,
42
+ 512,
43
+ 512
44
+ ],
45
+ [
46
+ 210,
47
+ 512,
48
+ 512
49
+ ],
50
+ [
51
+ 240,
52
+ 512,
53
+ 512
54
+ ],
55
+ [
56
+ 265,
57
+ 512,
58
+ 512
59
+ ],
60
+ [
61
+ 229,
62
+ 512,
63
+ 512
64
+ ],
65
+ [
66
+ 230,
67
+ 512,
68
+ 512
69
+ ],
70
+ [
71
+ 243,
72
+ 512,
73
+ 512
74
+ ],
75
+ [
76
+ 230,
77
+ 512,
78
+ 512
79
+ ],
80
+ [
81
+ 250,
82
+ 512,
83
+ 512
84
+ ],
85
+ [
86
+ 250,
87
+ 512,
88
+ 512
89
+ ],
90
+ [
91
+ 245,
92
+ 512,
93
+ 512
94
+ ],
95
+ [
96
+ 235,
97
+ 512,
98
+ 512
99
+ ],
100
+ [
101
+ 250,
102
+ 512,
103
+ 512
104
+ ],
105
+ [
106
+ 242,
107
+ 512,
108
+ 512
109
+ ],
110
+ [
111
+ 241,
112
+ 512,
113
+ 512
114
+ ],
115
+ [
116
+ 210,
117
+ 512,
118
+ 512
119
+ ],
120
+ [
121
+ 255,
122
+ 512,
123
+ 512
124
+ ],
125
+ [
126
+ 246,
127
+ 512,
128
+ 512
129
+ ],
130
+ [
131
+ 240,
132
+ 512,
133
+ 512
134
+ ],
135
+ [
136
+ 245,
137
+ 512,
138
+ 512
139
+ ],
140
+ [
141
+ 250,
142
+ 512,
143
+ 512
144
+ ],
145
+ [
146
+ 249,
147
+ 512,
148
+ 512
149
+ ],
150
+ [
151
+ 210,
152
+ 512,
153
+ 512
154
+ ],
155
+ [
156
+ 210,
157
+ 512,
158
+ 512
159
+ ],
160
+ [
161
+ 244,
162
+ 512,
163
+ 512
164
+ ],
165
+ [
166
+ 230,
167
+ 512,
168
+ 512
169
+ ],
170
+ [
171
+ 235,
172
+ 512,
173
+ 512
174
+ ],
175
+ [
176
+ 260,
177
+ 512,
178
+ 512
179
+ ],
180
+ [
181
+ 241,
182
+ 512,
183
+ 512
184
+ ],
185
+ [
186
+ 220,
187
+ 512,
188
+ 512
189
+ ],
190
+ [
191
+ 240,
192
+ 512,
193
+ 512
194
+ ],
195
+ [
196
+ 190,
197
+ 512,
198
+ 512
199
+ ],
200
+ [
201
+ 255,
202
+ 512,
203
+ 512
204
+ ],
205
+ [
206
+ 230,
207
+ 512,
208
+ 512
209
+ ],
210
+ [
211
+ 255,
212
+ 512,
213
+ 512
214
+ ],
215
+ [
216
+ 236,
217
+ 512,
218
+ 512
219
+ ],
220
+ [
221
+ 241,
222
+ 512,
223
+ 512
224
+ ],
225
+ [
226
+ 220,
227
+ 512,
228
+ 512
229
+ ],
230
+ [
231
+ 241,
232
+ 512,
233
+ 512
234
+ ],
235
+ [
236
+ 245,
237
+ 512,
238
+ 512
239
+ ],
240
+ [
241
+ 241,
242
+ 512,
243
+ 512
244
+ ],
245
+ [
246
+ 250,
247
+ 512,
248
+ 512
249
+ ],
250
+ [
251
+ 210,
252
+ 512,
253
+ 512
254
+ ],
255
+ [
256
+ 250,
257
+ 512,
258
+ 512
259
+ ],
260
+ [
261
+ 266,
262
+ 512,
263
+ 512
264
+ ],
265
+ [
266
+ 220,
267
+ 512,
268
+ 512
269
+ ],
270
+ [
271
+ 230,
272
+ 512,
273
+ 512
274
+ ],
275
+ [
276
+ 280,
277
+ 512,
278
+ 512
279
+ ],
280
+ [
281
+ 260,
282
+ 512,
283
+ 512
284
+ ],
285
+ [
286
+ 245,
287
+ 512,
288
+ 512
289
+ ],
290
+ [
291
+ 220,
292
+ 512,
293
+ 512
294
+ ],
295
+ [
296
+ 240,
297
+ 512,
298
+ 512
299
+ ],
300
+ [
301
+ 250,
302
+ 512,
303
+ 512
304
+ ],
305
+ [
306
+ 226,
307
+ 512,
308
+ 512
309
+ ],
310
+ [
311
+ 240,
312
+ 512,
313
+ 512
314
+ ]
315
+ ],
316
+ "spacings": [
317
+ [
318
+ 2.5,
319
+ 1.269531011581421,
320
+ 1.269531011581421
321
+ ],
322
+ [
323
+ 2.5,
324
+ 1.269531011581421,
325
+ 1.269531011581421
326
+ ],
327
+ [
328
+ 2.5,
329
+ 1.269531011581421,
330
+ 1.269531011581421
331
+ ],
332
+ [
333
+ 2.5,
334
+ 1.269531011581421,
335
+ 1.269531011581421
336
+ ],
337
+ [
338
+ 2.5,
339
+ 1.269531011581421,
340
+ 1.269531011581421
341
+ ],
342
+ [
343
+ 2.5,
344
+ 1.269531011581421,
345
+ 1.269531011581421
346
+ ],
347
+ [
348
+ 2.5,
349
+ 1.269531011581421,
350
+ 1.269531011581421
351
+ ],
352
+ [
353
+ 2.5,
354
+ 1.269531011581421,
355
+ 1.269531011581421
356
+ ],
357
+ [
358
+ 2.5,
359
+ 1.269531011581421,
360
+ 1.269531011581421
361
+ ],
362
+ [
363
+ 2.5,
364
+ 1.269531011581421,
365
+ 1.269531011581421
366
+ ],
367
+ [
368
+ 2.5,
369
+ 1.269531011581421,
370
+ 1.269531011581421
371
+ ],
372
+ [
373
+ 2.5,
374
+ 1.269531011581421,
375
+ 1.269531011581421
376
+ ],
377
+ [
378
+ 2.5,
379
+ 1.269531011581421,
380
+ 1.269531011581421
381
+ ],
382
+ [
383
+ 2.5,
384
+ 1.269531011581421,
385
+ 1.269531011581421
386
+ ],
387
+ [
388
+ 2.5,
389
+ 1.269531011581421,
390
+ 1.269531011581421
391
+ ],
392
+ [
393
+ 2.5,
394
+ 1.269531011581421,
395
+ 1.269531011581421
396
+ ],
397
+ [
398
+ 2.5,
399
+ 1.269531011581421,
400
+ 1.269531011581421
401
+ ],
402
+ [
403
+ 2.5,
404
+ 1.269531011581421,
405
+ 1.269531011581421
406
+ ],
407
+ [
408
+ 2.5,
409
+ 1.269531011581421,
410
+ 1.269531011581421
411
+ ],
412
+ [
413
+ 2.5,
414
+ 1.269531011581421,
415
+ 1.269531011581421
416
+ ],
417
+ [
418
+ 2.5,
419
+ 1.269531011581421,
420
+ 1.269531011581421
421
+ ],
422
+ [
423
+ 2.5,
424
+ 1.269531011581421,
425
+ 1.269531011581421
426
+ ],
427
+ [
428
+ 2.5,
429
+ 1.269531011581421,
430
+ 1.269531011581421
431
+ ],
432
+ [
433
+ 2.5,
434
+ 1.269531011581421,
435
+ 1.269531011581421
436
+ ],
437
+ [
438
+ 2.5,
439
+ 1.269531011581421,
440
+ 1.269531011581421
441
+ ],
442
+ [
443
+ 2.5,
444
+ 1.269531011581421,
445
+ 1.269531011581421
446
+ ],
447
+ [
448
+ 2.5,
449
+ 1.269531011581421,
450
+ 1.269531011581421
451
+ ],
452
+ [
453
+ 2.5,
454
+ 1.269531011581421,
455
+ 1.269531011581421
456
+ ],
457
+ [
458
+ 2.5,
459
+ 1.269531011581421,
460
+ 1.269531011581421
461
+ ],
462
+ [
463
+ 2.5,
464
+ 1.269531011581421,
465
+ 1.269531011581421
466
+ ],
467
+ [
468
+ 2.5,
469
+ 1.269531011581421,
470
+ 1.269531011581421
471
+ ],
472
+ [
473
+ 2.5,
474
+ 1.269531011581421,
475
+ 1.269531011581421
476
+ ],
477
+ [
478
+ 2.5,
479
+ 1.269531011581421,
480
+ 1.269531011581421
481
+ ],
482
+ [
483
+ 2.5,
484
+ 1.269531011581421,
485
+ 1.269531011581421
486
+ ],
487
+ [
488
+ 2.5,
489
+ 1.269531011581421,
490
+ 1.269531011581421
491
+ ],
492
+ [
493
+ 2.5,
494
+ 1.269531011581421,
495
+ 1.269531011581421
496
+ ],
497
+ [
498
+ 2.5,
499
+ 1.269531011581421,
500
+ 1.269531011581421
501
+ ],
502
+ [
503
+ 2.5,
504
+ 1.269531011581421,
505
+ 1.269531011581421
506
+ ],
507
+ [
508
+ 2.5,
509
+ 1.269531011581421,
510
+ 1.269531011581421
511
+ ],
512
+ [
513
+ 2.5,
514
+ 1.269531011581421,
515
+ 1.269531011581421
516
+ ],
517
+ [
518
+ 2.5,
519
+ 1.269531011581421,
520
+ 1.269531011581421
521
+ ],
522
+ [
523
+ 2.5,
524
+ 1.269531011581421,
525
+ 1.269531011581421
526
+ ],
527
+ [
528
+ 2.5,
529
+ 1.269531011581421,
530
+ 1.269531011581421
531
+ ],
532
+ [
533
+ 2.5,
534
+ 1.269531011581421,
535
+ 1.269531011581421
536
+ ],
537
+ [
538
+ 2.5,
539
+ 1.269531011581421,
540
+ 1.269531011581421
541
+ ],
542
+ [
543
+ 2.5,
544
+ 1.269531011581421,
545
+ 1.269531011581421
546
+ ],
547
+ [
548
+ 2.5,
549
+ 1.269531011581421,
550
+ 1.269531011581421
551
+ ],
552
+ [
553
+ 2.5,
554
+ 1.269531011581421,
555
+ 1.269531011581421
556
+ ],
557
+ [
558
+ 2.5,
559
+ 1.269531011581421,
560
+ 1.269531011581421
561
+ ],
562
+ [
563
+ 2.5,
564
+ 1.269531011581421,
565
+ 1.269531011581421
566
+ ],
567
+ [
568
+ 2.5,
569
+ 1.269531011581421,
570
+ 1.269531011581421
571
+ ],
572
+ [
573
+ 2.5,
574
+ 1.269531011581421,
575
+ 1.269531011581421
576
+ ],
577
+ [
578
+ 2.5,
579
+ 1.269531011581421,
580
+ 1.269531011581421
581
+ ],
582
+ [
583
+ 2.5,
584
+ 1.269531011581421,
585
+ 1.269531011581421
586
+ ],
587
+ [
588
+ 2.5,
589
+ 1.269531011581421,
590
+ 1.269531011581421
591
+ ],
592
+ [
593
+ 2.5,
594
+ 1.269531011581421,
595
+ 1.269531011581421
596
+ ],
597
+ [
598
+ 2.5,
599
+ 1.269531011581421,
600
+ 1.269531011581421
601
+ ],
602
+ [
603
+ 2.5,
604
+ 1.269531011581421,
605
+ 1.269531011581421
606
+ ],
607
+ [
608
+ 2.5,
609
+ 1.269531011581421,
610
+ 1.269531011581421
611
+ ],
612
+ [
613
+ 2.5,
614
+ 1.269531011581421,
615
+ 1.269531011581421
616
+ ]
617
+ ]
618
+ }
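The fingerprint above stores one cropped shape and one voxel spacing per training case (60 cases); the median shape and spacing recorded in plans.json are derived from these lists. A minimal sketch of that reduction, assuming numpy is available and using an illustrative path:

import json
import numpy as np

with open("Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/dataset_fingerprint.json") as f:
    fp = json.load(f)

# Per-axis median over all cases in the fingerprint.
median_shape = np.median(np.array(fp["shapes_after_crop"]), axis=0)
median_spacing = np.median(np.array(fp["spacings"]), axis=0)
print(median_shape)    # expected [241. 512. 512.] per the plans above
print(median_spacing)  # expected [2.5 1.26953101 1.26953101]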
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/checkpoint_best.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8acdeaa823073099ac98e716c20fcc574b5749f0a21978eb2ad91e2002f6fca1
3
+ size 246436257
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/checkpoint_latest.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d79a28d029c7b6774bd1284c0d57ea77d1b77579ee5d60364fa8b94dce336363
3
+ size 246425813
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/debug.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "_best_ema": "None",
3
+ "batch_size": "2",
4
+ "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}",
5
+ "configuration_name": "3d_lowres",
6
+ "cudnn_version": 8500,
7
+ "current_epoch": "0",
8
+ "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f1bfb640f10>",
9
+ "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7f1bfb640f50>",
10
+ "dataloader_train.num_processes": "12",
11
+ "dataloader_train.transform": "Compose ( [SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [80, 192, 160], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-0.5235987755982988, 0.5235987755982988), angle_y = (-0.5235987755982988, 0.5235987755982988), angle_z = (-0.5235987755982988, 0.5235987755982988), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = None ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
+ "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f1bfba5f110>",
13
+ "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7f1bfb5c31d0>",
14
+ "dataloader_val.num_processes": "6",
15
+ "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
+ "dataset_json": "{'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvp': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}",
17
+ "device": "cuda:0",
18
+ "disable_checkpointing": "False",
19
+ "fold": "0",
20
+ "folder_with_segs_from_previous_stage": "None",
21
+ "gpu_name": "NVIDIA GeForce GTX 1080 Ti",
22
+ "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7f1bfb5c2f10>",
23
+ "hostname": "vipadmin-Z10PE-D16-WS",
24
+ "inference_allowed_mirroring_axes": "(0, 1, 2)",
25
+ "initial_lr": "0.01",
26
+ "is_cascaded": "False",
27
+ "is_ddp": "False",
28
+ "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7f1bfb5c2ed0>",
29
+ "local_rank": "0",
30
+ "log_file": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/training_log_2023_11_5_22_05_41.txt",
31
+ "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7f1bfbb7d950>",
32
+ "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
+ "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7f1bfba5f950>",
34
+ "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}, 'configuration': '3d_lowres', 'fold': 0, 'dataset_json': {'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvp': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
+ "network": "PlainConvUNet",
36
+ "num_epochs": "1000",
37
+ "num_input_channels": "1",
38
+ "num_iterations_per_epoch": "250",
39
+ "num_val_iterations_per_epoch": "50",
40
+ "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
+ "output_folder": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0",
42
+ "output_folder_base": "./data/nnUNet_results/Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres",
43
+ "oversample_foreground_percent": "0.33",
44
+ "plans_manager": "{'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}",
45
+ "preprocessed_dataset_folder": "./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/nnUNetPlans_3d_lowres",
46
+ "preprocessed_dataset_folder_base": "./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP",
47
+ "save_every": "50",
48
+ "torch_version": "2.0.1+cu117",
49
+ "unpack_dataset": "True",
50
+ "was_initialized": "True",
51
+ "weight_decay": "3e-05"
52
+ }
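debug.json records that this fold uses the CTNormalization scheme together with the foreground intensity statistics also listed in plans.json. A minimal sketch of the kind of normalization this implies, assuming nnUNet's standard CT scheme (clip to the foreground 0.5/99.5 percentiles, then z-score with the foreground mean and std); the helper name and hard-coded numbers are illustrative, taken from foreground_intensity_properties_per_channel["0"] above:

import numpy as np

def ct_normalize(image: np.ndarray) -> np.ndarray:
    # Statistics copied from the plans/debug files shown above (channel "0").
    props = {
        "mean": 45.35713577270508,
        "std": 26.203161239624023,
        "percentile_00_5": -48.0,
        "percentile_99_5": 103.0,
    }
    # Clip to the foreground percentiles, then z-score with foreground mean/std.
    image = np.clip(image, props["percentile_00_5"], props["percentile_99_5"])
    return (image - props["mean"]) / max(props["std"], 1e-8)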
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/network_architecture ADDED
@@ -0,0 +1,171 @@
1
+ digraph {
2
+ graph [bgcolor="#FFFFFF" color="#000000" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" pad="1.0,0.5" rankdir=LR]
3
+ node [color="#000000" fillcolor="#E8E8E8" fontcolor="#000000" fontname=Times fontsize=10 margin="0,0" shape=box style=filled]
4
+ edge [color="#000000" fontcolor="#000000" fontname=Times fontsize=10 style=solid]
5
+ "/outputs/109" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
6
+ "/outputs/110" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
7
+ "/outputs/111" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
8
+ "/outputs/112" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
9
+ "/outputs/113" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
10
+ "/outputs/114" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
11
+ "/outputs/115" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
12
+ "/outputs/116" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
13
+ "/outputs/117" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
14
+ "/outputs/118" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
15
+ "/outputs/119" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
16
+ "/outputs/120" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
17
+ "/outputs/121" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
18
+ "/outputs/122" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
19
+ "/outputs/123" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
20
+ "/outputs/124" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
21
+ "/outputs/125" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
22
+ "/outputs/126" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
23
+ "/outputs/127" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
24
+ "/outputs/128" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
25
+ "/outputs/129" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
26
+ "/outputs/130" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
27
+ "/outputs/131" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
28
+ "/outputs/132" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
29
+ "/outputs/133" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [2, 2, 2]</td></tr></table>>]
30
+ "/outputs/134" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
31
+ "/outputs/135" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
32
+ "/outputs/136" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
33
+ "/outputs/137" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
34
+ "/outputs/138" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
35
+ "/outputs/139" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 2, 2]</td></tr></table>>]
36
+ "/outputs/140" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
37
+ "/outputs/141" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
38
+ "/outputs/142" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
39
+ "/outputs/143" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
40
+ "/outputs/144" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
41
+ "/outputs/145" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [1, 2, 2], stride: [1, 2, 2]</td></tr></table>>]
42
+ "/outputs/146" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
43
+ "/outputs/147" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
44
+ "/outputs/148" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
45
+ "/outputs/149" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
46
+ "/outputs/150" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
47
+ "/outputs/151" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
48
+ "/outputs/152" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
49
+ "/outputs/153" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
50
+ "/outputs/154" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
51
+ "/outputs/155" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
52
+ "/outputs/156" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
53
+ "/outputs/157" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
54
+ "/outputs/158" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
55
+ "/outputs/159" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
56
+ "/outputs/160" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
57
+ "/outputs/161" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
58
+ "/outputs/162" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
59
+ "/outputs/163" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
60
+ "/outputs/164" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
61
+ "/outputs/165" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
62
+ "/outputs/166" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
63
+ "/outputs/167" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
64
+ "/outputs/168" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
65
+ "/outputs/169" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
66
+ "/outputs/170" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
67
+ "/outputs/171" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
68
+ "/outputs/172" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
69
+ "/outputs/173" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
70
+ "/outputs/174" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
71
+ "/outputs/175" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
72
+ "/outputs/176" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
73
+ "/outputs/177" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
74
+ "/outputs/178" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
75
+ "/outputs/179" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
76
+ "/outputs/180" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
77
+ "/outputs/181" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>ConvTranspose, kernel_size: [2, 2, 2], stride: [2, 2, 2]</td></tr></table>>]
78
+ "/outputs/182" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Concat</td></tr></table>>]
79
+ "/outputs/183" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
80
+ "/outputs/184" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
81
+ "/outputs/185" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
82
+ "/outputs/186" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [3, 3, 3], stride: [1, 1, 1]</td></tr></table>>]
83
+ "/outputs/187" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>InstanceNormalization</td></tr></table>>]
84
+ "/outputs/188" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>LeakyRelu</td></tr></table>>]
85
+ "/outputs/189" [label=<<table border='0' cellborder='0' cellpadding='0'><tr><td cellpadding='6'>Conv, kernel_size: [1, 1, 1], stride: [1, 1, 1]</td></tr></table>>]
86
+ "/outputs/109" -> "/outputs/110" [label="1x32x80x192x160"]
87
+ "/outputs/110" -> "/outputs/111" [label="1x32x80x192x160"]
88
+ "/outputs/111" -> "/outputs/112" [label="1x32x80x192x160"]
89
+ "/outputs/112" -> "/outputs/113" [label="1x32x80x192x160"]
90
+ "/outputs/113" -> "/outputs/114" [label="1x32x80x192x160"]
91
+ "/outputs/114" -> "/outputs/115" [label="1x32x80x192x160"]
92
+ "/outputs/114" -> "/outputs/182" [label="1x32x80x192x160"]
93
+ "/outputs/115" -> "/outputs/116" [label="1x64x40x96x80"]
94
+ "/outputs/116" -> "/outputs/117" [label="1x64x40x96x80"]
95
+ "/outputs/117" -> "/outputs/118" [label="1x64x40x96x80"]
96
+ "/outputs/118" -> "/outputs/119" [label="1x64x40x96x80"]
97
+ "/outputs/119" -> "/outputs/120" [label="1x64x40x96x80"]
98
+ "/outputs/120" -> "/outputs/121" [label="1x64x40x96x80"]
99
+ "/outputs/120" -> "/outputs/173" [label="1x64x40x96x80"]
100
+ "/outputs/121" -> "/outputs/122" [label="1x128x20x48x40"]
101
+ "/outputs/122" -> "/outputs/123" [label="1x128x20x48x40"]
102
+ "/outputs/123" -> "/outputs/124" [label="1x128x20x48x40"]
103
+ "/outputs/124" -> "/outputs/125" [label="1x128x20x48x40"]
104
+ "/outputs/125" -> "/outputs/126" [label="1x128x20x48x40"]
105
+ "/outputs/126" -> "/outputs/127" [label="1x128x20x48x40"]
106
+ "/outputs/126" -> "/outputs/164" [label="1x128x20x48x40"]
107
+ "/outputs/127" -> "/outputs/128" [label="1x256x10x24x20"]
108
+ "/outputs/128" -> "/outputs/129" [label="1x256x10x24x20"]
109
+ "/outputs/129" -> "/outputs/130" [label="1x256x10x24x20"]
110
+ "/outputs/130" -> "/outputs/131" [label="1x256x10x24x20"]
111
+ "/outputs/131" -> "/outputs/132" [label="1x256x10x24x20"]
112
+ "/outputs/132" -> "/outputs/133" [label="1x256x10x24x20"]
113
+ "/outputs/132" -> "/outputs/155" [label="1x256x10x24x20"]
114
+ "/outputs/133" -> "/outputs/134" [label="1x320x5x12x10"]
115
+ "/outputs/134" -> "/outputs/135" [label="1x320x5x12x10"]
116
+ "/outputs/135" -> "/outputs/136" [label="1x320x5x12x10"]
117
+ "/outputs/136" -> "/outputs/137" [label="1x320x5x12x10"]
118
+ "/outputs/137" -> "/outputs/138" [label="1x320x5x12x10"]
119
+ "/outputs/138" -> "/outputs/139" [label="1x320x5x12x10"]
120
+ "/outputs/138" -> "/outputs/146" [label="1x320x5x12x10"]
121
+ "/outputs/139" -> "/outputs/140" [label="1x320x5x6x5"]
122
+ "/outputs/140" -> "/outputs/141" [label="1x320x5x6x5"]
123
+ "/outputs/141" -> "/outputs/142" [label="1x320x5x6x5"]
124
+ "/outputs/142" -> "/outputs/143" [label="1x320x5x6x5"]
125
+ "/outputs/143" -> "/outputs/144" [label="1x320x5x6x5"]
126
+ "/outputs/144" -> "/outputs/145" [label="1x320x5x6x5"]
127
+ "/outputs/145" -> "/outputs/146" [label="1x320x5x12x10"]
128
+ "/outputs/146" -> "/outputs/147" [label="1x640x5x12x10"]
129
+ "/outputs/147" -> "/outputs/148" [label="1x320x5x12x10"]
130
+ "/outputs/148" -> "/outputs/149" [label="1x320x5x12x10"]
131
+ "/outputs/149" -> "/outputs/150" [label="1x320x5x12x10"]
132
+ "/outputs/150" -> "/outputs/151" [label="1x320x5x12x10"]
133
+ "/outputs/151" -> "/outputs/152" [label="1x320x5x12x10"]
134
+ "/outputs/152" -> "/outputs/153" [label="1x320x5x12x10"]
135
+ "/outputs/152" -> "/outputs/154" [label="1x320x5x12x10"]
136
+ "/outputs/154" -> "/outputs/155" [label="1x256x10x24x20"]
137
+ "/outputs/155" -> "/outputs/156" [label="1x512x10x24x20"]
138
+ "/outputs/156" -> "/outputs/157" [label="1x256x10x24x20"]
139
+ "/outputs/157" -> "/outputs/158" [label="1x256x10x24x20"]
140
+ "/outputs/158" -> "/outputs/159" [label="1x256x10x24x20"]
141
+ "/outputs/159" -> "/outputs/160" [label="1x256x10x24x20"]
142
+ "/outputs/160" -> "/outputs/161" [label="1x256x10x24x20"]
143
+ "/outputs/161" -> "/outputs/162" [label="1x256x10x24x20"]
144
+ "/outputs/161" -> "/outputs/163" [label="1x256x10x24x20"]
145
+ "/outputs/163" -> "/outputs/164" [label="1x128x20x48x40"]
146
+ "/outputs/164" -> "/outputs/165" [label="1x256x20x48x40"]
147
+ "/outputs/165" -> "/outputs/166" [label="1x128x20x48x40"]
148
+ "/outputs/166" -> "/outputs/167" [label="1x128x20x48x40"]
149
+ "/outputs/167" -> "/outputs/168" [label="1x128x20x48x40"]
150
+ "/outputs/168" -> "/outputs/169" [label="1x128x20x48x40"]
151
+ "/outputs/169" -> "/outputs/170" [label="1x128x20x48x40"]
152
+ "/outputs/170" -> "/outputs/171" [label="1x128x20x48x40"]
153
+ "/outputs/170" -> "/outputs/172" [label="1x128x20x48x40"]
154
+ "/outputs/172" -> "/outputs/173" [label="1x64x40x96x80"]
155
+ "/outputs/173" -> "/outputs/174" [label="1x128x40x96x80"]
156
+ "/outputs/174" -> "/outputs/175" [label="1x64x40x96x80"]
157
+ "/outputs/175" -> "/outputs/176" [label="1x64x40x96x80"]
158
+ "/outputs/176" -> "/outputs/177" [label="1x64x40x96x80"]
159
+ "/outputs/177" -> "/outputs/178" [label="1x64x40x96x80"]
160
+ "/outputs/178" -> "/outputs/179" [label="1x64x40x96x80"]
161
+ "/outputs/179" -> "/outputs/180" [label="1x64x40x96x80"]
162
+ "/outputs/179" -> "/outputs/181" [label="1x64x40x96x80"]
163
+ "/outputs/181" -> "/outputs/182" [label="1x32x80x192x160"]
164
+ "/outputs/182" -> "/outputs/183" [label="1x64x80x192x160"]
165
+ "/outputs/183" -> "/outputs/184" [label="1x32x80x192x160"]
166
+ "/outputs/184" -> "/outputs/185" [label="1x32x80x192x160"]
167
+ "/outputs/185" -> "/outputs/186" [label="1x32x80x192x160"]
168
+ "/outputs/186" -> "/outputs/187" [label="1x32x80x192x160"]
169
+ "/outputs/187" -> "/outputs/188" [label="1x32x80x192x160"]
170
+ "/outputs/188" -> "/outputs/189" [label="1x32x80x192x160"]
171
+ }
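Aside: the block above is a Graphviz DOT description of the fold_0 network architecture (a plain convolutional U-Net style graph of Conv / InstanceNormalization / LeakyRelu blocks with ConvTranspose upsampling and Concat skip connections). The training log further down notes that automatic plotting failed because the `dot` executable was not on PATH; as a minimal sketch (assuming the `graphviz` Python package and the Graphviz binaries are installed, and with an illustrative file path), the stored DOT file can be rendered locally like this:

```python
# Minimal sketch: render a stored nnU-Net `network_architecture` DOT file to PNG.
# Assumptions: `pip install graphviz` plus a system Graphviz install providing `dot`;
# the path below is illustrative and should point at one of the fold_0
# network_architecture files in this repository.
import graphviz

dot_path = "fold_0/network_architecture"  # illustrative path

with open(dot_path) as f:
    src = graphviz.Source(f.read())

# Writes <dot_path>.png next to the source file and removes the intermediate copy.
src.render(dot_path, format="png", cleanup=True)
```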
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/progress.png ADDED
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/fold_0/training_log_2023_11_5_22_05_41.txt ADDED
@@ -0,0 +1,660 @@
1
+
2
+ #######################################################################
3
+ Please cite the following paper when using nnU-Net:
4
+ Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
+ #######################################################################
6
+
7
+
8
+ This is the configuration used by this training:
9
+ Configuration name: 3d_lowres
10
+ {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}
11
+
12
+ These are the global plan.json settings:
13
+ {'dataset_name': 'Dataset721_TSPrimeCTVP', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 882.0, 'mean': 45.35713577270508, 'median': 48.0, 'min': -118.0, 'percentile_00_5': -48.0, 'percentile_99_5': 103.0, 'std': 26.203161239624023}}}
14
+
15
+ 2023-11-05 22:05:43.766089: unpacking dataset...
16
+ 2023-11-05 22:05:52.983822: unpacking done...
17
+ 2023-11-05 22:05:53.001199: do_dummy_2d_data_aug: False
18
+ 2023-11-05 22:05:53.001824: Using splits from existing split file: ./data/nnUNet_preprocessed/Dataset721_TSPrimeCTVP/splits_final.json
19
+ 2023-11-05 22:05:53.022583: The split file contains 5 splits.
20
+ 2023-11-05 22:05:53.022662: Desired fold for training: 0
21
+ 2023-11-05 22:05:53.022732: This split has 48 training and 12 validation cases.
22
+ 2023-11-05 22:06:32.735970: Unable to plot network architecture:
23
+ 2023-11-05 22:06:32.736120: failed to execute PosixPath('dot'), make sure the Graphviz executables are on your systems' PATH
24
+ 2023-11-05 22:06:32.837145:
25
+ 2023-11-05 22:06:32.837218: Epoch 0
26
+ 2023-11-05 22:06:32.837328: Current learning rate: 0.01
27
+ 2023-11-05 22:20:54.774893: train_loss 0.0022
28
+ 2023-11-05 22:20:54.775081: val_loss -0.1688
29
+ 2023-11-05 22:20:54.775169: Pseudo dice [0.0]
30
+ 2023-11-05 22:20:54.775265: Epoch time: 861.94 s
31
+ 2023-11-05 22:20:54.775342: Yayy! New best EMA pseudo Dice: 0.0
32
+ 2023-11-05 22:20:56.371249:
33
+ 2023-11-05 22:20:56.371469: Epoch 1
34
+ 2023-11-05 22:20:56.371657: Current learning rate: 0.00999
35
+ 2023-11-05 22:31:50.078054: train_loss -0.4984
36
+ 2023-11-05 22:31:50.078262: val_loss -0.6578
37
+ 2023-11-05 22:31:50.078343: Pseudo dice [0.7269]
38
+ 2023-11-05 22:31:50.078430: Epoch time: 653.71 s
39
+ 2023-11-05 22:31:50.078506: Yayy! New best EMA pseudo Dice: 0.0727
40
+ 2023-11-05 22:31:53.171840:
41
+ 2023-11-05 22:31:53.172024: Epoch 2
42
+ 2023-11-05 22:31:53.172153: Current learning rate: 0.00998
43
+ 2023-11-05 22:42:48.347788: train_loss -0.6775
44
+ 2023-11-05 22:42:48.347950: val_loss -0.7266
45
+ 2023-11-05 22:42:48.348027: Pseudo dice [0.7937]
46
+ 2023-11-05 22:42:48.348120: Epoch time: 655.18 s
47
+ 2023-11-05 22:42:48.348189: Yayy! New best EMA pseudo Dice: 0.1448
48
+ 2023-11-05 22:42:51.579526:
49
+ 2023-11-05 22:42:51.579650: Epoch 3
50
+ 2023-11-05 22:42:51.579753: Current learning rate: 0.00997
51
+ 2023-11-05 22:53:46.176134: train_loss -0.7185
52
+ 2023-11-05 22:53:46.176269: val_loss -0.7461
53
+ 2023-11-05 22:53:46.176361: Pseudo dice [0.8043]
54
+ 2023-11-05 22:53:46.176449: Epoch time: 654.6 s
55
+ 2023-11-05 22:53:46.176519: Yayy! New best EMA pseudo Dice: 0.2107
56
+ 2023-11-05 22:53:49.545828:
57
+ 2023-11-05 22:53:49.545945: Epoch 4
58
+ 2023-11-05 22:53:49.546050: Current learning rate: 0.00996
59
+ 2023-11-05 23:04:43.303561: train_loss -0.7317
60
+ 2023-11-05 23:04:43.303715: val_loss -0.7646
61
+ 2023-11-05 23:04:43.303797: Pseudo dice [0.8278]
62
+ 2023-11-05 23:04:43.303883: Epoch time: 653.76 s
63
+ 2023-11-05 23:04:43.303957: Yayy! New best EMA pseudo Dice: 0.2725
64
+ 2023-11-05 23:04:46.916903:
65
+ 2023-11-05 23:04:46.917109: Epoch 5
66
+ 2023-11-05 23:04:46.917217: Current learning rate: 0.00995
67
+ 2023-11-05 23:15:41.269564: train_loss -0.7454
68
+ 2023-11-05 23:15:41.269719: val_loss -0.776
69
+ 2023-11-05 23:15:41.269796: Pseudo dice [0.8309]
70
+ 2023-11-05 23:15:41.269877: Epoch time: 654.35 s
71
+ 2023-11-05 23:15:41.269946: Yayy! New best EMA pseudo Dice: 0.3283
72
+ 2023-11-05 23:15:44.430620:
73
+ 2023-11-05 23:15:44.430745: Epoch 6
74
+ 2023-11-05 23:15:44.430851: Current learning rate: 0.00995
75
+ 2023-11-05 23:26:37.331294: train_loss -0.7739
76
+ 2023-11-05 23:26:37.331445: val_loss -0.7867
77
+ 2023-11-05 23:26:37.331522: Pseudo dice [0.8319]
78
+ 2023-11-05 23:26:37.331612: Epoch time: 652.9 s
79
+ 2023-11-05 23:26:37.331681: Yayy! New best EMA pseudo Dice: 0.3787
80
+ 2023-11-05 23:26:40.667429:
81
+ 2023-11-05 23:26:40.667527: Epoch 7
82
+ 2023-11-05 23:26:40.667621: Current learning rate: 0.00994
83
+ 2023-11-05 23:37:36.057433: train_loss -0.7721
84
+ 2023-11-05 23:37:36.057588: val_loss -0.7854
85
+ 2023-11-05 23:37:36.057666: Pseudo dice [0.8414]
86
+ 2023-11-05 23:37:36.057748: Epoch time: 655.39 s
87
+ 2023-11-05 23:37:36.057818: Yayy! New best EMA pseudo Dice: 0.4249
88
+ 2023-11-05 23:37:39.249048:
89
+ 2023-11-05 23:37:39.249249: Epoch 8
90
+ 2023-11-05 23:37:39.249468: Current learning rate: 0.00993
91
+ 2023-11-05 23:48:34.520747: train_loss -0.7639
92
+ 2023-11-05 23:48:34.520913: val_loss -0.7863
93
+ 2023-11-05 23:48:34.520990: Pseudo dice [0.8342]
94
+ 2023-11-05 23:48:34.521071: Epoch time: 655.27 s
95
+ 2023-11-05 23:48:34.521140: Yayy! New best EMA pseudo Dice: 0.4659
96
+ 2023-11-05 23:48:37.934005:
97
+ 2023-11-05 23:48:37.934186: Epoch 9
98
+ 2023-11-05 23:48:37.934292: Current learning rate: 0.00992
99
+ 2023-11-05 23:59:31.744097: train_loss -0.7754
100
+ 2023-11-05 23:59:31.744260: val_loss -0.7949
101
+ 2023-11-05 23:59:31.744335: Pseudo dice [0.83]
102
+ 2023-11-05 23:59:31.744416: Epoch time: 653.81 s
103
+ 2023-11-05 23:59:31.744484: Yayy! New best EMA pseudo Dice: 0.5023
104
+ 2023-11-05 23:59:35.221768:
105
+ 2023-11-05 23:59:35.221899: Epoch 10
106
+ 2023-11-05 23:59:35.222059: Current learning rate: 0.00991
107
+ 2023-11-06 00:10:30.542706: train_loss -0.7745
108
+ 2023-11-06 00:10:30.542849: val_loss -0.7849
109
+ 2023-11-06 00:10:30.542925: Pseudo dice [0.8183]
110
+ 2023-11-06 00:10:30.543005: Epoch time: 655.32 s
111
+ 2023-11-06 00:10:30.543073: Yayy! New best EMA pseudo Dice: 0.5339
112
+ 2023-11-06 00:10:33.828026:
113
+ 2023-11-06 00:10:33.828140: Epoch 11
114
+ 2023-11-06 00:10:33.828271: Current learning rate: 0.0099
115
+ 2023-11-06 00:21:29.170071: train_loss -0.7769
116
+ 2023-11-06 00:21:29.170231: val_loss -0.7886
117
+ 2023-11-06 00:21:29.170306: Pseudo dice [0.8436]
118
+ 2023-11-06 00:21:29.170388: Epoch time: 655.34 s
119
+ 2023-11-06 00:21:29.170456: Yayy! New best EMA pseudo Dice: 0.5648
120
+ 2023-11-06 00:21:32.330414:
121
+ 2023-11-06 00:21:32.330636: Epoch 12
122
+ 2023-11-06 00:21:32.330763: Current learning rate: 0.00989
123
+ 2023-11-06 00:32:31.568352: train_loss -0.7846
124
+ 2023-11-06 00:32:31.568504: val_loss -0.7826
125
+ 2023-11-06 00:32:31.568580: Pseudo dice [0.8258]
126
+ 2023-11-06 00:32:31.568660: Epoch time: 659.24 s
127
+ 2023-11-06 00:32:31.568729: Yayy! New best EMA pseudo Dice: 0.5909
128
+ 2023-11-06 00:32:34.977341:
129
+ 2023-11-06 00:32:34.977602: Epoch 13
130
+ 2023-11-06 00:32:34.977876: Current learning rate: 0.00988
131
+ 2023-11-06 00:43:36.160741: train_loss -0.7869
132
+ 2023-11-06 00:43:36.160885: val_loss -0.7834
133
+ 2023-11-06 00:43:36.160981: Pseudo dice [0.8317]
134
+ 2023-11-06 00:43:36.161071: Epoch time: 661.18 s
135
+ 2023-11-06 00:43:36.161148: Yayy! New best EMA pseudo Dice: 0.615
136
+ 2023-11-06 00:43:42.027902:
137
+ 2023-11-06 00:43:42.028009: Epoch 14
138
+ 2023-11-06 00:43:42.028111: Current learning rate: 0.00987
139
+ 2023-11-06 00:54:42.362827: train_loss -0.785
140
+ 2023-11-06 00:54:42.362983: val_loss -0.7905
141
+ 2023-11-06 00:54:42.363072: Pseudo dice [0.8346]
142
+ 2023-11-06 00:54:42.363162: Epoch time: 660.34 s
143
+ 2023-11-06 00:54:42.363242: Yayy! New best EMA pseudo Dice: 0.637
144
+ 2023-11-06 00:54:45.816739:
145
+ 2023-11-06 00:54:45.816881: Epoch 15
146
+ 2023-11-06 00:54:45.816998: Current learning rate: 0.00986
147
+ 2023-11-06 01:05:39.600759: train_loss -0.7845
148
+ 2023-11-06 01:05:39.600898: val_loss -0.7838
149
+ 2023-11-06 01:05:39.600987: Pseudo dice [0.8295]
150
+ 2023-11-06 01:05:39.601068: Epoch time: 653.78 s
151
+ 2023-11-06 01:05:39.601136: Yayy! New best EMA pseudo Dice: 0.6562
152
+ 2023-11-06 01:05:43.176803:
153
+ 2023-11-06 01:05:43.176908: Epoch 16
154
+ 2023-11-06 01:05:43.177009: Current learning rate: 0.00986
155
+ 2023-11-06 01:16:50.574224: train_loss -0.7986
156
+ 2023-11-06 01:16:50.574384: val_loss -0.8016
157
+ 2023-11-06 01:16:50.574479: Pseudo dice [0.845]
158
+ 2023-11-06 01:16:50.574569: Epoch time: 667.4 s
159
+ 2023-11-06 01:16:50.574645: Yayy! New best EMA pseudo Dice: 0.6751
160
+ 2023-11-06 01:16:53.886550:
161
+ 2023-11-06 01:16:53.886660: Epoch 17
162
+ 2023-11-06 01:16:53.886781: Current learning rate: 0.00985
163
+ 2023-11-06 01:27:54.560711: train_loss -0.7931
164
+ 2023-11-06 01:27:54.560856: val_loss -0.81
165
+ 2023-11-06 01:27:54.560950: Pseudo dice [0.8518]
166
+ 2023-11-06 01:27:54.561041: Epoch time: 660.67 s
167
+ 2023-11-06 01:27:54.561118: Yayy! New best EMA pseudo Dice: 0.6928
168
+ 2023-11-06 01:27:57.874895:
169
+ 2023-11-06 01:27:57.875184: Epoch 18
170
+ 2023-11-06 01:27:57.875363: Current learning rate: 0.00984
171
+ 2023-11-06 01:38:55.045314: train_loss -0.7949
172
+ 2023-11-06 01:38:55.045470: val_loss -0.7958
173
+ 2023-11-06 01:38:55.045547: Pseudo dice [0.828]
174
+ 2023-11-06 01:38:55.045628: Epoch time: 657.17 s
175
+ 2023-11-06 01:38:55.045697: Yayy! New best EMA pseudo Dice: 0.7063
176
+ 2023-11-06 01:38:58.180024:
177
+ 2023-11-06 01:38:58.180334: Epoch 19
178
+ 2023-11-06 01:38:58.180515: Current learning rate: 0.00983
179
+ 2023-11-06 01:47:52.886719: train_loss -0.7978
180
+ 2023-11-06 01:47:52.886871: val_loss -0.8033
181
+ 2023-11-06 01:47:52.886948: Pseudo dice [0.8408]
182
+ 2023-11-06 01:47:52.887028: Epoch time: 534.71 s
183
+ 2023-11-06 01:47:52.887096: Yayy! New best EMA pseudo Dice: 0.7197
184
+ 2023-11-06 01:47:56.292678:
185
+ 2023-11-06 01:47:56.292835: Epoch 20
186
+ 2023-11-06 01:47:56.292980: Current learning rate: 0.00982
187
+ 2023-11-06 01:55:44.553798: train_loss -0.8072
188
+ 2023-11-06 01:55:44.553949: val_loss -0.8016
189
+ 2023-11-06 01:55:44.554031: Pseudo dice [0.8365]
190
+ 2023-11-06 01:55:44.554116: Epoch time: 468.26 s
191
+ 2023-11-06 01:55:44.554189: Yayy! New best EMA pseudo Dice: 0.7314
192
+ 2023-11-06 01:55:47.659638:
193
+ 2023-11-06 01:55:47.659833: Epoch 21
194
+ 2023-11-06 01:55:47.660119: Current learning rate: 0.00981
195
+ 2023-11-06 02:03:35.627137: train_loss -0.8073
196
+ 2023-11-06 02:03:35.627289: val_loss -0.809
197
+ 2023-11-06 02:03:35.627386: Pseudo dice [0.8394]
198
+ 2023-11-06 02:03:35.627477: Epoch time: 467.97 s
199
+ 2023-11-06 02:03:35.627555: Yayy! New best EMA pseudo Dice: 0.7422
200
+ 2023-11-06 02:03:38.898279:
201
+ 2023-11-06 02:03:38.898384: Epoch 22
202
+ 2023-11-06 02:03:38.898483: Current learning rate: 0.0098
203
+ 2023-11-06 02:13:19.098450: train_loss -0.8012
204
+ 2023-11-06 02:13:19.098597: val_loss -0.794
205
+ 2023-11-06 02:13:19.098699: Pseudo dice [0.8268]
206
+ 2023-11-06 02:13:19.098793: Epoch time: 580.2 s
207
+ 2023-11-06 02:13:19.098870: Yayy! New best EMA pseudo Dice: 0.7507
208
+ 2023-11-06 02:13:22.567191:
209
+ 2023-11-06 02:13:22.567326: Epoch 23
210
+ 2023-11-06 02:13:22.567432: Current learning rate: 0.00979
211
+ 2023-11-06 02:24:37.768249: train_loss -0.8039
212
+ 2023-11-06 02:24:37.768389: val_loss -0.8143
213
+ 2023-11-06 02:24:37.768477: Pseudo dice [0.8576]
214
+ 2023-11-06 02:24:37.768565: Epoch time: 675.2 s
215
+ 2023-11-06 02:24:37.768634: Yayy! New best EMA pseudo Dice: 0.7614
216
+ 2023-11-06 02:24:41.063595:
217
+ 2023-11-06 02:24:41.063719: Epoch 24
218
+ 2023-11-06 02:24:41.063822: Current learning rate: 0.00978
219
+ 2023-11-06 02:35:56.216298: train_loss -0.8141
220
+ 2023-11-06 02:35:56.216446: val_loss -0.8112
221
+ 2023-11-06 02:35:56.216523: Pseudo dice [0.8314]
222
+ 2023-11-06 02:35:56.216605: Epoch time: 675.15 s
223
+ 2023-11-06 02:35:56.216681: Yayy! New best EMA pseudo Dice: 0.7684
224
+ 2023-11-06 02:35:59.473060:
225
+ 2023-11-06 02:35:59.473267: Epoch 25
226
+ 2023-11-06 02:35:59.473394: Current learning rate: 0.00977
227
+ 2023-11-06 02:47:14.841282: train_loss -0.8238
228
+ 2023-11-06 02:47:14.841446: val_loss -0.8127
229
+ 2023-11-06 02:47:14.841526: Pseudo dice [0.8517]
230
+ 2023-11-06 02:47:14.841607: Epoch time: 675.37 s
231
+ 2023-11-06 02:47:14.841675: Yayy! New best EMA pseudo Dice: 0.7767
232
+ 2023-11-06 02:47:18.199337:
233
+ 2023-11-06 02:47:18.199439: Epoch 26
234
+ 2023-11-06 02:47:18.199549: Current learning rate: 0.00977
235
+ 2023-11-06 02:58:33.463852: train_loss -0.8094
236
+ 2023-11-06 02:58:33.464010: val_loss -0.8178
237
+ 2023-11-06 02:58:33.464087: Pseudo dice [0.8539]
238
+ 2023-11-06 02:58:33.464169: Epoch time: 675.27 s
239
+ 2023-11-06 02:58:33.464237: Yayy! New best EMA pseudo Dice: 0.7844
240
+ 2023-11-06 02:58:36.681627:
241
+ 2023-11-06 02:58:36.681730: Epoch 27
242
+ 2023-11-06 02:58:36.681832: Current learning rate: 0.00976
243
+ 2023-11-06 03:09:51.961360: train_loss -0.8114
244
+ 2023-11-06 03:09:51.961498: val_loss -0.7815
245
+ 2023-11-06 03:09:51.961591: Pseudo dice [0.8219]
246
+ 2023-11-06 03:09:51.961673: Epoch time: 675.28 s
247
+ 2023-11-06 03:09:51.961741: Yayy! New best EMA pseudo Dice: 0.7882
248
+ 2023-11-06 03:09:55.427147:
249
+ 2023-11-06 03:09:55.427268: Epoch 28
250
+ 2023-11-06 03:09:55.427380: Current learning rate: 0.00975
251
+ 2023-11-06 03:21:10.383088: train_loss -0.7997
252
+ 2023-11-06 03:21:10.383252: val_loss -0.7981
253
+ 2023-11-06 03:21:10.383328: Pseudo dice [0.8349]
254
+ 2023-11-06 03:21:10.383410: Epoch time: 674.96 s
255
+ 2023-11-06 03:21:10.383477: Yayy! New best EMA pseudo Dice: 0.7928
256
+ 2023-11-06 03:21:13.641899:
257
+ 2023-11-06 03:21:13.641999: Epoch 29
258
+ 2023-11-06 03:21:13.642121: Current learning rate: 0.00974
259
+ 2023-11-06 03:32:28.908478: train_loss -0.806
260
+ 2023-11-06 03:32:28.908639: val_loss -0.8071
261
+ 2023-11-06 03:32:28.908733: Pseudo dice [0.8445]
262
+ 2023-11-06 03:32:28.908826: Epoch time: 675.27 s
263
+ 2023-11-06 03:32:28.908903: Yayy! New best EMA pseudo Dice: 0.798
264
+ 2023-11-06 03:32:32.713846:
265
+ 2023-11-06 03:32:32.714119: Epoch 30
266
+ 2023-11-06 03:32:32.714329: Current learning rate: 0.00973
267
+ 2023-11-06 03:43:47.583081: train_loss -0.8078
268
+ 2023-11-06 03:43:47.583235: val_loss -0.7946
269
+ 2023-11-06 03:43:47.583393: Pseudo dice [0.8358]
270
+ 2023-11-06 03:43:47.583498: Epoch time: 674.87 s
271
+ 2023-11-06 03:43:47.583567: Yayy! New best EMA pseudo Dice: 0.8018
272
+ 2023-11-06 03:43:50.914172:
273
+ 2023-11-06 03:43:50.914282: Epoch 31
274
+ 2023-11-06 03:43:50.914385: Current learning rate: 0.00972
275
+ 2023-11-06 03:55:05.869875: train_loss -0.8134
276
+ 2023-11-06 03:55:05.870045: val_loss -0.8119
277
+ 2023-11-06 03:55:05.870121: Pseudo dice [0.8536]
278
+ 2023-11-06 03:55:05.870202: Epoch time: 674.96 s
279
+ 2023-11-06 03:55:05.870270: Yayy! New best EMA pseudo Dice: 0.807
280
+ 2023-11-06 03:55:09.261483:
281
+ 2023-11-06 03:55:09.261622: Epoch 32
282
+ 2023-11-06 03:55:09.261738: Current learning rate: 0.00971
283
+ 2023-11-06 04:06:24.365536: train_loss -0.814
284
+ 2023-11-06 04:06:24.365684: val_loss -0.799
285
+ 2023-11-06 04:06:24.365768: Pseudo dice [0.8439]
286
+ 2023-11-06 04:06:24.365851: Epoch time: 675.1 s
287
+ 2023-11-06 04:06:24.365922: Yayy! New best EMA pseudo Dice: 0.8107
288
+ 2023-11-06 04:06:27.632730:
289
+ 2023-11-06 04:06:27.632976: Epoch 33
290
+ 2023-11-06 04:06:27.633147: Current learning rate: 0.0097
291
+ 2023-11-06 04:17:42.891809: train_loss -0.8178
292
+ 2023-11-06 04:17:42.891958: val_loss -0.8109
293
+ 2023-11-06 04:17:42.892051: Pseudo dice [0.8469]
294
+ 2023-11-06 04:17:42.892142: Epoch time: 675.26 s
295
+ 2023-11-06 04:17:42.892221: Yayy! New best EMA pseudo Dice: 0.8143
296
+ 2023-11-06 04:17:46.135388:
297
+ 2023-11-06 04:17:46.135488: Epoch 34
298
+ 2023-11-06 04:17:46.135598: Current learning rate: 0.00969
299
+ 2023-11-06 04:29:01.376284: train_loss -0.817
300
+ 2023-11-06 04:29:01.376450: val_loss -0.8104
301
+ 2023-11-06 04:29:01.376533: Pseudo dice [0.855]
302
+ 2023-11-06 04:29:01.376614: Epoch time: 675.24 s
303
+ 2023-11-06 04:29:01.376683: Yayy! New best EMA pseudo Dice: 0.8184
304
+ 2023-11-06 04:29:04.748322:
305
+ 2023-11-06 04:29:04.748513: Epoch 35
306
+ 2023-11-06 04:29:04.748706: Current learning rate: 0.00968
307
+ 2023-11-06 04:40:19.958354: train_loss -0.8086
308
+ 2023-11-06 04:40:19.958521: val_loss -0.8051
309
+ 2023-11-06 04:40:19.958597: Pseudo dice [0.8335]
310
+ 2023-11-06 04:40:19.958687: Epoch time: 675.21 s
311
+ 2023-11-06 04:40:19.958761: Yayy! New best EMA pseudo Dice: 0.8199
312
+ 2023-11-06 04:40:23.530188:
313
+ 2023-11-06 04:40:23.530292: Epoch 36
314
+ 2023-11-06 04:40:23.530401: Current learning rate: 0.00968
315
+ 2023-11-06 04:51:38.573648: train_loss -0.8159
316
+ 2023-11-06 04:51:38.573810: val_loss -0.8143
317
+ 2023-11-06 04:51:38.573904: Pseudo dice [0.8454]
318
+ 2023-11-06 04:51:38.573995: Epoch time: 675.04 s
319
+ 2023-11-06 04:51:38.574072: Yayy! New best EMA pseudo Dice: 0.8224
320
+ 2023-11-06 04:51:42.123522:
321
+ 2023-11-06 04:51:42.123701: Epoch 37
322
+ 2023-11-06 04:51:42.123864: Current learning rate: 0.00967
323
+ 2023-11-06 05:02:57.064787: train_loss -0.8144
324
+ 2023-11-06 05:02:57.064952: val_loss -0.81
325
+ 2023-11-06 05:02:57.065049: Pseudo dice [0.8435]
326
+ 2023-11-06 05:02:57.065142: Epoch time: 674.94 s
327
+ 2023-11-06 05:02:57.065221: Yayy! New best EMA pseudo Dice: 0.8245
328
+ 2023-11-06 05:03:00.421975:
329
+ 2023-11-06 05:03:00.422125: Epoch 38
330
+ 2023-11-06 05:03:00.422287: Current learning rate: 0.00966
331
+ 2023-11-06 05:14:15.585457: train_loss -0.8254
332
+ 2023-11-06 05:14:15.585616: val_loss -0.7961
333
+ 2023-11-06 05:14:15.585695: Pseudo dice [0.83]
334
+ 2023-11-06 05:14:15.585777: Epoch time: 675.16 s
335
+ 2023-11-06 05:14:15.585849: Yayy! New best EMA pseudo Dice: 0.8251
336
+ 2023-11-06 05:14:19.062620:
337
+ 2023-11-06 05:14:19.062906: Epoch 39
338
+ 2023-11-06 05:14:19.063134: Current learning rate: 0.00965
339
+ 2023-11-06 05:25:34.054948: train_loss -0.8194
340
+ 2023-11-06 05:25:34.055121: val_loss -0.8239
341
+ 2023-11-06 05:25:34.055197: Pseudo dice [0.8627]
342
+ 2023-11-06 05:25:34.055279: Epoch time: 674.99 s
343
+ 2023-11-06 05:25:34.055348: Yayy! New best EMA pseudo Dice: 0.8288
344
+ 2023-11-06 05:25:37.355520:
345
+ 2023-11-06 05:25:37.355628: Epoch 40
346
+ 2023-11-06 05:25:37.355733: Current learning rate: 0.00964
347
+ 2023-11-06 05:36:52.323830: train_loss -0.8123
348
+ 2023-11-06 05:36:52.323998: val_loss -0.8085
349
+ 2023-11-06 05:36:52.324074: Pseudo dice [0.8389]
350
+ 2023-11-06 05:36:52.324154: Epoch time: 674.97 s
351
+ 2023-11-06 05:36:52.324223: Yayy! New best EMA pseudo Dice: 0.8298
352
+ 2023-11-06 05:36:55.576262:
353
+ 2023-11-06 05:36:55.576443: Epoch 41
354
+ 2023-11-06 05:36:55.576548: Current learning rate: 0.00963
355
+ 2023-11-06 05:48:11.067998: train_loss -0.8271
356
+ 2023-11-06 05:48:11.068160: val_loss -0.8201
357
+ 2023-11-06 05:48:11.068256: Pseudo dice [0.857]
358
+ 2023-11-06 05:48:11.068349: Epoch time: 675.49 s
359
+ 2023-11-06 05:48:11.068427: Yayy! New best EMA pseudo Dice: 0.8326
360
+ 2023-11-06 05:48:14.317959:
361
+ 2023-11-06 05:48:14.318098: Epoch 42
362
+ 2023-11-06 05:48:14.318215: Current learning rate: 0.00962
363
+ 2023-11-06 05:59:29.383212: train_loss -0.8256
364
+ 2023-11-06 05:59:29.383366: val_loss -0.8221
365
+ 2023-11-06 05:59:29.383451: Pseudo dice [0.8575]
366
+ 2023-11-06 05:59:29.383550: Epoch time: 675.07 s
367
+ 2023-11-06 05:59:29.383627: Yayy! New best EMA pseudo Dice: 0.835
368
+ 2023-11-06 05:59:32.576050:
369
+ 2023-11-06 05:59:32.576236: Epoch 43
370
+ 2023-11-06 05:59:32.576383: Current learning rate: 0.00961
371
+ 2023-11-06 06:10:47.765534: train_loss -0.8276
372
+ 2023-11-06 06:10:47.765698: val_loss -0.8066
373
+ 2023-11-06 06:10:47.765776: Pseudo dice [0.8416]
374
+ 2023-11-06 06:10:47.765857: Epoch time: 675.19 s
375
+ 2023-11-06 06:10:47.765927: Yayy! New best EMA pseudo Dice: 0.8357
376
+ 2023-11-06 06:10:51.414655:
377
+ 2023-11-06 06:10:51.414837: Epoch 44
378
+ 2023-11-06 06:10:51.415005: Current learning rate: 0.0096
379
+ 2023-11-06 06:22:06.945473: train_loss -0.8151
380
+ 2023-11-06 06:22:06.945662: val_loss -0.8149
381
+ 2023-11-06 06:22:06.945739: Pseudo dice [0.842]
382
+ 2023-11-06 06:22:06.945817: Epoch time: 675.53 s
383
+ 2023-11-06 06:22:06.945896: Yayy! New best EMA pseudo Dice: 0.8363
384
+ 2023-11-06 06:22:10.103697:
385
+ 2023-11-06 06:22:10.103803: Epoch 45
386
+ 2023-11-06 06:22:10.103918: Current learning rate: 0.00959
387
+ 2023-11-06 06:33:25.378408: train_loss -0.8281
388
+ 2023-11-06 06:33:25.378564: val_loss -0.8194
389
+ 2023-11-06 06:33:25.378639: Pseudo dice [0.8511]
390
+ 2023-11-06 06:33:25.378727: Epoch time: 675.28 s
391
+ 2023-11-06 06:33:25.378796: Yayy! New best EMA pseudo Dice: 0.8378
392
+ 2023-11-06 06:33:28.743157:
393
+ 2023-11-06 06:33:28.743263: Epoch 46
394
+ 2023-11-06 06:33:28.743382: Current learning rate: 0.00959
395
+ 2023-11-06 06:44:43.864779: train_loss -0.8172
396
+ 2023-11-06 06:44:43.864925: val_loss -0.8076
397
+ 2023-11-06 06:44:43.865001: Pseudo dice [0.8374]
398
+ 2023-11-06 06:44:43.865081: Epoch time: 675.12 s
399
+ 2023-11-06 06:44:45.082992:
400
+ 2023-11-06 06:44:45.083101: Epoch 47
401
+ 2023-11-06 06:44:45.083211: Current learning rate: 0.00958
402
+ 2023-11-06 06:55:59.903310: train_loss -0.8225
403
+ 2023-11-06 06:55:59.903490: val_loss -0.8182
404
+ 2023-11-06 06:55:59.903566: Pseudo dice [0.8473]
405
+ 2023-11-06 06:55:59.903647: Epoch time: 674.82 s
406
+ 2023-11-06 06:55:59.903717: Yayy! New best EMA pseudo Dice: 0.8387
407
+ 2023-11-06 06:56:03.120151:
408
+ 2023-11-06 06:56:03.120266: Epoch 48
409
+ 2023-11-06 06:56:03.120366: Current learning rate: 0.00957
410
+ 2023-11-06 07:07:18.367775: train_loss -0.8177
411
+ 2023-11-06 07:07:18.367926: val_loss -0.809
412
+ 2023-11-06 07:07:18.368003: Pseudo dice [0.834]
413
+ 2023-11-06 07:07:18.368084: Epoch time: 675.25 s
414
+ 2023-11-06 07:07:19.602919:
415
+ 2023-11-06 07:07:19.603036: Epoch 49
416
+ 2023-11-06 07:07:19.603153: Current learning rate: 0.00956
417
+ 2023-11-06 07:18:34.630416: train_loss -0.8252
418
+ 2023-11-06 07:18:34.630569: val_loss -0.8214
419
+ 2023-11-06 07:18:34.630645: Pseudo dice [0.8596]
420
+ 2023-11-06 07:18:34.630736: Epoch time: 675.03 s
421
+ 2023-11-06 07:18:34.913779: Yayy! New best EMA pseudo Dice: 0.8404
422
+ 2023-11-06 07:18:38.290022:
423
+ 2023-11-06 07:18:38.290306: Epoch 50
424
+ 2023-11-06 07:18:38.290472: Current learning rate: 0.00955
425
+ 2023-11-06 07:29:52.160251: train_loss -0.8324
426
+ 2023-11-06 07:29:52.160417: val_loss -0.8225
427
+ 2023-11-06 07:29:52.160571: Pseudo dice [0.8528]
428
+ 2023-11-06 07:29:52.160689: Epoch time: 673.87 s
429
+ 2023-11-06 07:29:52.160758: Yayy! New best EMA pseudo Dice: 0.8416
430
+ 2023-11-06 07:29:55.579350:
431
+ 2023-11-06 07:29:55.579564: Epoch 51
432
+ 2023-11-06 07:29:55.579664: Current learning rate: 0.00954
433
+ 2023-11-06 07:41:10.809812: train_loss -0.8262
434
+ 2023-11-06 07:41:10.809976: val_loss -0.8195
435
+ 2023-11-06 07:41:10.810052: Pseudo dice [0.8512]
436
+ 2023-11-06 07:41:10.810133: Epoch time: 675.23 s
437
+ 2023-11-06 07:41:10.810200: Yayy! New best EMA pseudo Dice: 0.8426
438
+ 2023-11-06 07:41:14.117213:
439
+ 2023-11-06 07:41:14.117320: Epoch 52
440
+ 2023-11-06 07:41:14.117436: Current learning rate: 0.00953
441
+ 2023-11-06 07:52:29.440799: train_loss -0.8204
442
+ 2023-11-06 07:52:29.440960: val_loss -0.8119
443
+ 2023-11-06 07:52:29.441036: Pseudo dice [0.8433]
444
+ 2023-11-06 07:52:29.441117: Epoch time: 675.32 s
445
+ 2023-11-06 07:52:29.441186: Yayy! New best EMA pseudo Dice: 0.8427
446
+ 2023-11-06 07:52:32.941524:
447
+ 2023-11-06 07:52:32.941636: Epoch 53
448
+ 2023-11-06 07:52:32.941733: Current learning rate: 0.00952
449
+ 2023-11-06 08:03:48.102913: train_loss -0.8276
450
+ 2023-11-06 08:03:48.103097: val_loss -0.8167
451
+ 2023-11-06 08:03:48.103188: Pseudo dice [0.861]
452
+ 2023-11-06 08:03:48.103277: Epoch time: 675.16 s
453
+ 2023-11-06 08:03:48.103354: Yayy! New best EMA pseudo Dice: 0.8445
454
+ 2023-11-06 08:03:51.334111:
455
+ 2023-11-06 08:03:51.334226: Epoch 54
456
+ 2023-11-06 08:03:51.334339: Current learning rate: 0.00951
457
+ 2023-11-06 08:15:06.513220: train_loss -0.8313
458
+ 2023-11-06 08:15:06.513460: val_loss -0.8093
459
+ 2023-11-06 08:15:06.513551: Pseudo dice [0.8479]
460
+ 2023-11-06 08:15:06.513641: Epoch time: 675.18 s
461
+ 2023-11-06 08:15:06.513718: Yayy! New best EMA pseudo Dice: 0.8448
462
+ 2023-11-06 08:15:09.747582:
463
+ 2023-11-06 08:15:09.747695: Epoch 55
464
+ 2023-11-06 08:15:09.747810: Current learning rate: 0.0095
465
+ 2023-11-06 08:26:25.128954: train_loss -0.8234
466
+ 2023-11-06 08:26:25.129091: val_loss -0.8087
467
+ 2023-11-06 08:26:25.129179: Pseudo dice [0.8479]
468
+ 2023-11-06 08:26:25.129266: Epoch time: 675.38 s
469
+ 2023-11-06 08:26:25.129333: Yayy! New best EMA pseudo Dice: 0.8451
470
+ 2023-11-06 08:26:28.348276:
471
+ 2023-11-06 08:26:28.348374: Epoch 56
472
+ 2023-11-06 08:26:28.348496: Current learning rate: 0.00949
473
+ 2023-11-06 08:37:43.825908: train_loss -0.8124
474
+ 2023-11-06 08:37:43.826058: val_loss -0.8055
475
+ 2023-11-06 08:37:43.826153: Pseudo dice [0.8376]
476
+ 2023-11-06 08:37:43.826247: Epoch time: 675.48 s
477
+ 2023-11-06 08:37:45.064820:
478
+ 2023-11-06 08:37:45.064925: Epoch 57
479
+ 2023-11-06 08:37:45.065036: Current learning rate: 0.00949
480
+ 2023-11-06 08:49:00.095835: train_loss -0.8276
481
+ 2023-11-06 08:49:00.096008: val_loss -0.8191
482
+ 2023-11-06 08:49:00.096088: Pseudo dice [0.8536]
483
+ 2023-11-06 08:49:00.096174: Epoch time: 675.03 s
484
+ 2023-11-06 08:49:00.096246: Yayy! New best EMA pseudo Dice: 0.8453
485
+ 2023-11-06 08:49:03.525604:
486
+ 2023-11-06 08:49:03.525789: Epoch 58
487
+ 2023-11-06 08:49:03.525964: Current learning rate: 0.00948
488
+ 2023-11-06 09:00:18.875708: train_loss -0.8285
489
+ 2023-11-06 09:00:18.875870: val_loss -0.8237
490
+ 2023-11-06 09:00:18.875946: Pseudo dice [0.8581]
491
+ 2023-11-06 09:00:18.876027: Epoch time: 675.35 s
492
+ 2023-11-06 09:00:18.876095: Yayy! New best EMA pseudo Dice: 0.8466
493
+ 2023-11-06 09:00:22.098460:
494
+ 2023-11-06 09:00:22.098590: Epoch 59
495
+ 2023-11-06 09:00:22.098714: Current learning rate: 0.00947
496
+ 2023-11-06 09:11:37.533084: train_loss -0.8272
497
+ 2023-11-06 09:11:37.533242: val_loss -0.791
498
+ 2023-11-06 09:11:37.533318: Pseudo dice [0.8158]
499
+ 2023-11-06 09:11:37.533400: Epoch time: 675.44 s
500
+ 2023-11-06 09:11:38.800722:
501
+ 2023-11-06 09:11:38.800860: Epoch 60
502
+ 2023-11-06 09:11:38.800962: Current learning rate: 0.00946
503
+ 2023-11-06 09:22:53.990348: train_loss -0.8271
504
+ 2023-11-06 09:22:53.990512: val_loss -0.8294
505
+ 2023-11-06 09:22:53.990592: Pseudo dice [0.8748]
506
+ 2023-11-06 09:22:53.990684: Epoch time: 675.19 s
507
+ 2023-11-06 09:22:53.990766: Yayy! New best EMA pseudo Dice: 0.8466
508
+ 2023-11-06 09:22:57.349074:
509
+ 2023-11-06 09:22:57.349289: Epoch 61
510
+ 2023-11-06 09:22:57.349463: Current learning rate: 0.00945
511
+ 2023-11-06 09:34:12.470989: train_loss -0.8302
512
+ 2023-11-06 09:34:12.471159: val_loss -0.8152
513
+ 2023-11-06 09:34:12.471240: Pseudo dice [0.8533]
514
+ 2023-11-06 09:34:12.471326: Epoch time: 675.12 s
515
+ 2023-11-06 09:34:12.471398: Yayy! New best EMA pseudo Dice: 0.8473
516
+ 2023-11-06 09:34:15.608224:
517
+ 2023-11-06 09:34:15.608330: Epoch 62
518
+ 2023-11-06 09:34:15.608428: Current learning rate: 0.00944
519
+ 2023-11-06 09:45:30.830228: train_loss -0.8108
520
+ 2023-11-06 09:45:30.830392: val_loss -0.8124
521
+ 2023-11-06 09:45:30.830467: Pseudo dice [0.8537]
522
+ 2023-11-06 09:45:30.830546: Epoch time: 675.22 s
523
+ 2023-11-06 09:45:30.830615: Yayy! New best EMA pseudo Dice: 0.8479
524
+ 2023-11-06 09:45:33.973119:
525
+ 2023-11-06 09:45:33.973341: Epoch 63
526
+ 2023-11-06 09:45:33.973521: Current learning rate: 0.00943
527
+ 2023-11-06 09:56:48.860524: train_loss -0.8232
528
+ 2023-11-06 09:56:48.860660: val_loss -0.7923
529
+ 2023-11-06 09:56:48.860746: Pseudo dice [0.8356]
530
+ 2023-11-06 09:56:48.860831: Epoch time: 674.89 s
531
+ 2023-11-06 09:56:50.298269:
532
+ 2023-11-06 09:56:50.298393: Epoch 64
533
+ 2023-11-06 09:56:50.298507: Current learning rate: 0.00942
534
+ 2023-11-06 10:08:05.669342: train_loss -0.8194
535
+ 2023-11-06 10:08:05.669511: val_loss -0.8066
536
+ 2023-11-06 10:08:05.669608: Pseudo dice [0.8408]
537
+ 2023-11-06 10:08:05.669699: Epoch time: 675.37 s
538
+ 2023-11-06 10:08:06.933493:
539
+ 2023-11-06 10:08:06.933608: Epoch 65
540
+ 2023-11-06 10:08:06.933718: Current learning rate: 0.00941
541
+ 2023-11-06 10:19:22.076491: train_loss -0.8295
542
+ 2023-11-06 10:19:22.076648: val_loss -0.8209
543
+ 2023-11-06 10:19:22.076744: Pseudo dice [0.8535]
544
+ 2023-11-06 10:19:22.076838: Epoch time: 675.14 s
545
+ 2023-11-06 10:19:23.367105:
546
+ 2023-11-06 10:19:23.367233: Epoch 66
547
+ 2023-11-06 10:19:23.367350: Current learning rate: 0.0094
548
+ 2023-11-06 10:30:38.725112: train_loss -0.825
549
+ 2023-11-06 10:30:38.725279: val_loss -0.8273
550
+ 2023-11-06 10:30:38.725372: Pseudo dice [0.8589]
551
+ 2023-11-06 10:30:38.725462: Epoch time: 675.36 s
552
+ 2023-11-06 10:30:38.725539: Yayy! New best EMA pseudo Dice: 0.8481
553
+ 2023-11-06 10:30:42.055256:
554
+ 2023-11-06 10:30:42.055468: Epoch 67
555
+ 2023-11-06 10:30:42.055651: Current learning rate: 0.00939
556
+ 2023-11-06 10:41:57.480700: train_loss -0.8361
557
+ 2023-11-06 10:41:57.480846: val_loss -0.8078
558
+ 2023-11-06 10:41:57.480921: Pseudo dice [0.8426]
559
+ 2023-11-06 10:41:57.481000: Epoch time: 675.43 s
560
+ 2023-11-06 10:41:58.767543:
561
+ 2023-11-06 10:41:58.767738: Epoch 68
562
+ 2023-11-06 10:41:58.767924: Current learning rate: 0.00939
563
+ 2023-11-06 10:53:14.281489: train_loss -0.835
564
+ 2023-11-06 10:53:14.281637: val_loss -0.8196
565
+ 2023-11-06 10:53:14.281713: Pseudo dice [0.8561]
566
+ 2023-11-06 10:53:14.281793: Epoch time: 675.51 s
567
+ 2023-11-06 10:53:14.281860: Yayy! New best EMA pseudo Dice: 0.8484
568
+ 2023-11-06 10:53:17.731063:
569
+ 2023-11-06 10:53:17.731177: Epoch 69
570
+ 2023-11-06 10:53:17.731296: Current learning rate: 0.00938
571
+ 2023-11-06 11:04:32.854343: train_loss -0.8306
572
+ 2023-11-06 11:04:32.854506: val_loss -0.8266
573
+ 2023-11-06 11:04:32.854598: Pseudo dice [0.8572]
574
+ 2023-11-06 11:04:32.854696: Epoch time: 675.12 s
575
+ 2023-11-06 11:04:32.854775: Yayy! New best EMA pseudo Dice: 0.8493
576
+ 2023-11-06 11:04:36.198146:
577
+ 2023-11-06 11:04:36.198331: Epoch 70
578
+ 2023-11-06 11:04:36.198503: Current learning rate: 0.00937
579
+ 2023-11-06 11:15:51.137854: train_loss -0.8385
580
+ 2023-11-06 11:15:51.138000: val_loss -0.8133
581
+ 2023-11-06 11:15:51.138093: Pseudo dice [0.8431]
582
+ 2023-11-06 11:15:51.138183: Epoch time: 674.94 s
583
+ 2023-11-06 11:15:52.630492:
584
+ 2023-11-06 11:15:52.630608: Epoch 71
585
+ 2023-11-06 11:15:52.630735: Current learning rate: 0.00936
586
+ 2023-11-06 11:27:07.819234: train_loss -0.8339
587
+ 2023-11-06 11:27:07.819484: val_loss -0.8214
588
+ 2023-11-06 11:27:07.819611: Pseudo dice [0.8487]
589
+ 2023-11-06 11:27:07.819704: Epoch time: 675.19 s
590
+ 2023-11-06 11:27:09.109598:
591
+ 2023-11-06 11:27:09.109764: Epoch 72
592
+ 2023-11-06 11:27:09.109937: Current learning rate: 0.00935
593
+ 2023-11-06 11:38:24.488279: train_loss -0.84
594
+ 2023-11-06 11:38:24.488451: val_loss -0.8316
595
+ 2023-11-06 11:38:24.488527: Pseudo dice [0.8615]
596
+ 2023-11-06 11:38:24.488606: Epoch time: 675.38 s
597
+ 2023-11-06 11:38:24.488675: Yayy! New best EMA pseudo Dice: 0.8499
598
+ 2023-11-06 11:38:27.703335:
599
+ 2023-11-06 11:38:27.703459: Epoch 73
600
+ 2023-11-06 11:38:27.703562: Current learning rate: 0.00934
601
+ 2023-11-06 11:49:43.036126: train_loss -0.8416
602
+ 2023-11-06 11:49:43.036298: val_loss -0.8257
603
+ 2023-11-06 11:49:43.036396: Pseudo dice [0.8603]
604
+ 2023-11-06 11:49:43.036489: Epoch time: 675.33 s
605
+ 2023-11-06 11:49:43.036568: Yayy! New best EMA pseudo Dice: 0.851
606
+ 2023-11-06 11:49:46.209569:
607
+ 2023-11-06 11:49:46.209720: Epoch 74
608
+ 2023-11-06 11:49:46.209849: Current learning rate: 0.00933
609
+ 2023-11-06 12:01:01.503134: train_loss -0.8418
610
+ 2023-11-06 12:01:01.503297: val_loss -0.8301
611
+ 2023-11-06 12:01:01.503382: Pseudo dice [0.8594]
612
+ 2023-11-06 12:01:01.503466: Epoch time: 675.29 s
613
+ 2023-11-06 12:01:01.503535: Yayy! New best EMA pseudo Dice: 0.8518
614
+ 2023-11-06 12:01:04.783809:
615
+ 2023-11-06 12:01:04.783910: Epoch 75
616
+ 2023-11-06 12:01:04.784027: Current learning rate: 0.00932
617
+ 2023-11-06 12:12:19.949506: train_loss -0.8463
618
+ 2023-11-06 12:12:19.949670: val_loss -0.8123
619
+ 2023-11-06 12:12:19.949746: Pseudo dice [0.8351]
620
+ 2023-11-06 12:12:19.949828: Epoch time: 675.17 s
621
+ 2023-11-06 12:12:21.242239:
622
+ 2023-11-06 12:12:21.242361: Epoch 76
623
+ 2023-11-06 12:12:21.242465: Current learning rate: 0.00931
624
+ 2023-11-06 12:23:36.276375: train_loss -0.8364
625
+ 2023-11-06 12:23:36.276535: val_loss -0.8107
626
+ 2023-11-06 12:23:36.276609: Pseudo dice [0.8454]
627
+ 2023-11-06 12:23:36.276692: Epoch time: 675.03 s
628
+ 2023-11-06 12:23:37.558540:
629
+ 2023-11-06 12:23:37.558640: Epoch 77
630
+ 2023-11-06 12:23:37.558762: Current learning rate: 0.0093
631
+ 2023-11-06 12:34:52.820284: train_loss -0.8417
632
+ 2023-11-06 12:34:52.820433: val_loss -0.8144
633
+ 2023-11-06 12:34:52.820513: Pseudo dice [0.8607]
634
+ 2023-11-06 12:34:52.820607: Epoch time: 675.26 s
635
+ 2023-11-06 12:34:54.330649:
636
+ 2023-11-06 12:34:54.330824: Epoch 78
637
+ 2023-11-06 12:34:54.330933: Current learning rate: 0.0093
638
+ 2023-11-06 12:46:09.541902: train_loss -0.8413
639
+ 2023-11-06 12:46:09.542046: val_loss -0.8263
640
+ 2023-11-06 12:46:09.542131: Pseudo dice [0.8589]
641
+ 2023-11-06 12:46:09.542212: Epoch time: 675.21 s
642
+ 2023-11-06 12:46:10.856498:
643
+ 2023-11-06 12:46:10.856613: Epoch 79
644
+ 2023-11-06 12:46:10.856715: Current learning rate: 0.00929
645
+ 2023-11-06 12:57:25.954721: train_loss -0.8449
646
+ 2023-11-06 12:57:25.954871: val_loss -0.8188
647
+ 2023-11-06 12:57:25.954963: Pseudo dice [0.8579]
648
+ 2023-11-06 12:57:25.955053: Epoch time: 675.1 s
649
+ 2023-11-06 12:57:25.955130: Yayy! New best EMA pseudo Dice: 0.8522
650
+ 2023-11-06 12:57:29.184287:
651
+ 2023-11-06 12:57:29.184399: Epoch 80
652
+ 2023-11-06 12:57:29.184514: Current learning rate: 0.00928
653
+ 2023-11-06 13:08:44.293826: train_loss -0.831
654
+ 2023-11-06 13:08:44.293987: val_loss -0.825
655
+ 2023-11-06 13:08:44.294063: Pseudo dice [0.8536]
656
+ 2023-11-06 13:08:44.294145: Epoch time: 675.11 s
657
+ 2023-11-06 13:08:44.294213: Yayy! New best EMA pseudo Dice: 0.8523
658
+ 2023-11-06 13:08:47.574882:
659
+ 2023-11-06 13:08:47.574989: Epoch 81
660
+ 2023-11-06 13:08:47.575088: Current learning rate: 0.00927
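Aside: the "New best EMA pseudo Dice" values reported above are consistent with an exponential moving average of the per-epoch pseudo Dice with a decay of 0.9 (e.g. epoch 1: 0.9 * 0.0 + 0.1 * 0.7269 = 0.0727; epoch 2: 0.9 * 0.0727 + 0.1 * 0.7937 = 0.1448). Below is a minimal sketch that recomputes the first few EMA values from the rounded pseudo Dice numbers copied out of the log; it is illustrative only, not the trainer's own code:

```python
# Sketch: recompute the EMA pseudo Dice that the training log above reports.
# The per-epoch pseudo Dice values are copied (rounded) from the first epochs
# of the log, so the recomputed EMA matches the log up to rounding.
pseudo_dice = [0.0, 0.7269, 0.7937, 0.8043, 0.8278, 0.8309]

ema = None
for epoch, d in enumerate(pseudo_dice):
    ema = d if ema is None else 0.9 * ema + 0.1 * d
    print(f"epoch {epoch}: pseudo dice {d:.4f} -> EMA {ema:.4f}")

# Expected progression (cf. the log): 0.0000, 0.0727, 0.1448, 0.2107, 0.2725, 0.3283
```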
Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/plans.json ADDED
@@ -0,0 +1,454 @@
1
+ {
2
+ "dataset_name": "Dataset721_TSPrimeCTVP",
3
+ "plans_name": "nnUNetPlans",
4
+ "original_median_spacing_after_transp": [
5
+ 2.5,
6
+ 1.269531011581421,
7
+ 1.269531011581421
8
+ ],
9
+ "original_median_shape_after_transp": [
10
+ 241,
11
+ 512,
12
+ 512
13
+ ],
14
+ "image_reader_writer": "SimpleITKIO",
15
+ "transpose_forward": [
16
+ 0,
17
+ 1,
18
+ 2
19
+ ],
20
+ "transpose_backward": [
21
+ 0,
22
+ 1,
23
+ 2
24
+ ],
25
+ "configurations": {
26
+ "2d": {
27
+ "data_identifier": "nnUNetPlans_2d",
28
+ "preprocessor_name": "DefaultPreprocessor",
29
+ "batch_size": 12,
30
+ "patch_size": [
31
+ 512,
32
+ 512
33
+ ],
34
+ "median_image_size_in_voxels": [
35
+ 512.0,
36
+ 512.0
37
+ ],
38
+ "spacing": [
39
+ 1.269531011581421,
40
+ 1.269531011581421
41
+ ],
42
+ "normalization_schemes": [
43
+ "CTNormalization"
44
+ ],
45
+ "use_mask_for_norm": [
46
+ false
47
+ ],
48
+ "UNet_class_name": "PlainConvUNet",
49
+ "UNet_base_num_features": 32,
50
+ "n_conv_per_stage_encoder": [
51
+ 2,
52
+ 2,
53
+ 2,
54
+ 2,
55
+ 2,
56
+ 2,
57
+ 2,
58
+ 2
59
+ ],
60
+ "n_conv_per_stage_decoder": [
61
+ 2,
62
+ 2,
63
+ 2,
64
+ 2,
65
+ 2,
66
+ 2,
67
+ 2
68
+ ],
69
+ "num_pool_per_axis": [
70
+ 7,
71
+ 7
72
+ ],
73
+ "pool_op_kernel_sizes": [
74
+ [
75
+ 1,
76
+ 1
77
+ ],
78
+ [
79
+ 2,
80
+ 2
81
+ ],
82
+ [
83
+ 2,
84
+ 2
85
+ ],
86
+ [
87
+ 2,
88
+ 2
89
+ ],
90
+ [
91
+ 2,
92
+ 2
93
+ ],
94
+ [
95
+ 2,
96
+ 2
97
+ ],
98
+ [
99
+ 2,
100
+ 2
101
+ ],
102
+ [
103
+ 2,
104
+ 2
105
+ ]
106
+ ],
107
+ "conv_kernel_sizes": [
108
+ [
109
+ 3,
110
+ 3
111
+ ],
112
+ [
113
+ 3,
114
+ 3
115
+ ],
116
+ [
117
+ 3,
118
+ 3
119
+ ],
120
+ [
121
+ 3,
122
+ 3
123
+ ],
124
+ [
125
+ 3,
126
+ 3
127
+ ],
128
+ [
129
+ 3,
130
+ 3
131
+ ],
132
+ [
133
+ 3,
134
+ 3
135
+ ],
136
+ [
137
+ 3,
138
+ 3
139
+ ]
140
+ ],
141
+ "unet_max_num_features": 512,
142
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
143
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
144
+ "resampling_fn_data_kwargs": {
145
+ "is_seg": false,
146
+ "order": 3,
147
+ "order_z": 0,
148
+ "force_separate_z": null
149
+ },
150
+ "resampling_fn_seg_kwargs": {
151
+ "is_seg": true,
152
+ "order": 1,
153
+ "order_z": 0,
154
+ "force_separate_z": null
155
+ },
156
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
157
+ "resampling_fn_probabilities_kwargs": {
158
+ "is_seg": false,
159
+ "order": 1,
160
+ "order_z": 0,
161
+ "force_separate_z": null
162
+ },
163
+ "batch_dice": true
164
+ },
165
+ "3d_lowres": {
166
+ "data_identifier": "nnUNetPlans_3d_lowres",
167
+ "preprocessor_name": "DefaultPreprocessor",
168
+ "batch_size": 2,
169
+ "patch_size": [
170
+ 80,
171
+ 192,
172
+ 160
173
+ ],
174
+ "median_image_size_in_voxels": [
175
+ 130,
176
+ 275,
177
+ 275
178
+ ],
179
+ "spacing": [
180
+ 4.650736429273743,
181
+ 2.361701649461784,
182
+ 2.361701649461784
183
+ ],
184
+ "normalization_schemes": [
185
+ "CTNormalization"
186
+ ],
187
+ "use_mask_for_norm": [
188
+ false
189
+ ],
190
+ "UNet_class_name": "PlainConvUNet",
191
+ "UNet_base_num_features": 32,
192
+ "n_conv_per_stage_encoder": [
193
+ 2,
194
+ 2,
195
+ 2,
196
+ 2,
197
+ 2,
198
+ 2
199
+ ],
200
+ "n_conv_per_stage_decoder": [
201
+ 2,
202
+ 2,
203
+ 2,
204
+ 2,
205
+ 2
206
+ ],
207
+ "num_pool_per_axis": [
208
+ 4,
209
+ 5,
210
+ 5
211
+ ],
212
+ "pool_op_kernel_sizes": [
213
+ [
214
+ 1,
215
+ 1,
216
+ 1
217
+ ],
218
+ [
219
+ 2,
220
+ 2,
221
+ 2
222
+ ],
223
+ [
224
+ 2,
225
+ 2,
226
+ 2
227
+ ],
228
+ [
229
+ 2,
230
+ 2,
231
+ 2
232
+ ],
233
+ [
234
+ 2,
235
+ 2,
236
+ 2
237
+ ],
238
+ [
239
+ 1,
240
+ 2,
241
+ 2
242
+ ]
243
+ ],
244
+ "conv_kernel_sizes": [
245
+ [
246
+ 3,
247
+ 3,
248
+ 3
249
+ ],
250
+ [
251
+ 3,
252
+ 3,
253
+ 3
254
+ ],
255
+ [
256
+ 3,
257
+ 3,
258
+ 3
259
+ ],
260
+ [
261
+ 3,
262
+ 3,
263
+ 3
264
+ ],
265
+ [
266
+ 3,
267
+ 3,
268
+ 3
269
+ ],
270
+ [
271
+ 3,
272
+ 3,
273
+ 3
274
+ ]
275
+ ],
276
+ "unet_max_num_features": 320,
277
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
278
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
279
+ "resampling_fn_data_kwargs": {
280
+ "is_seg": false,
281
+ "order": 3,
282
+ "order_z": 0,
283
+ "force_separate_z": null
284
+ },
285
+ "resampling_fn_seg_kwargs": {
286
+ "is_seg": true,
287
+ "order": 1,
288
+ "order_z": 0,
289
+ "force_separate_z": null
290
+ },
291
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
292
+ "resampling_fn_probabilities_kwargs": {
293
+ "is_seg": false,
294
+ "order": 1,
295
+ "order_z": 0,
296
+ "force_separate_z": null
297
+ },
298
+ "batch_dice": false,
299
+ "next_stage": "3d_cascade_fullres"
300
+ },
301
+ "3d_fullres": {
302
+ "data_identifier": "nnUNetPlans_3d_fullres",
303
+ "preprocessor_name": "DefaultPreprocessor",
304
+ "batch_size": 2,
305
+ "patch_size": [
306
+ 80,
307
+ 192,
308
+ 160
309
+ ],
310
+ "median_image_size_in_voxels": [
311
+ 241.0,
312
+ 512.0,
313
+ 512.0
314
+ ],
315
+ "spacing": [
316
+ 2.5,
317
+ 1.269531011581421,
318
+ 1.269531011581421
319
+ ],
320
+ "normalization_schemes": [
321
+ "CTNormalization"
322
+ ],
323
+ "use_mask_for_norm": [
324
+ false
325
+ ],
326
+ "UNet_class_name": "PlainConvUNet",
327
+ "UNet_base_num_features": 32,
328
+ "n_conv_per_stage_encoder": [
329
+ 2,
330
+ 2,
331
+ 2,
332
+ 2,
333
+ 2,
334
+ 2
335
+ ],
336
+ "n_conv_per_stage_decoder": [
337
+ 2,
338
+ 2,
339
+ 2,
340
+ 2,
341
+ 2
342
+ ],
343
+ "num_pool_per_axis": [
344
+ 4,
345
+ 5,
346
+ 5
347
+ ],
348
+ "pool_op_kernel_sizes": [
349
+ [
350
+ 1,
351
+ 1,
352
+ 1
353
+ ],
354
+ [
355
+ 2,
356
+ 2,
357
+ 2
358
+ ],
359
+ [
360
+ 2,
361
+ 2,
362
+ 2
363
+ ],
364
+ [
365
+ 2,
366
+ 2,
367
+ 2
368
+ ],
369
+ [
370
+ 2,
371
+ 2,
372
+ 2
373
+ ],
374
+ [
375
+ 1,
376
+ 2,
377
+ 2
378
+ ]
379
+ ],
380
+ "conv_kernel_sizes": [
381
+ [
382
+ 3,
383
+ 3,
384
+ 3
385
+ ],
386
+ [
387
+ 3,
388
+ 3,
389
+ 3
390
+ ],
391
+ [
392
+ 3,
393
+ 3,
394
+ 3
395
+ ],
396
+ [
397
+ 3,
398
+ 3,
399
+ 3
400
+ ],
401
+ [
402
+ 3,
403
+ 3,
404
+ 3
405
+ ],
406
+ [
407
+ 3,
408
+ 3,
409
+ 3
410
+ ]
411
+ ],
412
+ "unet_max_num_features": 320,
413
+ "resampling_fn_data": "resample_data_or_seg_to_shape",
414
+ "resampling_fn_seg": "resample_data_or_seg_to_shape",
415
+ "resampling_fn_data_kwargs": {
416
+ "is_seg": false,
417
+ "order": 3,
418
+ "order_z": 0,
419
+ "force_separate_z": null
420
+ },
421
+ "resampling_fn_seg_kwargs": {
422
+ "is_seg": true,
423
+ "order": 1,
424
+ "order_z": 0,
425
+ "force_separate_z": null
426
+ },
427
+ "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
428
+ "resampling_fn_probabilities_kwargs": {
429
+ "is_seg": false,
430
+ "order": 1,
431
+ "order_z": 0,
432
+ "force_separate_z": null
433
+ },
434
+ "batch_dice": true
435
+ },
436
+ "3d_cascade_fullres": {
437
+ "inherits_from": "3d_fullres",
438
+ "previous_stage": "3d_lowres"
439
+ }
440
+ },
441
+ "experiment_planner_used": "ExperimentPlanner",
442
+ "label_manager": "LabelManager",
443
+ "foreground_intensity_properties_per_channel": {
444
+ "0": {
445
+ "max": 882.0,
446
+ "mean": 45.35713577270508,
447
+ "median": 48.0,
448
+ "min": -118.0,
449
+ "percentile_00_5": -48.0,
450
+ "percentile_99_5": 103.0,
451
+ "std": 26.203161239624023
452
+ }
453
+ }
454
+ }
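Aside: a minimal sketch of reading the plans.json added above to recover the 3d_lowres configuration used for this training run (assumes only the standard json module; the path matches the file added in this commit):

```python
# Sketch: inspect the fields of the plans.json shown above.
import json

plans_path = "Dataset721_TSPrimeCTVP/nnUNetTrainer__nnUNetPlans__3d_lowres/plans.json"
with open(plans_path) as f:
    plans = json.load(f)

cfg = plans["configurations"]["3d_lowres"]
print(cfg["patch_size"])            # [80, 192, 160]
print(cfg["spacing"])               # [4.6507..., 2.3617..., 2.3617...]
print(cfg["pool_op_kernel_sizes"])  # [[1,1,1], [2,2,2], [2,2,2], [2,2,2], [2,2,2], [1,2,2]]
print(plans["foreground_intensity_properties_per_channel"]["0"]["mean"])  # 45.357...
```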
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "channel_names": {
+ "0": "CT"
+ },
+ "labels": {
+ "background": 0,
+ "Ctvn": 1
+ },
+ "numTraining": 60,
+ "file_ending": ".nii.gz",
+ "numTest": 0
+ }
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json ADDED
@@ -0,0 +1,618 @@
1
+ {
2
+ "foreground_intensity_properties_per_channel": {
3
+ "0": {
4
+ "max": 1399.0,
5
+ "mean": -13.421175003051758,
6
+ "median": -38.0,
7
+ "min": -956.0,
8
+ "percentile_00_5": -119.0,
9
+ "percentile_99_5": 213.0,
10
+ "std": 80.46653747558594
11
+ }
12
+ },
13
+ "median_relative_size_after_cropping": 1.0,
14
+ "shapes_after_crop": [
15
+ [
16
+ 230,
17
+ 512,
18
+ 512
19
+ ],
20
+ [
21
+ 240,
22
+ 512,
23
+ 512
24
+ ],
25
+ [
26
+ 260,
27
+ 512,
28
+ 512
29
+ ],
30
+ [
31
+ 215,
32
+ 512,
33
+ 512
34
+ ],
35
+ [
36
+ 260,
37
+ 512,
38
+ 512
39
+ ],
40
+ [
41
+ 220,
42
+ 512,
43
+ 512
44
+ ],
45
+ [
46
+ 210,
47
+ 512,
48
+ 512
49
+ ],
50
+ [
51
+ 240,
52
+ 512,
53
+ 512
54
+ ],
55
+ [
56
+ 265,
57
+ 512,
58
+ 512
59
+ ],
60
+ [
61
+ 229,
62
+ 512,
63
+ 512
64
+ ],
65
+ [
66
+ 230,
67
+ 512,
68
+ 512
69
+ ],
70
+ [
71
+ 243,
72
+ 512,
73
+ 512
74
+ ],
75
+ [
76
+ 230,
77
+ 512,
78
+ 512
79
+ ],
80
+ [
81
+ 250,
82
+ 512,
83
+ 512
84
+ ],
85
+ [
86
+ 250,
87
+ 512,
88
+ 512
89
+ ],
90
+ [
91
+ 245,
92
+ 512,
93
+ 512
94
+ ],
95
+ [
96
+ 235,
97
+ 512,
98
+ 512
99
+ ],
100
+ [
101
+ 250,
102
+ 512,
103
+ 512
104
+ ],
105
+ [
106
+ 242,
107
+ 512,
108
+ 512
109
+ ],
110
+ [
111
+ 241,
112
+ 512,
113
+ 512
114
+ ],
115
+ [
116
+ 210,
117
+ 512,
118
+ 512
119
+ ],
120
+ [
121
+ 255,
122
+ 512,
123
+ 512
124
+ ],
125
+ [
126
+ 246,
127
+ 512,
128
+ 512
129
+ ],
130
+ [
131
+ 240,
132
+ 512,
133
+ 512
134
+ ],
135
+ [
136
+ 245,
137
+ 512,
138
+ 512
139
+ ],
140
+ [
141
+ 250,
142
+ 512,
143
+ 512
144
+ ],
145
+ [
146
+ 249,
147
+ 512,
148
+ 512
149
+ ],
150
+ [
151
+ 210,
152
+ 512,
153
+ 512
154
+ ],
155
+ [
156
+ 210,
157
+ 512,
158
+ 512
159
+ ],
160
+ [
161
+ 244,
162
+ 512,
163
+ 512
164
+ ],
165
+ [
166
+ 230,
167
+ 512,
168
+ 512
169
+ ],
170
+ [
171
+ 235,
172
+ 512,
173
+ 512
174
+ ],
175
+ [
176
+ 260,
177
+ 512,
178
+ 512
179
+ ],
180
+ [
181
+ 241,
182
+ 512,
183
+ 512
184
+ ],
185
+ [
186
+ 220,
187
+ 512,
188
+ 512
189
+ ],
190
+ [
191
+ 240,
192
+ 512,
193
+ 512
194
+ ],
195
+ [
196
+ 190,
197
+ 512,
198
+ 512
199
+ ],
200
+ [
201
+ 255,
202
+ 512,
203
+ 512
204
+ ],
205
+ [
206
+ 230,
207
+ 512,
208
+ 512
209
+ ],
210
+ [
211
+ 255,
212
+ 512,
213
+ 512
214
+ ],
215
+ [
216
+ 236,
217
+ 512,
218
+ 512
219
+ ],
220
+ [
221
+ 241,
222
+ 512,
223
+ 512
224
+ ],
225
+ [
226
+ 220,
227
+ 512,
228
+ 512
229
+ ],
230
+ [
231
+ 241,
232
+ 512,
233
+ 512
234
+ ],
235
+ [
236
+ 245,
237
+ 512,
238
+ 512
239
+ ],
240
+ [
241
+ 241,
242
+ 512,
243
+ 512
244
+ ],
245
+ [
246
+ 250,
247
+ 512,
248
+ 512
249
+ ],
250
+ [
251
+ 210,
252
+ 512,
253
+ 512
254
+ ],
255
+ [
256
+ 250,
257
+ 512,
258
+ 512
259
+ ],
260
+ [
261
+ 266,
262
+ 512,
263
+ 512
264
+ ],
265
+ [
266
+ 220,
267
+ 512,
268
+ 512
269
+ ],
270
+ [
271
+ 230,
272
+ 512,
273
+ 512
274
+ ],
275
+ [
276
+ 280,
277
+ 512,
278
+ 512
279
+ ],
280
+ [
281
+ 260,
282
+ 512,
283
+ 512
284
+ ],
285
+ [
286
+ 245,
287
+ 512,
288
+ 512
289
+ ],
290
+ [
291
+ 220,
292
+ 512,
293
+ 512
294
+ ],
295
+ [
296
+ 240,
297
+ 512,
298
+ 512
299
+ ],
300
+ [
301
+ 250,
302
+ 512,
303
+ 512
304
+ ],
305
+ [
306
+ 226,
307
+ 512,
308
+ 512
309
+ ],
310
+ [
311
+ 240,
312
+ 512,
313
+ 512
314
+ ]
315
+ ],
316
+ "spacings": [
317
+ [
318
+ 2.5,
319
+ 1.269531011581421,
320
+ 1.269531011581421
321
+ ],
322
+ [
323
+ 2.5,
324
+ 1.269531011581421,
325
+ 1.269531011581421
326
+ ],
327
+ [
328
+ 2.5,
329
+ 1.269531011581421,
330
+ 1.269531011581421
331
+ ],
332
+ [
333
+ 2.5,
334
+ 1.269531011581421,
335
+ 1.269531011581421
336
+ ],
337
+ [
338
+ 2.5,
339
+ 1.269531011581421,
340
+ 1.269531011581421
341
+ ],
342
+ [
343
+ 2.5,
344
+ 1.269531011581421,
345
+ 1.269531011581421
346
+ ],
347
+ [
348
+ 2.5,
349
+ 1.269531011581421,
350
+ 1.269531011581421
351
+ ],
352
+ [
353
+ 2.5,
354
+ 1.269531011581421,
355
+ 1.269531011581421
356
+ ],
357
+ [
358
+ 2.5,
359
+ 1.269531011581421,
360
+ 1.269531011581421
361
+ ],
362
+ [
363
+ 2.5,
364
+ 1.269531011581421,
365
+ 1.269531011581421
366
+ ],
367
+ [
368
+ 2.5,
369
+ 1.269531011581421,
370
+ 1.269531011581421
371
+ ],
372
+ [
373
+ 2.5,
374
+ 1.269531011581421,
375
+ 1.269531011581421
376
+ ],
377
+ [
378
+ 2.5,
379
+ 1.269531011581421,
380
+ 1.269531011581421
381
+ ],
382
+ [
383
+ 2.5,
384
+ 1.269531011581421,
385
+ 1.269531011581421
386
+ ],
387
+ [
388
+ 2.5,
389
+ 1.269531011581421,
390
+ 1.269531011581421
391
+ ],
392
+ [
393
+ 2.5,
394
+ 1.269531011581421,
395
+ 1.269531011581421
396
+ ],
397
+ [
398
+ 2.5,
399
+ 1.269531011581421,
400
+ 1.269531011581421
401
+ ],
402
+ [
403
+ 2.5,
404
+ 1.269531011581421,
405
+ 1.269531011581421
406
+ ],
407
+ [
408
+ 2.5,
409
+ 1.269531011581421,
410
+ 1.269531011581421
411
+ ],
412
+ [
413
+ 2.5,
414
+ 1.269531011581421,
415
+ 1.269531011581421
416
+ ],
417
+ [
418
+ 2.5,
419
+ 1.269531011581421,
420
+ 1.269531011581421
421
+ ],
422
+ [
423
+ 2.5,
424
+ 1.269531011581421,
425
+ 1.269531011581421
426
+ ],
427
+ [
428
+ 2.5,
429
+ 1.269531011581421,
430
+ 1.269531011581421
431
+ ],
432
+ [
433
+ 2.5,
434
+ 1.269531011581421,
435
+ 1.269531011581421
436
+ ],
437
+ [
438
+ 2.5,
439
+ 1.269531011581421,
440
+ 1.269531011581421
441
+ ],
442
+ [
443
+ 2.5,
444
+ 1.269531011581421,
445
+ 1.269531011581421
446
+ ],
447
+ [
448
+ 2.5,
449
+ 1.269531011581421,
450
+ 1.269531011581421
451
+ ],
452
+ [
453
+ 2.5,
454
+ 1.269531011581421,
455
+ 1.269531011581421
456
+ ],
457
+ [
458
+ 2.5,
459
+ 1.269531011581421,
460
+ 1.269531011581421
461
+ ],
462
+ [
463
+ 2.5,
464
+ 1.269531011581421,
465
+ 1.269531011581421
466
+ ],
467
+ [
468
+ 2.5,
469
+ 1.269531011581421,
470
+ 1.269531011581421
471
+ ],
472
+ [
473
+ 2.5,
474
+ 1.269531011581421,
475
+ 1.269531011581421
476
+ ],
477
+ [
478
+ 2.5,
479
+ 1.269531011581421,
480
+ 1.269531011581421
481
+ ],
482
+ [
483
+ 2.5,
484
+ 1.269531011581421,
485
+ 1.269531011581421
486
+ ],
487
+ [
488
+ 2.5,
489
+ 1.269531011581421,
490
+ 1.269531011581421
491
+ ],
492
+ [
493
+ 2.5,
494
+ 1.269531011581421,
495
+ 1.269531011581421
496
+ ],
497
+ [
498
+ 2.5,
499
+ 1.269531011581421,
500
+ 1.269531011581421
501
+ ],
502
+ [
503
+ 2.5,
504
+ 1.269531011581421,
505
+ 1.269531011581421
506
+ ],
507
+ [
508
+ 2.5,
509
+ 1.269531011581421,
510
+ 1.269531011581421
511
+ ],
512
+ [
513
+ 2.5,
514
+ 1.269531011581421,
515
+ 1.269531011581421
516
+ ],
517
+ [
518
+ 2.5,
519
+ 1.269531011581421,
520
+ 1.269531011581421
521
+ ],
522
+ [
523
+ 2.5,
524
+ 1.269531011581421,
525
+ 1.269531011581421
526
+ ],
527
+ [
528
+ 2.5,
529
+ 1.269531011581421,
530
+ 1.269531011581421
531
+ ],
532
+ [
533
+ 2.5,
534
+ 1.269531011581421,
535
+ 1.269531011581421
536
+ ],
537
+ [
538
+ 2.5,
539
+ 1.269531011581421,
540
+ 1.269531011581421
541
+ ],
542
+ [
543
+ 2.5,
544
+ 1.269531011581421,
545
+ 1.269531011581421
546
+ ],
547
+ [
548
+ 2.5,
549
+ 1.269531011581421,
550
+ 1.269531011581421
551
+ ],
552
+ [
553
+ 2.5,
554
+ 1.269531011581421,
555
+ 1.269531011581421
556
+ ],
557
+ [
558
+ 2.5,
559
+ 1.269531011581421,
560
+ 1.269531011581421
561
+ ],
562
+ [
563
+ 2.5,
564
+ 1.269531011581421,
565
+ 1.269531011581421
566
+ ],
567
+ [
568
+ 2.5,
569
+ 1.269531011581421,
570
+ 1.269531011581421
571
+ ],
572
+ [
573
+ 2.5,
574
+ 1.269531011581421,
575
+ 1.269531011581421
576
+ ],
577
+ [
578
+ 2.5,
579
+ 1.269531011581421,
580
+ 1.269531011581421
581
+ ],
582
+ [
583
+ 2.5,
584
+ 1.269531011581421,
585
+ 1.269531011581421
586
+ ],
587
+ [
588
+ 2.5,
589
+ 1.269531011581421,
590
+ 1.269531011581421
591
+ ],
592
+ [
593
+ 2.5,
594
+ 1.269531011581421,
595
+ 1.269531011581421
596
+ ],
597
+ [
598
+ 2.5,
599
+ 1.269531011581421,
600
+ 1.269531011581421
601
+ ],
602
+ [
603
+ 2.5,
604
+ 1.269531011581421,
605
+ 1.269531011581421
606
+ ],
607
+ [
608
+ 2.5,
609
+ 1.269531011581421,
610
+ 1.269531011581421
611
+ ],
612
+ [
613
+ 2.5,
614
+ 1.269531011581421,
615
+ 1.269531011581421
616
+ ]
617
+ ]
618
+ }
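The fingerprint lists per-case shapes (after cropping) and voxel spacings; nnU-Net's experiment planner summarizes them by their medians. A minimal sketch of reproducing that summary from this file, assuming a local copy, standard Python and NumPy (path hypothetical):

    import json
    import numpy as np

    # Hypothetical local path to the dataset_fingerprint.json shown above.
    with open("dataset_fingerprint.json") as f:
        fp = json.load(f)

    median_shape = np.median(np.array(fp["shapes_after_crop"]), axis=0)
    median_spacing = np.median(np.array(fp["spacings"]), axis=0)
    print(median_shape)    # [241. 512. 512.]
    print(median_spacing)  # [2.5        1.26953101 1.26953101]

These are the same values that reappear below in debug.json as the median image size and spacing of the 3d_fullres configuration.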
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a992f2e00a0d7cdc80021563fea0efdce7baf75e616531d7afa8286effb0fb1a
+ size 246720930
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_final.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f716be9baa2085228674a5398b3cacf26d7192ce45b752fcec7f8fc20f78e904
+ size 246759422
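The two .pth entries above are Git LFS pointer files, not the weights themselves: they record only the SHA-256 of the real checkpoint and its size in bytes. A minimal sketch of verifying a downloaded checkpoint against such a pointer, assuming the LFS-resolved file is available locally (path hypothetical):

    import hashlib
    from pathlib import Path

    # Hypothetical local path to the LFS-resolved checkpoint (~246 MB).
    ckpt = Path("checkpoint_final.pth")

    h = hashlib.sha256()
    with ckpt.open("rb") as f:
        # Hash in 1 MiB chunks to avoid loading the whole file into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    print(h.hexdigest())        # should equal the oid listed in the pointer
    print(ckpt.stat().st_size)  # should equal the size listed in the pointer (246759422)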
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/debug.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "_best_ema": "None",
3
+ "batch_size": "2",
4
+ "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}",
5
+ "configuration_name": "3d_fullres",
6
+ "cudnn_version": 8700,
7
+ "current_epoch": "0",
8
+ "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f34c353bd90>",
9
+ "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7f34c4245890>",
10
+ "dataloader_train.num_processes": "12",
11
+ "dataloader_train.transform": "Compose ( [SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [80, 192, 160], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-0.5235987755982988, 0.5235987755982988), angle_y = (-0.5235987755982988, 0.5235987755982988), angle_z = (-0.5235987755982988, 0.5235987755982988), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = None ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
+ "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f34c304b9d0>",
13
+ "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7f34c3e88a90>",
14
+ "dataloader_val.num_processes": "6",
15
+ "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
+ "dataset_json": "{'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvn': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}",
17
+ "device": "cuda:0",
18
+ "disable_checkpointing": "False",
19
+ "fold": "0",
20
+ "folder_with_segs_from_previous_stage": "None",
21
+ "gpu_name": "NVIDIA RTX A4000",
22
+ "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7f34c3c8e5d0>",
23
+ "hostname": "surajit-Precision-3660",
24
+ "inference_allowed_mirroring_axes": "(0, 1, 2)",
25
+ "initial_lr": "0.01",
26
+ "is_cascaded": "False",
27
+ "is_ddp": "False",
28
+ "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7f34c42bac50>",
29
+ "local_rank": "0",
30
+ "log_file": "./data/nnUNet_results/Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_1_18_43_12.txt",
31
+ "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7f34c3583210>",
32
+ "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
+ "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7f34c3964390>",
34
+ "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset722_TSPrimeCTVN', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1399.0, 'mean': -13.421175003051758, 'median': -38.0, 'min': -956.0, 'percentile_00_5': -119.0, 'percentile_99_5': 213.0, 'std': 80.46653747558594}}}, 'configuration': '3d_fullres', 'fold': 0, 'dataset_json': {'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'Ctvn': 1}, 'numTraining': 60, 'file_ending': '.nii.gz', 'numTest': 0}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
+ "network": "PlainConvUNet",
36
+ "num_epochs": "1000",
37
+ "num_input_channels": "1",
38
+ "num_iterations_per_epoch": "250",
39
+ "num_val_iterations_per_epoch": "50",
40
+ "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
+ "output_folder": "./data/nnUNet_results/Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0",
42
+ "output_folder_base": "./data/nnUNet_results/Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres",
43
+ "oversample_foreground_percent": "0.33",
44
+ "plans_manager": "{'dataset_name': 'Dataset722_TSPrimeCTVN', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [2.5, 1.269531011581421, 1.269531011581421], 'original_median_shape_after_transp': [241, 512, 512], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 12, 'patch_size': [512, 512], 'median_image_size_in_voxels': [512.0, 512.0], 'spacing': [1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [7, 7], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_lowres': {'data_identifier': 'nnUNetPlans_3d_lowres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [130, 275, 275], 'spacing': [4.650736429273743, 2.361701649461784, 2.361701649461784], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False, 'next_stage': '3d_cascade_fullres'}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [80, 192, 160], 'median_image_size_in_voxels': [241.0, 512.0, 512.0], 'spacing': [2.5, 1.269531011581421, 1.269531011581421], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2], 'num_pool_per_axis': [4, 5, 5], 'pool_op_kernel_sizes': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 
3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_cascade_fullres': {'inherits_from': '3d_fullres', 'previous_stage': '3d_lowres'}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1399.0, 'mean': -13.421175003051758, 'median': -38.0, 'min': -956.0, 'percentile_00_5': -119.0, 'percentile_99_5': 213.0, 'std': 80.46653747558594}}}",
45
+ "preprocessed_dataset_folder": "./data/nnUNet_preprocessed/Dataset722_TSPrimeCTVN/nnUNetPlans_3d_fullres",
46
+ "preprocessed_dataset_folder_base": "./data/nnUNet_preprocessed/Dataset722_TSPrimeCTVN",
47
+ "save_every": "50",
48
+ "torch_version": "2.1.0",
49
+ "unpack_dataset": "True",
50
+ "was_initialized": "True",
51
+ "weight_decay": "3e-05"
52
+ }
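debug.json records the exact trainer state for this fold (batch size 2, patch size [80, 192, 160], 1000 epochs, SGD with initial learning rate 0.01 on an NVIDIA RTX A4000). The checkpoints it refers to are ordinary PyTorch pickles; a minimal sketch of inspecting one on CPU, assuming a local copy and PyTorch installed (path hypothetical, and the exact keys depend on the nnU-Net version):

    import torch

    # Hypothetical local path to one of the checkpoints from this folder.
    ckpt = torch.load("checkpoint_best.pth", map_location="cpu")

    # The checkpoint is a plain dict; list what nnU-Net stored in it
    # (network weights, optimizer state, epoch counter, ...).
    for key, value in ckpt.items():
        print(key, type(value))

For actual segmentation one would normally go through nnU-Net's own inference entry point (nnUNetv2_predict) rather than loading the checkpoint by hand.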
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/progress.png ADDED
Dataset722_TSPrimeCTVN/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_11_1_18_43_12.txt ADDED
The diff for this file is too large to render. See raw diff