prasb commited on
Commit
37d8286
·
verified ·
1 Parent(s): b43e9c1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. my_container_sandbox/.singularity.d/runscript +6 -0
  2. my_container_sandbox/etc/debconf.conf +83 -0
  3. my_container_sandbox/etc/deluser.conf +20 -0
  4. my_container_sandbox/etc/hostname +1 -0
  5. my_container_sandbox/etc/issue.net +1 -0
  6. my_container_sandbox/etc/lsb-release +4 -0
  7. my_container_sandbox/etc/mailcap.order +10 -0
  8. my_container_sandbox/etc/mke2fs.conf +48 -0
  9. my_container_sandbox/etc/os-release +12 -0
  10. my_container_sandbox/etc/passwd +20 -0
  11. my_container_sandbox/etc/screenrc +108 -0
  12. my_container_sandbox/etc/sensors3.conf +536 -0
  13. my_container_sandbox/etc/shadow- +20 -0
  14. my_container_sandbox/etc/subgid +0 -0
  15. my_container_sandbox/etc/subuid +0 -0
  16. my_container_sandbox/workspace/.pip/pip.conf +1 -0
  17. my_container_sandbox/workspace/anaconda3/LICENSE.txt +83 -0
  18. my_container_sandbox/workspace/difftumor/eval2.py +32 -0
  19. my_container_sandbox/workspace/difftumor/organ_mask_access/dataset/dataloader.py +152 -0
  20. my_container_sandbox/workspace/difftumor/organ_mask_access/model/DiNTS.py +672 -0
  21. my_container_sandbox/workspace/difftumor/organ_mask_access/model/Unetpp.py +152 -0
  22. my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/DiNTS.cpython-38.pyc +0 -0
  23. my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/SENet.cpython-37.pyc +0 -0
  24. my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/SENet.cpython-38.pyc +0 -0
  25. my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/SwinUNETR.cpython-38.pyc +0 -0
  26. my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/Unetpp.cpython-38.pyc +0 -0
  27. my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/Universal_model.cpython-37.pyc +0 -0
  28. my_container_sandbox/workspace/difftumor/organ_mask_access/organ_test.py +135 -0
  29. my_container_sandbox/workspace/difftumor/organ_mask_access/pretrained_weights/.DS_Store +0 -0
  30. my_container_sandbox/workspace/difftumor/organ_mask_access/pretrained_weights/clip_embedding.py +27 -0
  31. my_container_sandbox/workspace/difftumor/organ_mask_access/requirements.txt +7 -0
  32. my_container_sandbox/workspace/difftumor/organ_mask_access/utils/__pycache__/utils.cpython-37.pyc +0 -0
  33. my_container_sandbox/workspace/difftumor/organ_mask_access/utils/label_transfer.py +310 -0
  34. my_container_sandbox/workspace/difftumor/organ_mask_access/utils/utils.py +767 -0
  35. my_container_sandbox/workspace/difftumor/tumor_mask_access/.DS_Store +0 -0
  36. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/__pycache__/task_params.cpython-38.pyc +0 -0
  37. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/calculate_task_params.py +87 -0
  38. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task01/finetune_multi_gpu.sh +23 -0
  39. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task02/finetune.sh +22 -0
  40. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task03/finetune_multi_gpu.sh +24 -0
  41. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task04/train.sh +19 -0
  42. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task04/train_multi_gpu.sh +21 -0
  43. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task04/val_multi_gpu.sh +20 -0
  44. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task05/finetune.sh +22 -0
  45. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task06/finetune.sh +22 -0
  46. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task07/finetune_multi_gpu.sh +28 -0
  47. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task08/finetune_multi_gpu.sh +28 -0
  48. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/config/dataset_task02.json +1 -0
  49. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/config/dataset_task04.json +0 -0
  50. my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/config/dataset_task06.json +1 -0
my_container_sandbox/.singularity.d/runscript ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #!/bin/sh
2
+
3
+ cd /workspace/difftumor
4
+ python eval2.py --gpu $GPU
5
+
6
+
my_container_sandbox/etc/debconf.conf ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This is the main config file for debconf. It tells debconf where to
2
+ # store data. The format of this file is a set of stanzas. Each stanza
3
+ # except the first sets up a database for debconf to use. For details, see
4
+ # debconf.conf(5) (in the debconf-doc package).
5
+ #
6
+ # So first things first. This first stanza gives the names of two databases.
7
+
8
+ # Debconf will use this database to store the data you enter into it,
9
+ # and some other dynamic data.
10
+ Config: configdb
11
+ # Debconf will use this database to store static template data.
12
+ Templates: templatedb
13
+
14
+ # World-readable, and accepts everything but passwords.
15
+ Name: config
16
+ Driver: File
17
+ Mode: 644
18
+ Reject-Type: password
19
+ Filename: /var/cache/debconf/config.dat
20
+
21
+ # Not world readable (the default), and accepts only passwords.
22
+ Name: passwords
23
+ Driver: File
24
+ Mode: 600
25
+ Backup: false
26
+ Required: false
27
+ Accept-Type: password
28
+ Filename: /var/cache/debconf/passwords.dat
29
+
30
+ # Set up the configdb database. By default, it consists of a stack of two
31
+ # databases, one to hold passwords and one for everything else.
32
+ Name: configdb
33
+ Driver: Stack
34
+ Stack: config, passwords
35
+
36
+ # Set up the templatedb database, which is a single flat text file
37
+ # by default.
38
+ Name: templatedb
39
+ Driver: File
40
+ Mode: 644
41
+ Filename: /var/cache/debconf/templates.dat
42
+
43
+ # Well that was pretty straightforward, and it will be enough for most
44
+ # people's needs, but debconf's database drivers can be used to do much
45
+ # more interesting things. For example, suppose you want to use config
46
+ # data from another host, which is mounted over nfs or perhaps the database
47
+ # is accessed via LDAP. You don't want to write to the remote debconf database,
48
+ # just read from it, so you still need a local database for local changes.
49
+ #
50
+ # A remote NFS mounted database, read-only. It is optional; if debconf
51
+ # fails to use it it will not abort.
52
+ #Name: remotedb
53
+ #Driver: DirTree
54
+ #Directory: /mnt/otherhost/var/cache/debconf/config
55
+ #Readonly: true
56
+ #Required: false
57
+ #
58
+ # A remote LDAP database. It is also read-only. The password is really
59
+ # only necessary if the database is not accessible anonymously.
60
+ # Option KeyByKey instructs the backend to retrieve keys from the LDAP
61
+ # server individually (when they are requested), instead of loading all
62
+ # keys at startup. The default is 0, and should only be enabled if you
63
+ # want to track accesses to individual keys on the LDAP server side.
64
+ #Name: remotedb
65
+ #Driver: LDAP
66
+ #Server: remotehost
67
+ #BaseDN: cn=debconf,dc=domain,dc=com
68
+ #BindDN: uid=admin,dc=domain,dc=com
69
+ #BindPasswd: secret
70
+ #KeyByKey: 0
71
+ #
72
+ # A stack consisting of two databases. Values will be read from
73
+ # the first database in the stack to contain a value. In this example,
74
+ # writes always go to the first database.
75
+ #Name: fulldb
76
+ #Driver: Stack
77
+ #Stack: configdb, remotedb
78
+ #
79
+ # In this example, we'd use Config: fulldb at the top of the file
80
+ # to make it use the combination of the databases.
81
+ #
82
+ # Even more complex and interesting setups are possible, see the
83
+ # debconf.conf(5) page for details.
my_container_sandbox/etc/deluser.conf ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /etc/deluser.conf: `deluser' configuration.
2
+
3
+ # Remove home directory and mail spool when user is removed
4
+ REMOVE_HOME = 0
5
+
6
+ # Remove all files on the system owned by the user to be removed
7
+ REMOVE_ALL_FILES = 0
8
+
9
+ # Backup files before removing them. This options has only an effect if
10
+ # REMOVE_HOME or REMOVE_ALL_FILES is set.
11
+ BACKUP = 0
12
+
13
+ # target directory for the backup file
14
+ BACKUP_TO = "."
15
+
16
+ # delete a group even there are still users in this group
17
+ ONLY_IF_EMPTY = 0
18
+
19
+ # exclude these filesystem types when searching for files of a user to backup
20
+ EXCLUDE_FSTYPES = "(proc|sysfs|usbfs|devpts|tmpfs|afs)"
my_container_sandbox/etc/hostname ADDED
@@ -0,0 +1 @@
 
 
1
+ localhost.localdomain
my_container_sandbox/etc/issue.net ADDED
@@ -0,0 +1 @@
 
 
1
+ Ubuntu 20.04.6 LTS
my_container_sandbox/etc/lsb-release ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ DISTRIB_ID=Ubuntu
2
+ DISTRIB_RELEASE=20.04
3
+ DISTRIB_CODENAME=focal
4
+ DISTRIB_DESCRIPTION="Ubuntu 20.04.6 LTS"
my_container_sandbox/etc/mailcap.order ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ #
3
+ # Mailcap.order: This file allows a system-wide override of MIME program
4
+ # preferences. See the mailcap.order(5) man page for more information.
5
+ #
6
+ # After modifying this file, be sure to run /usr/sbin/update-mime (as root)
7
+ # to propagate the changes into the /etc/mailcap file.
8
+ #
9
+ ################################################################################
10
+
my_container_sandbox/etc/mke2fs.conf ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [defaults]
2
+ base_features = sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr
3
+ default_mntopts = acl,user_xattr
4
+ enable_periodic_fsck = 0
5
+ blocksize = 4096
6
+ inode_size = 256
7
+ inode_ratio = 16384
8
+
9
+ [fs_types]
10
+ ext3 = {
11
+ features = has_journal
12
+ }
13
+ ext4 = {
14
+ features = has_journal,extent,huge_file,flex_bg,metadata_csum,64bit,dir_nlink,extra_isize
15
+ inode_size = 256
16
+ }
17
+ small = {
18
+ inode_size = 128
19
+ inode_ratio = 4096
20
+ }
21
+ floppy = {
22
+ inode_size = 128
23
+ inode_ratio = 8192
24
+ }
25
+ big = {
26
+ inode_ratio = 32768
27
+ }
28
+ huge = {
29
+ inode_ratio = 65536
30
+ }
31
+ news = {
32
+ inode_ratio = 4096
33
+ }
34
+ largefile = {
35
+ inode_ratio = 1048576
36
+ blocksize = -1
37
+ }
38
+ largefile4 = {
39
+ inode_ratio = 4194304
40
+ blocksize = -1
41
+ }
42
+ hurd = {
43
+ blocksize = 4096
44
+ inode_size = 128
45
+ }
46
+
47
+ [options]
48
+ fname_encoding = utf8
my_container_sandbox/etc/os-release ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ NAME="Ubuntu"
2
+ VERSION="20.04.6 LTS (Focal Fossa)"
3
+ ID=ubuntu
4
+ ID_LIKE=debian
5
+ PRETTY_NAME="Ubuntu 20.04.6 LTS"
6
+ VERSION_ID="20.04"
7
+ HOME_URL="https://www.ubuntu.com/"
8
+ SUPPORT_URL="https://help.ubuntu.com/"
9
+ BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
10
+ PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
11
+ VERSION_CODENAME=focal
12
+ UBUNTU_CODENAME=focal
my_container_sandbox/etc/passwd ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ root:x:0:0:root:/root:/bin/bash
2
+ daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
3
+ bin:x:2:2:bin:/bin:/usr/sbin/nologin
4
+ sys:x:3:3:sys:/dev:/usr/sbin/nologin
5
+ sync:x:4:65534:sync:/bin:/bin/sync
6
+ games:x:5:60:games:/usr/games:/usr/sbin/nologin
7
+ man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
8
+ lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
9
+ mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
10
+ news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
11
+ uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
12
+ proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
13
+ www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
14
+ backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
15
+ list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
16
+ irc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin
17
+ gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
18
+ nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
19
+ _apt:x:100:65534::/nonexistent:/usr/sbin/nologin
20
+ sshd:x:101:65534::/run/sshd:/usr/sbin/nologin
my_container_sandbox/etc/screenrc ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # $Id: screenrc,v 1.15 2003/10/08 11:39:03 zal Exp $
2
+ #
3
+ # /etc/screenrc
4
+ #
5
+ # This is the system wide screenrc.
6
+ #
7
+ # You can use this file to change the default behavior of screen system wide
8
+ # or copy it to ~/.screenrc and use it as a starting point for your own
9
+ # settings.
10
+ #
11
+ # Commands in this file are used to set options, bind screen functions to
12
+ # keys, redefine terminal capabilities, and to automatically establish one or
13
+ # more windows at the beginning of your screen session.
14
+ #
15
+ # This is not a comprehensive list of options, look at the screen manual for
16
+ # details on everything that you can put in this file.
17
+ #
18
+
19
+ # ------------------------------------------------------------------------------
20
+ # SCREEN SETTINGS
21
+ # ------------------------------------------------------------------------------
22
+
23
+ #startup_message off
24
+ #nethack on
25
+
26
+ #defflow on # will force screen to process ^S/^Q
27
+ deflogin on
28
+ #autodetach off
29
+
30
+ # turn visual bell on
31
+ vbell on
32
+ vbell_msg " Wuff ---- Wuff!! "
33
+
34
+ # define a bigger scrollback, default is 100 lines
35
+ defscrollback 1024
36
+
37
+ # ------------------------------------------------------------------------------
38
+ # SCREEN KEYBINDINGS
39
+ # ------------------------------------------------------------------------------
40
+
41
+ # Remove some stupid / dangerous key bindings
42
+ bind ^k
43
+ #bind L
44
+ bind ^\
45
+ # Make them better
46
+ bind \\ quit
47
+ bind K kill
48
+ bind I login on
49
+ bind O login off
50
+ bind } history
51
+
52
+ # An example of a "screen scraper" which will launch urlview on the current
53
+ # screen window
54
+ #
55
+ #bind ^B eval "hardcopy_append off" "hardcopy -h $HOME/.screen-urlview" "screen urlview $HOME/.screen-urlview"
56
+
57
+ # ------------------------------------------------------------------------------
58
+ # TERMINAL SETTINGS
59
+ # ------------------------------------------------------------------------------
60
+
61
+ # The vt100 description does not mention "dl". *sigh*
62
+ termcapinfo vt100 dl=5\E[M
63
+
64
+ # turn sending of screen messages to hardstatus off
65
+ hardstatus off
66
+ # Set the hardstatus prop on gui terms to set the titlebar/icon title
67
+ termcapinfo xterm*|rxvt*|kterm*|Eterm* hs:ts=\E]0;:fs=\007:ds=\E]0;\007
68
+ # use this for the hard status string
69
+ hardstatus string "%h%? users: %u%?"
70
+
71
+ # An alternative hardstatus to display a bar at the bottom listing the
72
+ # windownames and highlighting the current windowname in blue. (This is only
73
+ # enabled if there is no hardstatus setting for your terminal)
74
+ #
75
+ #hardstatus lastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%<"
76
+
77
+ # set these terminals up to be 'optimal' instead of vt100
78
+ termcapinfo xterm*|linux*|rxvt*|Eterm* OP
79
+
80
+ # Change the xterm initialization string from is2=\E[!p\E[?3;4l\E[4l\E>
81
+ # (This fixes the "Aborted because of window size change" konsole symptoms found
82
+ # in bug #134198)
83
+ termcapinfo xterm 'is=\E[r\E[m\E[2J\E[H\E[?7h\E[?1;4;6l'
84
+
85
+ # To get screen to add lines to xterm's scrollback buffer, uncomment the
86
+ # following termcapinfo line which tells xterm to use the normal screen buffer
87
+ # (which has scrollback), not the alternate screen buffer.
88
+ #
89
+ #termcapinfo xterm|xterms|xs|rxvt ti@:te@
90
+
91
+ # Enable non-blocking mode to better cope with flaky ssh connections.
92
+ defnonblock 5
93
+
94
+ # ------------------------------------------------------------------------------
95
+ # STARTUP SCREENS
96
+ # ------------------------------------------------------------------------------
97
+
98
+ # Example of automatically running some programs in windows on screen startup.
99
+ #
100
+ # The following will open top in the first window, an ssh session to monkey
101
+ # in the next window, and then open mutt and tail in windows 8 and 9
102
+ # respectively.
103
+ #
104
+ # screen top
105
+ # screen -t monkey ssh monkey
106
+ # screen -t mail 8 mutt
107
+ # screen -t daemon 9 tail -f /var/log/daemon.log
108
+
my_container_sandbox/etc/sensors3.conf ADDED
@@ -0,0 +1,536 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # libsensors configuration file
2
+ # -----------------------------
3
+ #
4
+ # This default configuration file only includes statements which do not
5
+ # differ from one mainboard to the next. Only label, compute and set
6
+ # statements for internal voltage and temperature sensors are included.
7
+ #
8
+ # In general, local changes should not be added to this file, but rather
9
+ # placed in custom configuration files located in /etc/sensors.d. This
10
+ # approach makes further updates much easier.
11
+ #
12
+ # Such custom configuration files for specific mainboards can be found in
13
+ # "configs" directory of lm-sensors package.
14
+ #
15
+ # Please contribute back a configuration of your board so other users with
16
+ # the same hardware won't need to recreate it again and again.
17
+
18
+ chip "lm78-*" "lm79-*" "lm80-*" "lm96080-*"
19
+
20
+ label temp1 "M/B Temp"
21
+
22
+
23
+ chip "w83792d-*"
24
+
25
+ label in0 "VcoreA"
26
+ label in1 "VcoreB"
27
+ label in6 "+5V"
28
+ label in7 "5VSB"
29
+ label in8 "Vbat"
30
+
31
+ set in6_min 5.0 * 0.90
32
+ set in6_max 5.0 * 1.10
33
+ set in7_min 5.0 * 0.90
34
+ set in7_max 5.0 * 1.10
35
+ set in8_min 3.0 * 0.90
36
+ set in8_max 3.0 * 1.10
37
+
38
+
39
+ chip "w83793-*"
40
+
41
+ label in0 "VcoreA"
42
+ label in1 "VcoreB"
43
+ label in7 "+5V"
44
+ label in8 "5VSB"
45
+ label in9 "Vbat"
46
+
47
+ set in7_min 5.0 * 0.90
48
+ set in7_max 5.0 * 1.10
49
+ set in8_min 5.0 * 0.90
50
+ set in8_max 5.0 * 1.10
51
+ set in9_min 3.0 * 0.90
52
+ set in9_max 3.0 * 1.10
53
+
54
+
55
+ chip "w83795g-*" "w83795adg-*"
56
+
57
+ label in12 "+3.3V"
58
+ label in13 "3VSB"
59
+ label in14 "Vbat"
60
+
61
+ set in12_min 3.3 * 0.90
62
+ set in12_max 3.3 * 1.10
63
+ set in13_min 3.3 * 0.90
64
+ set in13_max 3.3 * 1.10
65
+ set in14_min 3.0 * 0.90
66
+ set in14_max 3.3 * 1.10
67
+
68
+
69
+ chip "via686a-*"
70
+
71
+ label in0 "Vcore"
72
+ label in2 "+3.3V"
73
+ label in3 "+5V"
74
+ label in4 "+12V"
75
+
76
+ set in2_min 3.3 * 0.90
77
+ set in2_max 3.3 * 1.10
78
+ set in3_min 5.0 * 0.90
79
+ set in3_max 5.0 * 1.10
80
+ set in4_min 12.0 * 0.90
81
+ set in4_max 12.0 * 1.10
82
+
83
+
84
+ chip "adm1025-*" "ne1619-*"
85
+
86
+ label in1 "Vcore"
87
+ label in2 "+3.3V"
88
+ label in3 "+5V"
89
+ label in4 "+12V"
90
+ label in5 "VCC"
91
+
92
+ set in2_min 3.3 * 0.90
93
+ set in2_max 3.3 * 1.10
94
+ set in3_min 5.0 * 0.90
95
+ set in3_max 5.0 * 1.10
96
+ set in5_min 3.3 * 0.90
97
+ set in5_max 3.3 * 1.10
98
+ # Depending on how your chip is hardwired, you may or may not have
99
+ # +12V readings.
100
+ # set in4_min 12.0 * 0.90
101
+ # set in4_max 12.0 * 1.10
102
+
103
+ label temp1 "CPU Temp"
104
+ label temp2 "M/B Temp"
105
+
106
+
107
+ chip "lm87-*" "adm1024-*"
108
+
109
+ label in1 "Vcore"
110
+ label in2 "+3.3V"
111
+ label in3 "+5V"
112
+ label in4 "+12V"
113
+
114
+ set in2_min 3.3 * 0.90
115
+ set in2_max 3.3 * 1.10
116
+ set in3_min 5.0 * 0.90
117
+ set in3_max 5.0 * 1.10
118
+ set in4_min 12.0 * 0.90
119
+ set in4_max 12.0 * 1.10
120
+
121
+ label temp1 "M/B Temp"
122
+ label temp2 "CPU Temp"
123
+
124
+
125
+ chip "it87-*" "it8712-*" "it8716-*" "it8718-*" "it8720-*"
126
+
127
+ label in8 "Vbat"
128
+
129
+
130
+ chip "fscpos-*" "fscher-*"
131
+ #FSC "Hermes"
132
+
133
+ label in0 "+12V"
134
+ label in1 "+5V"
135
+ label in2 "Vbat"
136
+
137
+ label temp1 "CPU Temp"
138
+ label temp2 "M/B Temp"
139
+ label temp3 "Aux Temp"
140
+
141
+
142
+ chip "fscscy-*"
143
+ #FSC "Scylla"
144
+
145
+ label in0 "+12V"
146
+ label in1 "+5V"
147
+ label in2 "+3.3V"
148
+
149
+ label temp1 "CPU0 Temp"
150
+ label temp2 "CPU1 Temp"
151
+ label temp3 "M/B Temp"
152
+ label temp4 "Aux Temp"
153
+
154
+
155
+ chip "fschds-*"
156
+ # Fujitsu Technology Solutions, "Hades"-Chip
157
+
158
+ # Temperatures
159
+ label temp1 "CPU Temp"
160
+ label temp2 "Super I/O Temp"
161
+ label temp3 "System Temp"
162
+
163
+ # Fans
164
+ label fan1 "PSU Fan"
165
+ label fan2 "CPU Fan"
166
+ label fan3 "System FAN2"
167
+ label fan4 "System FAN3"
168
+ label fan5 "System FAN4"
169
+
170
+ # Voltages
171
+ label in0 "+12V"
172
+ label in1 "+5V"
173
+ label in2 "Vbat"
174
+
175
+ chip "fscsyl-*"
176
+ # Fujitsu Technology Solutions, "Syleus"-Chip
177
+
178
+ # Temperatures
179
+ label temp1 "CPU Temp"
180
+ label temp4 "Super I/O Temp"
181
+ label temp5 "Northbridge Temp"
182
+
183
+ # Fans
184
+ label fan1 "CPU Fan"
185
+ label fan2 "System FAN2"
186
+ label fan3 "System FAN3"
187
+ label fan4 "System FAN4"
188
+ label fan7 "PSU Fan"
189
+
190
+ # Voltages
191
+ label in0 "+12V"
192
+ label in1 "+5V"
193
+ label in2 "Vbat"
194
+ label in3 "+3.3V"
195
+ label in5 "+3.3V-Aux"
196
+
197
+ chip "vt1211-*"
198
+
199
+ label in5 "+3.3V"
200
+
201
+ label temp2 "SIO Temp"
202
+
203
+
204
+ chip "vt8231-*"
205
+
206
+ label in5 "+3.3V"
207
+
208
+
209
+ chip "smsc47m192-*"
210
+
211
+ label in1 "Vcore"
212
+ label in2 "+3.3V"
213
+ label in3 "+5V"
214
+ label in4 "+12V"
215
+ label in5 "VCC"
216
+
217
+ set in2_min 3.3 * 0.90
218
+ set in2_max 3.3 * 1.10
219
+ set in3_min 5.0 * 0.90
220
+ set in3_max 5.0 * 1.10
221
+ set in4_min 12.0 * 0.90
222
+ set in4_max 12.0 * 1.10
223
+ set in5_min 3.3 * 0.90
224
+ set in5_max 3.3 * 1.10
225
+
226
+ label temp1 "SIO Temp"
227
+
228
+
229
+ chip "lm85-*" "lm85b-*" "lm85c-*" "adm1027-*" "adt7463-*" "adt7468-*" \
230
+ "emc6d100-*" "emc6d102-*" "emc6d103-*" "emc6d103s-*"
231
+
232
+ label in1 "Vcore"
233
+ label in2 "+3.3V"
234
+ label in3 "+5V"
235
+ label in4 "+12V"
236
+
237
+ set in2_min 3.3 * 0.90
238
+ set in2_max 3.3 * 1.10
239
+ set in3_min 5.0 * 0.90
240
+ set in3_max 5.0 * 1.10
241
+ # Depending on how your chip is hardwired, you may or may not have
242
+ # +12V readings.
243
+ # set in4_min 12.0 * 0.90
244
+ # set in4_max 12.0 * 1.10
245
+
246
+ label temp2 "M/B Temp"
247
+
248
+
249
+ chip "emc6w201-*"
250
+
251
+ label in2 "+3.3V"
252
+ label in3 "+5V"
253
+
254
+ label temp6 "M/B Temp"
255
+
256
+
257
+ chip "pc87365-*" "pc87366-*"
258
+
259
+ # Voltage inputs
260
+
261
+ label in7 "3VSB"
262
+ label in8 "VDD"
263
+ label in9 "Vbat"
264
+ label in10 "AVDD"
265
+
266
+ compute in7 @*2, @/2
267
+ compute in8 @*2, @/2
268
+ compute in10 @*2, @/2
269
+
270
+ # These are the operating conditions as recommended by National
271
+ # Semiconductor
272
+ set in7_min 3.0
273
+ set in7_max 3.6
274
+ set in8_min 3.0
275
+ set in8_max 3.6
276
+ set in10_min 3.0
277
+ set in10_max 3.6
278
+ # Depending on the hardware setup, the battery voltage may or may not
279
+ # be monitored.
280
+ # set in9_min 2.4
281
+ # set in9_max 3.6
282
+
283
+ label temp3 "SIO Temp"
284
+
285
+ set temp3_min 0
286
+ set temp3_max 70
287
+ set temp3_crit 85
288
+
289
+
290
+ chip "adm1030-*" "adm1031-*"
291
+
292
+ label temp1 "M/B Temp"
293
+
294
+
295
+ chip "w83627thf-*"
296
+
297
+ label in3 "+5V"
298
+ label in7 "5VSB"
299
+ label in8 "Vbat"
300
+
301
+ # Internal resistors
302
+ compute in3 @ * (1 + 34/51), @ / (1 + 34/51)
303
+ compute in7 @ * (1 + 34/51), @ / (1 + 34/51)
304
+
305
+ set in3_min 5.0 * 0.90
306
+ set in3_max 5.0 * 1.10
307
+ set in7_min 5.0 * 0.90
308
+ set in7_max 5.0 * 1.10
309
+ # The battery voltage may or may not be monitored.
310
+ # set in8_min 3.0 * 0.90
311
+ # set in8_max 3.0 * 1.10
312
+
313
+
314
+ chip "w83627ehf-*" "w83627dhg-*" "w83667hg-*" "nct6775-*" "nct6776-*" \
315
+ "nct6779-*" "nct6791-*" "nct6795-*" "nct6796-*"
316
+
317
+ label in0 "Vcore"
318
+ label in2 "AVCC"
319
+ label in3 "+3.3V"
320
+ label in7 "3VSB"
321
+ label in8 "Vbat"
322
+
323
+ set in2_min 3.3 * 0.90
324
+ set in2_max 3.3 * 1.10
325
+ set in3_min 3.3 * 0.90
326
+ set in3_max 3.3 * 1.10
327
+ set in7_min 3.3 * 0.90
328
+ set in7_max 3.3 * 1.10
329
+ set in8_min 3.0 * 0.90
330
+ set in8_max 3.3 * 1.10
331
+
332
+
333
+ chip "w83627uhg-*"
334
+
335
+ label in2 "AVCC"
336
+ label in3 "+5V"
337
+ label in7 "5VSB"
338
+ label in8 "Vbat"
339
+
340
+ set in2_min 5.0 * 0.90
341
+ set in2_max 5.0 * 1.10
342
+ set in3_min 5.0 * 0.90
343
+ set in3_max 5.0 * 1.10
344
+ set in7_min 5.0 * 0.90
345
+ set in7_max 5.0 * 1.10
346
+ set in8_min 3.0 * 0.90
347
+ set in8_max 3.3 * 1.10
348
+
349
+
350
+ chip "f71805f-*"
351
+
352
+ label in0 "+3.3V"
353
+
354
+ set in0_min 3.3 * 0.90
355
+ set in0_max 3.3 * 1.10
356
+
357
+
358
+ chip "f71872f-*"
359
+
360
+ label in0 "+3.3V"
361
+ label in9 "Vbat"
362
+ label in10 "3VSB"
363
+
364
+ set in0_min 3.3 * 0.90
365
+ set in0_max 3.3 * 1.10
366
+ set in9_min 3.0 * 0.90
367
+ set in9_max 3.0 * 1.10
368
+ set in10_min 3.3 * 0.90
369
+ set in10_max 3.3 * 1.10
370
+
371
+
372
+ chip "k8temp-*"
373
+
374
+ label temp1 "Core0 Temp"
375
+ label temp2 "Core0 Temp"
376
+ label temp3 "Core1 Temp"
377
+ label temp4 "Core1 Temp"
378
+
379
+
380
+ chip "dme1737-*"
381
+
382
+ label in0 "5VSB"
383
+ label in1 "Vcore"
384
+ label in2 "+3.3V"
385
+ label in3 "+5V"
386
+ label in4 "+12V"
387
+ label in5 "3VSB"
388
+ label in6 "Vbat"
389
+
390
+ label temp2 "SIO Temp"
391
+
392
+ set in0_min 5.0 * 0.90
393
+ set in0_max 5.0 * 1.10
394
+ set in2_min 3.3 * 0.90
395
+ set in2_max 3.3 * 1.10
396
+ set in3_min 5.0 * 0.90
397
+ set in3_max 5.0 * 1.10
398
+ set in4_min 12.0 * 0.90
399
+ set in4_max 12.0 * 1.10
400
+ set in5_min 3.3 * 0.90
401
+ set in5_max 3.3 * 1.10
402
+ set in6_min 3.0 * 0.90
403
+ set in6_max 3.0 * 1.10
404
+
405
+
406
+ chip "sch311x-*"
407
+
408
+ label in1 "Vcore"
409
+ label in2 "+3.3V"
410
+ label in3 "+5V"
411
+ label in4 "+12V"
412
+ label in5 "3VSB"
413
+ label in6 "Vbat"
414
+
415
+ label temp2 "SIO Temp"
416
+
417
+ set in2_min 3.3 * 0.90
418
+ set in2_max 3.3 * 1.10
419
+ set in3_min 5.0 * 0.90
420
+ set in3_max 5.0 * 1.10
421
+ set in4_min 12.0 * 0.90
422
+ set in4_max 12.0 * 1.10
423
+ set in5_min 3.3 * 0.90
424
+ set in5_max 3.3 * 1.10
425
+ set in6_min 3.0 * 0.90
426
+ set in6_max 3.0 * 1.10
427
+
428
+
429
+ chip "sch5027-*"
430
+
431
+ label in0 "5VSB"
432
+ label in1 "Vcore"
433
+ label in2 "+3.3V"
434
+ label in5 "3VSB"
435
+ label in6 "Vbat"
436
+
437
+ label temp2 "SIO Temp"
438
+
439
+ set in0_min 5.0 * 0.90
440
+ set in0_max 5.0 * 1.10
441
+ set in2_min 3.3 * 0.90
442
+ set in2_max 3.3 * 1.10
443
+ set in5_min 3.3 * 0.90
444
+ set in5_max 3.3 * 1.10
445
+ set in6_min 3.0 * 0.90
446
+ set in6_max 3.0 * 1.10
447
+
448
+
449
+ chip "sch5127-*"
450
+
451
+ label in2 "+3.3V"
452
+ label in5 "3VSB"
453
+ label in6 "Vbat"
454
+
455
+ set in2_min 3.3 * 0.90
456
+ set in2_max 3.3 * 1.10
457
+ set in5_min 3.3 * 0.90
458
+ set in5_max 3.3 * 1.10
459
+ set in6_min 3.0 * 0.90
460
+ set in6_max 3.0 * 1.10
461
+
462
+
463
+ chip "f71808e-*" "f71808a-*" "f71862fg-*" "f71869-*" "f71869a-*" "f71882fg-*" \
464
+ "f71889fg-*" "f71889ed-*" "f71889a-*"
465
+
466
+ label in0 "+3.3V"
467
+ label in7 "3VSB"
468
+ label in8 "Vbat"
469
+
470
+ compute in0 @*2, @/2
471
+ compute in7 @*2, @/2
472
+ compute in8 @*2, @/2
473
+
474
+
475
+ chip "f71858fg-*" "f8000-*"
476
+
477
+ label in0 "+3.3V"
478
+ label in1 "3VSB"
479
+ label in2 "Vbat"
480
+
481
+ compute in0 @*2, @/2
482
+ compute in1 @*2, @/2
483
+ compute in2 @*2, @/2
484
+
485
+
486
+ chip "f71868a-*"
487
+
488
+ label in0 "+3.3V"
489
+ label in7 "3VSB"
490
+ label in8 "Vbat"
491
+ label in9 "5VSB"
492
+
493
+ compute in0 @*2, @/2
494
+ compute in7 @*2, @/2
495
+ compute in8 @*2, @/2
496
+ compute in9 @*3, @/3
497
+
498
+
499
+ chip "f81865f-*"
500
+
501
+ label in0 "+3.3V"
502
+ label in5 "3VSB"
503
+ label in6 "Vbat"
504
+
505
+ compute in0 @*2, @/2
506
+ compute in5 @*2, @/2
507
+ compute in6 @*2, @/2
508
+
509
+
510
+ chip "adt7473-*" "adt7475-*"
511
+
512
+ label in2 "+3.3V"
513
+
514
+ set in2_min 3.3 * 0.90
515
+ set in2_max 3.3 * 1.10
516
+
517
+ label temp2 "Board Temp"
518
+
519
+
520
+ chip "adt7476-*" "adt7490-*"
521
+
522
+ label in1 "Vcore"
523
+ label in2 "+3.3V"
524
+ label in3 "+5V"
525
+ label in4 "+12V"
526
+
527
+ set in2_min 3.3 * 0.90
528
+ set in2_max 3.3 * 1.10
529
+ set in3_min 5.0 * 0.90
530
+ set in3_max 5.0 * 1.10
531
+ # Depending on how your ADT7476 is hardwired, you may or may not have
532
+ # +12V readings.
533
+ # set in4_min 12.0 * 0.90
534
+ # set in4_max 12.0 * 1.10
535
+
536
+ label temp2 "M/B Temp"
my_container_sandbox/etc/shadow- ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ root:*:19633:0:99999:7:::
2
+ daemon:*:19633:0:99999:7:::
3
+ bin:*:19633:0:99999:7:::
4
+ sys:*:19633:0:99999:7:::
5
+ sync:*:19633:0:99999:7:::
6
+ games:*:19633:0:99999:7:::
7
+ man:*:19633:0:99999:7:::
8
+ lp:*:19633:0:99999:7:::
9
+ mail:*:19633:0:99999:7:::
10
+ news:*:19633:0:99999:7:::
11
+ uucp:*:19633:0:99999:7:::
12
+ proxy:*:19633:0:99999:7:::
13
+ www-data:*:19633:0:99999:7:::
14
+ backup:*:19633:0:99999:7:::
15
+ list:*:19633:0:99999:7:::
16
+ irc:*:19633:0:99999:7:::
17
+ gnats:*:19633:0:99999:7:::
18
+ nobody:*:19633:0:99999:7:::
19
+ _apt:*:19633:0:99999:7:::
20
+ sshd:*:19800:0:99999:7:::
my_container_sandbox/etc/subgid ADDED
File without changes
my_container_sandbox/etc/subuid ADDED
File without changes
my_container_sandbox/workspace/.pip/pip.conf ADDED
@@ -0,0 +1 @@
 
 
1
+ [global]
my_container_sandbox/workspace/anaconda3/LICENSE.txt ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ===================================
2
+ End User License Agreement - Miniconda
3
+ ===================================
4
+
5
+ Copyright 2015-2021, Anaconda, Inc.
6
+
7
+ All rights reserved under the 3-clause BSD License:
8
+
9
+ This End User License Agreement (the "Agreement") is a legal agreement between you and Anaconda, Inc. ("Anaconda") and governs your use of Miniconda.
10
+
11
+ Subject to the terms of this Agreement, Anaconda hereby grants you a non-exclusive, non-transferable license to:
12
+
13
+ * Install and use the Miniconda.
14
+ * Modify and create derivative works of sample source code delivered in Miniconda subject to the Terms of Service for the Repository (as defined hereinafter) available at https://www.anaconda.com/terms-of-service, and
15
+
16
+ * Redistribute code files in source (if provided to you by Anaconda as source) and binary forms, with or without modification subject to the requirements set forth below.
17
+
18
+ Anaconda may, at its option, make available patches, workarounds or other updates to Miniconda. Unless the updates are provided with their separate governing terms, they are deemed part of Miniconda licensed to you as provided in this Agreement. This Agreement does not entitle you to any support for Miniconda.
19
+
20
+ Anaconda reserves all rights not expressly granted to you in this Agreement.
21
+
22
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
23
+
24
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
25
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
26
+ * Neither the name of Anaconda nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
27
+
28
+ You acknowledge that, as between you and Anaconda, Anaconda owns all right, title, and interest, including all intellectual property rights, in and to Miniconda and, with respect to third-party products distributed with or through Miniconda, the applicable third-party licensors own all right, title and interest, including all intellectual property rights, in and to such products. If you send or transmit any communications or materials to Anaconda suggesting or recommending changes to the software or documentation, including without limitation, new features or functionality relating thereto, or any comments, questions, suggestions or the like ("Feedback"), Anaconda is free to use such Feedback. You hereby assign to Anaconda all right, title, and interest in, and Anaconda is free to use, without any attribution or compensation to any party, any ideas, know-how, concepts, techniques or other intellectual property rights contained in the Feedback, for any purpose whatsoever, although Anaconda is not required to use any Feedback.
29
+
30
+ DISCLAIMER
31
+ ==========
32
+
33
+ THIS SOFTWARE IS PROVIDED BY ANACONDA AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANACONDA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
+
35
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, ANACONDA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF ANACONDA INDIVIDUAL EDITION, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL THE TOTAL CUMULATIVE LIABILITY OF ANACONDA AND ITS AFFILIATES UNDER OR ARISING OUT OF THIS AGREEMENT EXCEED US$10.00.
36
+
37
+ Miscellaneous
38
+ =============
39
+
40
+ If you want to terminate this Agreement, you may do so by discontinuing use of Miniconda. Anaconda may, at any time, terminate this Agreement and the license granted hereunder if you fail to comply with any term of this Agreement. Upon any termination of this Agreement, you agree to promptly discontinue use of the Miniconda and destroy all copies in your possession or control. Upon any termination of this Agreement all provisions survive except for the licenses granted to you.
41
+
42
+ This Agreement is governed by and construed in accordance with the internal laws of the State of Texas without giving effect to any choice or conflict of law provision or rule that would require or permit the application of the laws of any jurisdiction other than those of the State of Texas. Any legal suit, action, or proceeding arising out of or related to this Agreement or the licenses granted hereunder by you must be instituted exclusively in the federal courts of the United States or the courts of the State of Texas in each case located in Travis County, Texas, and you irrevocably submit to the jurisdiction of such courts in any such suit, action, or proceeding.
43
+
44
+ Notice of Third Party Software Licenses
45
+ =======================================
46
+
47
+ Miniconda provides access to a repository (the "Repository") which contains software packages or tools licensed on an open source basis from third parties and binary packages of these third party tools. These third party software packages or tools are provided on an "as is" basis and are subject to their respective license agreements as well as this Agreement and the Terms of Service for the Repository located at https://www.anaconda.com/terms-of-service; provided, however, no restriction contained in the Terms of Service shall be construed so as to limit Your ability to download the packages contained in Miniconda provided you comply with the license for each such package. These licenses may be accessed from within the Miniconda software[1] or https://www.anaconda.com/legal. Information regarding which license is applicable is available from within many of the third party software packages and tools and at https://repo.anaconda.com/pkgs/main/ and https://repo.anaconda.com/pkgs/r/. Anaconda reserves the right, in its sole discretion, to change which third party tools are included in the Repository accessible through Miniconda.
48
+
49
+
50
+ Intel Math Kernel Library
51
+ -------------------------
52
+
53
+ Miniconda provides access to re-distributable, run-time, shared-library files from the Intel Math Kernel Library ("MKL binaries").
54
+
55
+ Copyright 2018 Intel Corporation. License available at https://software.intel.com/en-us/license/intel-simplified-software-license (the "MKL License").
56
+
57
+ You may use and redistribute the MKL binaries, without modification, provided the following conditions are met:
58
+
59
+ * Redistributions must reproduce the above copyright notice and the following terms of use in the MKL binaries and in the documentation and/or other materials provided with the distribution.
60
+ * Neither the name of Intel nor the names of its suppliers may be used to endorse or promote products derived from the MKL binaries without specific prior written permission.
61
+ * No reverse engineering, decompilation, or disassembly of the MKL binaries is permitted.
62
+
63
+ You are specifically authorized to use and redistribute the MKL binaries with your installation of Miniconda subject to the terms set forth in the MKL License. You are also authorized to redistribute the MKL binaries with Miniconda or in the Anaconda package that contains the MKL binaries. If needed, instructions for removing the MKL binaries after installation of Miniconda are available at https://docs.anaconda.com.
64
+
65
+ cuDNN Software
66
+ --------------
67
+
68
+ Miniconda also provides access to cuDNN software binaries ("cuDNN binaries") from NVIDIA Corporation. You are specifically authorized to use the cuDNN binaries with your installation of Miniconda subject to your compliance with the license agreement located at https://docs.nvidia.com/deeplearning/sdk/cudnn-sla/index.html. You are also authorized to redistribute the cuDNN binaries with a Miniconda package that contains the cuDNN binaries. You can add or remove the cuDNN binaries utilizing the install and uninstall features in Miniconda.
69
+
70
+ cuDNN binaries contain source code provided by NVIDIA Corporation.
71
+
72
+ Export; Cryptography Notice
73
+ ===========================
74
+
75
+ You must comply with all domestic and international export laws and regulations that apply to the software, which include restrictions on destinations, end users, and end use. Miniconda includes cryptographic software. The country in which you currently reside may have restrictions on the import, possession, use, and/or re-export to another country, of encryption software. BEFORE using any encryption software, please check your country's laws, regulations and policies concerning the import, possession, or use, and re-export of encryption software, to see if this is permitted. See the Wassenaar Arrangement http://www.wassenaar.org/ for more information.
76
+
77
+ Anaconda has self-classified this software as Export Commodity Control Number (ECCN) EAR99, which includes mass market information security software using or performing cryptographic functions with asymmetric algorithms. No license is required for export of this software to non-embargoed countries.
78
+
79
+ The Intel Math Kernel Library contained in Miniconda is classified by Intel as ECCN 5D992.c with no license required for export to non-embargoed countries.
80
+
81
+ The following packages listed on https://www.anaconda.com/cryptography are included in the repository accessible through Miniconda that relate to cryptography.
82
+
83
+ Last updated June 24, 2021
my_container_sandbox/workspace/difftumor/eval2.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, shutil, argparse, torch
2
+ from tqdm import tqdm
3
+ import sys
4
+ sys.path.append('.')
5
+ # print('sys.path', sys.path)
6
+ join = os.path.join
7
+
8
+
9
def eval(args, test_data_list):
    """Run the organ and tumor segmentation inference scripts on one GPU.

    Args:
        args: parsed CLI namespace; only ``args.gpu`` (GPU index) is read.
        test_data_list: list of input case filenames. Currently unused here;
            the invoked scripts discover their own inputs.
    """
    gpu = args.gpu
    try:
        # Organ masks are produced first; the tumor scripts presumably read
        # them — NOTE(review): confirm this ordering against the tumor scripts.
        os.system('CUDA_VISIBLE_DEVICES={} python organ_mask_access/organ_test.py'.format(gpu))

        os.system('CUDA_VISIBLE_DEVICES={} python tumor_mask_access/liver_tumor.py'.format(gpu))
        os.system('CUDA_VISIBLE_DEVICES={} python tumor_mask_access/pancreas_tumor.py'.format(gpu))
        os.system('CUDA_VISIBLE_DEVICES={} python tumor_mask_access/kidney_tumor.py'.format(gpu))
    except Exception as e:
        # Report the actual failure instead of a bare 'error' marker so the
        # log shows what went wrong. (os.system itself does not raise on a
        # non-zero exit status; this catches only Python-level failures.)
        print('error: {}'.format(e))
19
+
20
+
21
+
22
+
23
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Fixed: the previous help text ("evaluation results will be saved in
    # this folder") described an output directory, not this flag.
    parser.add_argument('--gpu', default=0, type=int, help='index of the GPU to run inference on')

    args = parser.parse_args()
    # Fixed container-side input directory; each entry is one test case file.
    data_path = '/workspace/inputs'
    test_data_list = os.listdir(data_path)
    # Sort for a deterministic processing order.
    test_data_list.sort()
    eval(args, test_data_list)
32
+
my_container_sandbox/workspace/difftumor/organ_mask_access/dataset/dataloader.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from monai.transforms import (
2
+ AsDiscrete,
3
+ AddChanneld,
4
+ Compose,
5
+ CropForegroundd,
6
+ LoadImaged,
7
+ Orientationd,
8
+ RandFlipd,
9
+ RandCropByPosNegLabeld,
10
+ RandShiftIntensityd,
11
+ ScaleIntensityRanged,
12
+ Spacingd,
13
+ RandRotate90d,
14
+ ToTensord,
15
+ CenterSpatialCropd,
16
+ Resized,
17
+ SpatialPadd,
18
+ apply_transform,
19
+ RandZoomd,
20
+ RandCropByLabelClassesd,
21
+ )
22
+
23
+ import collections.abc
24
+ import math
25
+ import pickle
26
+ import shutil
27
+ import sys
28
+ import tempfile
29
+ import threading
30
+ import time
31
+ import warnings
32
+ from copy import copy, deepcopy
33
+ import h5py
34
+
35
+
36
+ import numpy as np
37
+ import torch
38
+ from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
39
+
40
+ sys.path.append("..")
41
+ from utils.utils import get_key
42
+
43
+ from torch.utils.data import Subset
44
+
45
+ from monai.data import DataLoader, Dataset, list_data_collate, DistributedSampler, CacheDataset
46
+ from monai.config import DtypeLike, KeysCollection
47
+ from monai.transforms.transform import Transform, MapTransform
48
+ from monai.utils.enums import TransformBackends
49
+ from monai.config.type_definitions import NdarrayOrTensor
50
+ from monai.transforms.io.array import LoadImage, SaveImage
51
+ from monai.utils import GridSamplePadMode, ensure_tuple, ensure_tuple_rep
52
+ from monai.data.image_reader import ImageReader
53
+ from monai.utils.enums import PostFix
54
+ import os
55
+
56
+ DEFAULT_POST_FIX = PostFix.meta()
57
+
58
class LoadImageh5d(MapTransform):
    """Dictionary-based image loader, a local variant of MONAI's ``LoadImaged``.

    Loads the object stored under each configured ``key`` with
    :class:`LoadImage` and, unless the loader was created with
    ``image_only=True``, also stores the returned metadata dict under the
    matching ``meta_keys`` entry or ``{key}_{meta_key_postfix}``.
    """

    def __init__(
        self,
        keys: KeysCollection,
        reader: Optional[Union[ImageReader, str]] = None,
        dtype: DtypeLike = np.float32,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        overwriting: bool = False,
        image_only: bool = False,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        allow_missing_keys: bool = False,
        *args,
        **kwargs,
    ) -> None:
        """Configure the loader.

        Raises:
            TypeError: if ``meta_key_postfix`` is not a string.
            ValueError: if ``meta_keys`` and ``keys`` have different lengths.
        """
        super().__init__(keys, allow_missing_keys)
        # Underlying array-level loader; extra *args/**kwargs are forwarded to it.
        self._loader = LoadImage(reader, image_only, dtype, ensure_channel_first, simple_keys, *args, **kwargs)
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        # One metadata key (or a None placeholder) per data key.
        self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting


    def register(self, reader: ImageReader):
        # Expose reader registration of the wrapped LoadImage instance.
        self._loader.register(reader)


    def __call__(self, data, reader: Optional[ImageReader] = None):
        """Load every configured key of ``data``; returns a shallow-copied dict."""
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            # `data` is rebound here to the loader result (image or (image, meta)).
            data = self._loader(d[key], reader)
            if self._loader.image_only:
                # Loader returns only the image array/tensor.
                d[key] = data
            else:
                # Loader returns (image, metadata); validate and store both.
                if not isinstance(data, (tuple, list)):
                    raise ValueError("loader must return a tuple or list (because image_only=False was used).")
                d[key] = data[0]
                if not isinstance(data[1], dict):
                    raise ValueError("metadata must be a dict.")
                meta_key = meta_key or f"{key}_{meta_key_postfix}"
                # Refuse to clobber pre-existing metadata unless explicitly allowed.
                if meta_key in d and not self.overwriting:
                    raise KeyError(f"Metadata with key {meta_key} already exists and overwriting=False.")
                d[meta_key] = data[1]
        return d
+
107
def get_loader(args, test_data_list):
    """Build the inference DataLoader for raw CT volumes.

    Args:
        args: namespace providing spacing (``space_x/y/z``), intensity window
            (``a_min/a_max/b_min/b_max``) and ``data_root_path``.
        test_data_list: filenames (``*.nii``/``*.nii.gz``) under
            ``args.data_root_path``.

    Returns:
        (test_loader, val_transforms): a batch-size-1 DataLoader and the
        transform pipeline used to preprocess each volume.
    """
    val_transforms = Compose(
        [
            LoadImaged(keys=["image"]),
            AddChanneld(keys=["image"]),
            Orientationd(keys=["image"], axcodes="RAS"),
            Spacingd(
                keys=["image"],
                pixdim=(args.space_x, args.space_y, args.space_z),
                mode=("bilinear"),
            ),
            ScaleIntensityRanged(
                keys=["image"],
                a_min=args.a_min,
                a_max=args.a_max,
                b_min=args.b_min,
                b_max=args.b_max,
                clip=True,
            ),
            CropForegroundd(keys=["image"], source_key="image"),
            ToTensord(keys=["image"]),
        ]
    )

    # Pair each input file with its case name (filename minus the .nii suffix).
    data_dicts_test = []
    for data_name in test_data_list:
        data_dicts_test.append(
            {
                'image': os.path.join(args.data_root_path, data_name),
                'name': data_name.split('.nii')[0],
            }
        )
    print('test len {}'.format(len(data_dicts_test)))

    test_dataset = Dataset(data=data_dicts_test, transform=val_transforms)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=list_data_collate)
    return test_loader, val_transforms
147
+
148
if __name__ == "__main__":
    # NOTE(review): `partial_label_dataloader` is not defined anywhere in this
    # module, so running this file directly raises NameError. This looks like a
    # leftover from a training-time dataloader — confirm the intended entry
    # point (probably `get_loader`, which needs an `args` namespace).
    train_loader, test_loader = partial_label_dataloader()
    for index, item in enumerate(test_loader):
        print(item['image'].shape, item['label'].shape, item['task_id'])
        # Pause after each batch for manual inspection.
        input()
my_container_sandbox/workspace/difftumor/organ_mask_access/model/DiNTS.py ADDED
@@ -0,0 +1,672 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from monai.networks.blocks.dints_block import (
10
+ ActiConvNormBlock,
11
+ FactorizedIncreaseBlock,
12
+ FactorizedReduceBlock,
13
+ P3DActiConvNormBlock,
14
+ )
15
+ from monai.networks.layers.factories import Conv
16
+ from monai.networks.layers.utils import get_act_layer, get_norm_layer
17
+ from monai.utils import optional_import
18
+
19
+ # solving shortest path problem
20
+ csr_matrix, _ = optional_import("scipy.sparse", name="csr_matrix")
21
+ dijkstra, _ = optional_import("scipy.sparse.csgraph", name="dijkstra")
22
+
23
@torch.jit.interface
class CellInterface(torch.nn.Module):
    """interface for torchscriptable Cell"""

    # Contract only (no implementation): concrete cells map an input tensor
    # plus per-operation architecture weights to the cell output tensor.
    def forward(self, x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:  # type: ignore
        pass
29
+
30
+
31
@torch.jit.interface
class StemInterface(torch.nn.Module):
    """interface for torchscriptable Stem"""

    # Contract only (no implementation): stems map an input tensor to an
    # output tensor with no extra arguments.
    def forward(self, x: torch.Tensor) -> torch.Tensor:  # type: ignore
        pass
37
+
38
+
39
class StemTS(StemInterface):
    """wrapper for torchscriptable Stem"""

    def __init__(self, *mod):
        super().__init__()
        # Wrap the given layer(s) in a Sequential so TorchScript sees a single
        # submodule with a fixed forward signature.
        self.mod = torch.nn.Sequential(*mod)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # type: ignore — Sequential's output type is not statically known.
        return self.mod(x)  # type: ignore
48
+
49
+
50
+ def _dfs(node, paths):
51
+ """use depth first search to find all path activation combination"""
52
+ if node == paths:
53
+ return [[0], [1]]
54
+ child = _dfs(node + 1, paths)
55
+ return [[0] + _ for _ in child] + [[1] + _ for _ in child]
56
+
57
+
58
+ class _IdentityWithRAMCost(nn.Identity):
59
+ def __init__(self, *args, **kwargs):
60
+ super().__init__(*args, **kwargs)
61
+ self.ram_cost = 0
62
+
63
+
64
+ class _CloseWithRAMCost(nn.Module):
65
+ def __init__(self):
66
+ super().__init__()
67
+ self.ram_cost = 0
68
+
69
+ def forward(self, x):
70
+ return torch.tensor(0.0, requires_grad=False).to(x)
71
+
72
+
73
class _ActiConvNormBlockWithRAMCost(ActiConvNormBlock):
    """The class wraps monai layers with ram estimation. The ram_cost = total_ram/output_size is estimated.
    Here is the estimation:
        feature_size = output_size/out_channel
        total_ram = ram_cost * output_size
        total_ram = in_channel * feature_size (activation map) +
                    in_channel * feature_size (convolution map) +
                    out_channel * feature_size (normalization)
                  = (2*in_channel + out_channel) * output_size/out_channel
        ram_cost = total_ram/output_size = 2 * in_channel/out_channel + 1
    """

    def __init__(
        self,
        in_channel: int,
        out_channel: int,
        kernel_size: int,
        padding: int,
        spatial_dims: int = 3,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
    ):
        # All construction is delegated to MONAI's ActiConvNormBlock; this
        # subclass only attaches the RAM-cost estimate derived above.
        super().__init__(in_channel, out_channel, kernel_size, padding, spatial_dims, act_name, norm_name)
        # ram_cost = 2 * in_channel/out_channel + 1 (see class docstring).
        self.ram_cost = 1 + in_channel / out_channel * 2
97
+
98
+
99
class _P3DActiConvNormBlockWithRAMCost(P3DActiConvNormBlock):
    """Pseudo-3D acti-conv-norm block (MONAI ``P3DActiConvNormBlock``) with a
    RAM-cost estimate attached; ``p3dmode`` selects which axis gets the
    1-sized kernel factor (see the MONAI block's documentation).
    """

    def __init__(
        self,
        in_channel: int,
        out_channel: int,
        kernel_size: int,
        padding: int,
        p3dmode: int = 0,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
    ):
        super().__init__(in_channel, out_channel, kernel_size, padding, p3dmode, act_name, norm_name)
        # 1 in_channel (activation) + 1 in_channel (convolution) +
        # 1 out_channel (convolution) + 1 out_channel (normalization)
        self.ram_cost = 2 + 2 * in_channel / out_channel
114
+
115
+
116
class _FactorizedIncreaseBlockWithRAMCost(FactorizedIncreaseBlock):
    """Upsampling block (MONAI ``FactorizedIncreaseBlock``) with a RAM-cost
    estimate attached.
    """

    def __init__(
        self,
        in_channel: int,
        out_channel: int,
        spatial_dims: int = 3,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
    ):
        super().__init__(in_channel, out_channel, spatial_dims, act_name, norm_name)
        # s0 is upsampled 2x from s1, representing feature sizes at two resolutions.
        # 2 * in_channel * s0 (upsample + activation) + 2 * out_channel * s0 (conv + normalization)
        # s0 = output_size/out_channel
        self.ram_cost = 2 * in_channel / out_channel + 2
130
+
131
+
132
class _FactorizedReduceBlockWithRAMCost(FactorizedReduceBlock):
    """Downsampling block (MONAI ``FactorizedReduceBlock``) with a RAM-cost
    estimate attached; the estimate scales with ``2**spatial_dims`` because the
    input feature map is that many times larger than the output.
    """

    def __init__(
        self,
        in_channel: int,
        out_channel: int,
        spatial_dims: int = 3,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
    ):
        super().__init__(in_channel, out_channel, spatial_dims, act_name, norm_name)
        # s0 is upsampled 2x from s1, representing feature sizes at two resolutions.
        # in_channel * s0 (activation) + 3 * out_channel * s1 (convolution, concatenation, normalization)
        # s0 = s1 * 2^(spatial_dims) = output_size / out_channel * 2^(spatial_dims)
        self.ram_cost = in_channel / out_channel * 2**self._spatial_dims + 3
146
+
147
+
148
class MixedOp(nn.Module):
    """
    The weighted averaging of cell operations.
    Args:
        c: number of output channels.
        ops: a dictionary of operations. See also: ``Cell.OPS2D`` or ``Cell.OPS3D``.
        arch_code_c: binary cell operation code. It represents the operation results added to the output.
    """

    def __init__(self, c: int, ops: dict, arch_code_c=None):
        super().__init__()
        # No code given: activate every candidate operation.
        if arch_code_c is None:
            arch_code_c = np.ones(len(ops))
        self.ops = nn.ModuleList()
        for code, op_name in zip(arch_code_c, ops):
            # A deactivated op is replaced by a constant-zero placeholder.
            module = ops[op_name](c) if code != 0 else _CloseWithRAMCost()
            self.ops.append(module)

    def forward(self, x: torch.Tensor, weight: torch.Tensor):
        """
        Args:
            x: input tensor.
            weight: learnable architecture weights for cell operations. arch_code_c are derived from it.
        Return:
            out: weighted average of the operation results.
        """
        weight = weight.to(x)
        total = 0.0
        for idx, candidate in enumerate(self.ops):
            total = total + candidate(x) * weight[idx]
        return total
178
+
179
+
180
class Cell(CellInterface):
    """
    The basic class for cell operation search, which contains a preprocessing operation and a mixed cell operation.
    Each cell is defined on a `path` in the topology search space.
    Args:
        c_prev: number of input channels
        c: number of output channels
        rate: resolution change rate. It represents the preprocessing operation before the mixed cell operation.
            ``-1`` for 2x downsample, ``1`` for 2x upsample, ``0`` for no change of resolution.
        arch_code_c: cell operation code
    """

    # Each cell can route its output along three directions (up/keep/down).
    DIRECTIONS = 3
    # Possible output paths for `Cell`.
    #
    #       - UpSample
    #      /
    # +--+/
    # |  |--- Identity or AlignChannels
    # +--+\
    #      \
    #       - Downsample

    # Define 2D operation set, parameterized by the number of channels
    OPS2D = {
        "skip_connect": lambda _c: _IdentityWithRAMCost(),
        "conv_3x3": lambda c: _ActiConvNormBlockWithRAMCost(c, c, 3, padding=1, spatial_dims=2),
    }

    # Define 3D operation set, parameterized by the number of channels
    OPS3D = {
        "skip_connect": lambda _c: _IdentityWithRAMCost(),
        "conv_3x3x3": lambda c: _ActiConvNormBlockWithRAMCost(c, c, 3, padding=1, spatial_dims=3),
        "conv_3x3x1": lambda c: _P3DActiConvNormBlockWithRAMCost(c, c, 3, padding=1, p3dmode=0),
        "conv_3x1x3": lambda c: _P3DActiConvNormBlockWithRAMCost(c, c, 3, padding=1, p3dmode=1),
        "conv_1x3x3": lambda c: _P3DActiConvNormBlockWithRAMCost(c, c, 3, padding=1, p3dmode=2),
    }

    # Define connection operation set, parameterized by the number of channels
    ConnOPS = {
        "up": _FactorizedIncreaseBlockWithRAMCost,
        "down": _FactorizedReduceBlockWithRAMCost,
        "identity": _IdentityWithRAMCost,
        "align_channels": _ActiConvNormBlockWithRAMCost,
    }

    def __init__(
        self,
        c_prev: int,
        c: int,
        rate: int,
        arch_code_c=None,
        spatial_dims: int = 3,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
    ):
        super().__init__()
        self._spatial_dims = spatial_dims
        self._act_name = act_name
        self._norm_name = norm_name

        # Choose the preprocessing connection according to the resolution
        # change requested by `rate`.
        if rate == -1:  # downsample
            self.preprocess = self.ConnOPS["down"](
                c_prev, c, spatial_dims=self._spatial_dims, act_name=self._act_name, norm_name=self._norm_name
            )
        elif rate == 1:  # upsample
            self.preprocess = self.ConnOPS["up"](
                c_prev, c, spatial_dims=self._spatial_dims, act_name=self._act_name, norm_name=self._norm_name
            )
        else:
            # Same resolution: identity when channels already match, otherwise
            # a 1x1 conv block to align the channel count.
            if c_prev == c:
                self.preprocess = self.ConnOPS["identity"]()
            else:
                self.preprocess = self.ConnOPS["align_channels"](
                    c_prev, c, 1, 0, spatial_dims=self._spatial_dims, act_name=self._act_name, norm_name=self._norm_name
                )

        # Instance-level op sets rebind act_name/norm_name from this instance;
        # they intentionally shadow the class-level OPS2D/OPS3D defaults above.
        # Define 2D operation set, parameterized by the number of channels
        self.OPS2D = {
            "skip_connect": lambda _c: _IdentityWithRAMCost(),
            "conv_3x3": lambda c: _ActiConvNormBlockWithRAMCost(
                c, c, 3, padding=1, spatial_dims=2, act_name=self._act_name, norm_name=self._norm_name
            ),
        }

        # Define 3D operation set, parameterized by the number of channels
        self.OPS3D = {
            "skip_connect": lambda _c: _IdentityWithRAMCost(),
            "conv_3x3x3": lambda c: _ActiConvNormBlockWithRAMCost(
                c, c, 3, padding=1, spatial_dims=3, act_name=self._act_name, norm_name=self._norm_name
            ),
            "conv_3x3x1": lambda c: _P3DActiConvNormBlockWithRAMCost(
                c, c, 3, padding=1, p3dmode=0, act_name=self._act_name, norm_name=self._norm_name
            ),
            "conv_3x1x3": lambda c: _P3DActiConvNormBlockWithRAMCost(
                c, c, 3, padding=1, p3dmode=1, act_name=self._act_name, norm_name=self._norm_name
            ),
            "conv_1x3x3": lambda c: _P3DActiConvNormBlockWithRAMCost(
                c, c, 3, padding=1, p3dmode=2, act_name=self._act_name, norm_name=self._norm_name
            ),
        }

        self.OPS = {}
        if self._spatial_dims == 2:
            self.OPS = self.OPS2D
        elif self._spatial_dims == 3:
            self.OPS = self.OPS3D
        else:
            raise NotImplementedError(f"Spatial dimensions {self._spatial_dims} is not supported.")

        # The searchable mixed operation over the selected op set.
        self.op = MixedOp(c, self.OPS, arch_code_c)

    def forward(self, x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: input tensor
            weight: weights for different operations.
        """
        x = self.preprocess(x)
        x = self.op(x, weight)
        return x
301
+
302
+ class TopologyConstruction(nn.Module):
303
+ """
304
+ The base class for `TopologyInstance` and `TopologySearch`.
305
+
306
+ Args:
307
+ arch_code: `[arch_code_a, arch_code_c]`, numpy arrays. The architecture codes defining the model.
308
+ For example, for a ``num_depths=4, num_blocks=12`` search space:
309
+
310
+ - `arch_code_a` is a 12x10 (10 paths) binary matrix representing if a path is activated.
311
+ - `arch_code_c` is a 12x10x5 (5 operations) binary matrix representing if a cell operation is used.
312
+ - `arch_code` in ``__init__()`` is used for creating the network and remove unused network blocks. If None,
313
+
314
+ all paths and cells operations will be used, and must be in the searching stage (is_search=True).
315
+ channel_mul: adjust intermediate channel number, default is 1.
316
+ cell: operation of each node.
317
+ num_blocks: number of blocks (depth in the horizontal direction) of the DiNTS search space.
318
+ num_depths: number of image resolutions of the DiNTS search space: 1, 1/2, 1/4 ... in each dimension.
319
+ use_downsample: use downsample in the stem. If False, the search space will be in resolution [1, 1/2, 1/4, 1/8],
320
+ if True, the search space will be in resolution [1/2, 1/4, 1/8, 1/16].
321
+ device: `'cpu'`, `'cuda'`, or device ID.
322
+
323
+
324
+ Predefined variables:
325
+ `filter_nums`: default to 32. Double the number of channels after downsample.
326
+ topology related variables:
327
+
328
+ - `arch_code2in`: path activation to its incoming node index (resolution). For depth = 4,
329
+ arch_code2in = [0, 1, 0, 1, 2, 1, 2, 3, 2, 3]. The first path outputs from node 0 (top resolution),
330
+ the second path outputs from node 1 (second resolution in the search space),
331
+ the third path outputs from node 0, etc.
332
+ - `arch_code2ops`: path activation to operations of upsample 1, keep 0, downsample -1. For depth = 4,
333
+ arch_code2ops = [0, 1, -1, 0, 1, -1, 0, 1, -1, 0]. The first path does not change
334
+ resolution, the second path perform upsample, the third perform downsample, etc.
335
+ - `arch_code2out`: path activation to its output node index.
336
+ For depth = 4, arch_code2out = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3],
337
+ the first and second paths connect to node 0 (top resolution), the 3,4,5 paths connect to node 1, etc.
338
+ """
339
+
340
    def __init__(
        self,
        arch_code: Optional[list] = None,
        channel_mul: float = 1.0,
        cell=Cell,
        num_blocks: int = 6,
        num_depths: int = 3,
        spatial_dims: int = 3,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
        use_downsample: bool = True,
        device: str = "cpu",
    ):
        """Build the DiNTS topology: derive the path-topology lookup tables and
        instantiate one cell per activated (block, path) pair.

        See the class docstring for the meaning of each argument.
        """
        super().__init__()

        # Per-depth channel counts; doubled at each downsample level and scaled by channel_mul.
        self.filter_nums = [int(n_feat * channel_mul) for n_feat in (32, 64, 128, 256, 512)]
        self.num_blocks = num_blocks
        self.num_depths = num_depths
        self._spatial_dims = spatial_dims
        self._act_name = act_name
        self._norm_name = norm_name
        self.use_downsample = use_downsample
        self.device = device
        # Number of candidate operations per cell depends on spatial dimensionality.
        self.num_cell_ops = 0
        if self._spatial_dims == 2:
            self.num_cell_ops = len(cell.OPS2D)
        elif self._spatial_dims == 3:
            self.num_cell_ops = len(cell.OPS3D)

        # Calculate predefined parameters for topology search and decoding.
        # arch_code2in[p]: input resolution index of path p;
        # arch_code2ops[p]: resolution change of path p (-1/0/1);
        # arch_code2out[p]: output resolution index of path p.
        arch_code2in, arch_code2out = [], []
        for i in range(Cell.DIRECTIONS * self.num_depths - 2):
            arch_code2in.append((i + 1) // Cell.DIRECTIONS - 1 + (i + 1) % Cell.DIRECTIONS)
        arch_code2ops = ([-1, 0, 1] * self.num_depths)[1:-1]
        for m in range(self.num_depths):
            arch_code2out.extend([m, m, m])
        arch_code2out = arch_code2out[1:-1]
        self.arch_code2in = arch_code2in
        self.arch_code2ops = arch_code2ops
        self.arch_code2out = arch_code2out

        # Define NAS search space: with no arch_code every path/op is activated
        # (search stage); otherwise decode the provided numpy codes.
        if arch_code is None:
            arch_code_a = torch.ones((self.num_blocks, len(self.arch_code2out))).to(self.device)
            arch_code_c = torch.ones((self.num_blocks, len(self.arch_code2out), self.num_cell_ops)).to(self.device)
        else:
            arch_code_a = torch.from_numpy(arch_code[0]).to(self.device)
            # arch_code[1] holds per-path operation indices; expand to one-hot over cell ops.
            arch_code_c = F.one_hot(torch.from_numpy(arch_code[1]).to(torch.int64), self.num_cell_ops).to(self.device)

        self.arch_code_a = arch_code_a
        self.arch_code_c = arch_code_c
        # Define the cell operation on each activated path only, so pruned
        # paths contribute no parameters.
        self.cell_tree = nn.ModuleDict()
        for blk_idx in range(self.num_blocks):
            for res_idx in range(len(self.arch_code2out)):
                if self.arch_code_a[blk_idx, res_idx] == 1:
                    self.cell_tree[str((blk_idx, res_idx))] = cell(
                        # int(use_downsample) shifts channels by one level when a stem downsample is used.
                        self.filter_nums[self.arch_code2in[res_idx] + int(use_downsample)],
                        self.filter_nums[self.arch_code2out[res_idx] + int(use_downsample)],
                        self.arch_code2ops[res_idx],
                        self.arch_code_c[blk_idx, res_idx],
                        self._spatial_dims,
                        self._act_name,
                        self._norm_name,
                    )
406
+
407
    def forward(self, x):
        """This function is to be implemented by the architecture instances or search spaces."""
        # Intentionally a no-op: subclasses such as TopologyInstance override it.
        pass
410
+
411
class TopologyInstance(TopologyConstruction):
    """
    Instance of the final searched architecture. Only used in re-training/inference stage.
    """

    def __init__(
        self,
        arch_code=None,
        channel_mul: float = 1.0,
        cell=Cell,
        num_blocks: int = 6,
        num_depths: int = 3,
        spatial_dims: int = 3,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
        use_downsample: bool = True,
        device: str = "cpu",
    ):
        """
        Initialize DiNTS topology search space of neural architectures.
        """
        # A deployed instance is expected to carry searched architecture codes.
        if arch_code is None:
            warnings.warn("arch_code not provided when not searching.")

        super().__init__(
            arch_code=arch_code,
            channel_mul=channel_mul,
            cell=cell,
            num_blocks=num_blocks,
            num_depths=num_depths,
            spatial_dims=spatial_dims,
            act_name=act_name,
            norm_name=norm_name,
            use_downsample=use_downsample,
            device=device,
        )

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        """
        Run every activated cell of every block, accumulating cell outputs
        per output resolution, then feed the result into the next block.

        Args:
            x: list of feature maps, one per search-space resolution.
        """
        feature_maps = x
        for block in range(self.num_blocks):
            # Fresh zero accumulator for each resolution of this block.
            accumulated = [torch.tensor(0.0).to(x[0])] * self.num_depths
            for path, is_active in enumerate(self.arch_code_a[block].data):
                if not is_active:
                    continue
                cell_mod: CellInterface = self.cell_tree[str((block, path))]
                # All operation weights are one at inference time.
                cell_out = cell_mod.forward(
                    x=feature_maps[self.arch_code2in[path]],
                    weight=torch.ones_like(self.arch_code_c[block, path]),
                )
                target = self.arch_code2out[path]
                accumulated[target] = accumulated[target] + cell_out
            feature_maps = accumulated

        return feature_maps
468
+
469
class DiNTS(nn.Module):
    """
    Reimplementation of DiNTS based on
    "DiNTS: Differentiable Neural Network Topology Search for 3D Medical Image Segmentation
    <https://arxiv.org/abs/2103.15954>".

    The model contains a pre-defined multi-resolution stem block (defined in this class) and a
    DiNTS space (defined in :py:class:`monai.networks.nets.TopologyInstance` and
    :py:class:`monai.networks.nets.TopologySearch`).

    The stem block is for: 1) input downsample and 2) output upsample to original size.
    The model downsamples the input image by 2 (if ``use_downsample=True``).
    The downsampled image is downsampled by [1, 2, 4, 8] times (``num_depths=4``) and used as input to the
    DiNTS search space (``TopologySearch``) or the DiNTS instance (``TopologyInstance``).

    - ``TopologyInstance`` is the final searched model. The initialization requires the searched architecture codes.
    - ``TopologySearch`` is a multi-path topology and cell operation search space.
      The architecture codes will be initialized as one.
    - ``TopologyConstruction`` is the parent class which constructs the instance and search space.

    To meet the requirements of the structure, the input size for each spatial dimension should be:
    divisible by 2 ** (num_depths + 1).

    NOTE(review): unlike upstream MONAI's DiNTS, ``forward`` here returns a tuple
    ``(outputs[-1], decoded)`` — the lowest-resolution search-space output plus the
    stem-decoded full-resolution feature map. Confirm callers expect the tuple.

    Args:
        dints_space: DiNTS search space. The value should be instance of `TopologyInstance` or `TopologySearch`.
        in_channels: number of input image channels.
        num_classes: number of output segmentation classes.
        act_name: activation name, default to 'RELU'.
        norm_name: normalization used in convolution blocks. Default to `InstanceNorm`.
        spatial_dims: spatial 2D or 3D inputs.
        use_downsample: use downsample in the stem.
            If ``False``, the search space will be in resolution [1, 1/2, 1/4, 1/8],
            if ``True``, the search space will be in resolution [1/2, 1/4, 1/8, 1/16].
        node_a: node activation numpy matrix. Its shape is `(num_depths, num_blocks + 1)`.
            +1 for multi-resolution inputs.
            In model searching stage, ``node_a`` can be None. In deployment stage, ``node_a`` cannot be None.
    """

    def __init__(
        self,
        dints_space,
        in_channels: int,
        num_classes: int,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
        spatial_dims: int = 3,
        use_downsample: bool = True,
        node_a=None,
    ):
        super().__init__()

        # Mirror the search-space geometry so stems match its channel counts.
        self.dints_space = dints_space
        self.filter_nums = dints_space.filter_nums
        self.num_blocks = dints_space.num_blocks
        self.num_depths = dints_space.num_depths
        if spatial_dims not in (2, 3):
            raise NotImplementedError(f"Spatial dimensions {spatial_dims} is not supported.")
        self._spatial_dims = spatial_dims
        # With no node activations provided (search stage), activate every node.
        if node_a is None:
            self.node_a = torch.ones((self.num_blocks + 1, self.num_depths))
        else:
            self.node_a = node_a

        # define stem operations for every block
        conv_type = Conv[Conv.CONV, spatial_dims]
        self.stem_down = nn.ModuleDict()
        self.stem_up = nn.ModuleDict()
        mode = "trilinear" if self._spatial_dims == 3 else "bilinear"
        for res_idx in range(self.num_depths):
            # define downsample stems before DiNTS search
            if use_downsample:
                # Downsample stem: resize input to this resolution, conv, then stride-2 conv.
                self.stem_down[str(res_idx)] = StemTS(
                    nn.Upsample(scale_factor=1 / (2**res_idx), mode=mode, align_corners=True),
                    conv_type(
                        in_channels=in_channels,
                        out_channels=self.filter_nums[res_idx],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=self.filter_nums[res_idx]),
                    get_act_layer(name=act_name),
                    conv_type(
                        in_channels=self.filter_nums[res_idx],
                        out_channels=self.filter_nums[res_idx + 1],
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=self.filter_nums[res_idx + 1]),
                )
                # Upsample stem: act, conv back to the shallower channel count, 2x upsample.
                self.stem_up[str(res_idx)] = StemTS(
                    get_act_layer(name=act_name),
                    conv_type(
                        in_channels=self.filter_nums[res_idx + 1],
                        out_channels=self.filter_nums[res_idx],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=self.filter_nums[res_idx]),
                    nn.Upsample(scale_factor=2, mode=mode, align_corners=True),
                )

            else:
                # Without the extra stem downsample: single conv per resolution.
                self.stem_down[str(res_idx)] = StemTS(
                    nn.Upsample(scale_factor=1 / (2**res_idx), mode=mode, align_corners=True),
                    conv_type(
                        in_channels=in_channels,
                        out_channels=self.filter_nums[res_idx],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=self.filter_nums[res_idx]),
                )
                # Top resolution (res_idx == 0) keeps scale_factor 1 (2**False == 1).
                self.stem_up[str(res_idx)] = StemTS(
                    get_act_layer(name=act_name),
                    conv_type(
                        in_channels=self.filter_nums[res_idx],
                        out_channels=self.filter_nums[max(res_idx - 1, 0)],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(
                        name=norm_name, spatial_dims=spatial_dims, channels=self.filter_nums[max(res_idx - 1, 0)]
                    ),
                    nn.Upsample(scale_factor=2 ** (res_idx != 0), mode=mode, align_corners=True),
                )

    def weight_parameters(self):
        # All registered parameters (the `name` component is unused here).
        return [param for name, param in self.named_parameters()]

    def forward(self, x: torch.Tensor):
        """
        Prediction based on dynamic arch_code.

        Args:
            x: input tensor.

        Returns:
            Tuple of (lowest-resolution search-space output, stem-decoded
            full-resolution feature map). See the class-level NOTE(review).
        """
        # Encode the input at every activated resolution; inactive nodes get zeros.
        inputs = []
        for d in range(self.num_depths):
            # allow multi-resolution input
            _mod_w: StemInterface = self.stem_down[str(d)]
            x_out = _mod_w.forward(x)
            if self.node_a[0][d]:
                inputs.append(x_out)
            else:
                inputs.append(torch.zeros_like(x_out))

        outputs = self.dints_space(inputs)

        # Decode bottom-up: start at the deepest activated output node of the
        # last block and fold each shallower resolution in via its up-stem.
        blk_idx = self.num_blocks - 1
        start = False
        _temp: torch.Tensor = torch.empty(0)
        for res_idx in range(self.num_depths - 1, -1, -1):
            _mod_up: StemInterface = self.stem_up[str(res_idx)]
            if start:
                _temp = _mod_up.forward(outputs[res_idx] + _temp)
            elif self.node_a[blk_idx + 1][res_idx]:
                start = True
                _temp = _mod_up.forward(outputs[res_idx])

        return outputs[-1], _temp
649
+
650
if __name__ == "__main__":
    # Restore the searched architecture description produced by the search stage.
    searched = torch.load('./arch_code_cvpr.pth')

    # Build the searched topology instance from the architecture codes.
    space = TopologyInstance(
        channel_mul=1.0,
        num_blocks=12,
        num_depths=4,
        use_downsample=True,
        arch_code=[searched["arch_code_a"], searched["arch_code_c"]],
    )

    # Wrap the topology with the DiNTS stem network.
    net = DiNTS(
        dints_space=space,
        in_channels=1,
        num_classes=3,
        use_downsample=True,
        node_a=searched["node_a"],
    )

    # Smoke test with a single-channel 96^3 volume.
    net(torch.zeros(1, 1, 96, 96, 96))
my_container_sandbox/workspace/difftumor/organ_mask_access/model/Unetpp.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from typing import Sequence, Union
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+
17
+ from monai.networks.layers.factories import Conv
18
+ from monai.networks.nets.basic_unet import Down, TwoConv, UpCat
19
+ from monai.utils import ensure_tuple_rep
20
+
21
+
22
class BasicUNetPlusPlus(nn.Module):
    def __init__(
        self,
        spatial_dims: int = 3,
        in_channels: int = 1,
        out_channels: int = 2,
        features: Sequence[int] = (32, 32, 64, 128, 256, 32),
        deep_supervision: bool = False,
        act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
        norm: Union[str, tuple] = ("instance", {"affine": True}),
        bias: bool = True,
        dropout: Union[float, tuple] = 0.0,
        upsample: str = "deconv",
    ):
        """
        A UNet++ implementation with 1D/2D/3D supports.

        Based on:

            Zhou et al. "UNet++: A Nested U-Net Architecture for Medical Image
            Segmentation". 4th Deep Learning in Medical Image Analysis (DLMIA)
            Workshop, DOI: https://doi.org/10.48550/arXiv.1807.10165

        NOTE(review): in this trimmed variant ``out_channels`` and
        ``deep_supervision`` are accepted but not used by the visible code —
        no segmentation head (final conv) is constructed, and ``forward``
        returns the bottleneck and last-decoder feature maps as a tuple.
        Confirm against callers before relying on the docstring claims below.

        Args:
            spatial_dims: number of spatial dimensions. Defaults to 3 for spatial 3D inputs.
            in_channels: number of input channels. Defaults to 1.
            out_channels: number of output channels. Defaults to 2.
            features: six integers as numbers of features.
                Defaults to ``(32, 32, 64, 128, 256, 32)``,

                - the first five values correspond to the five-level encoder feature sizes.
                - the last value corresponds to the feature size after the last upsampling.

            deep_supervision: whether to prune the network at inference time. Defaults to False. If true, returns a list,
                whose elements correspond to outputs at different nodes.
            act: activation type and arguments. Defaults to LeakyReLU.
            norm: feature normalization type and arguments. Defaults to instance norm.
            bias: whether to have a bias term in convolution blocks. Defaults to True.
                According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
                if a conv layer is directly followed by a batch norm layer, bias should be False.
            dropout: dropout ratio. Defaults to no dropout.
            upsample: upsampling mode, available options are
                ``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.

        Examples::

            # for spatial 2D
            >>> net = BasicUNetPlusPlus(spatial_dims=2, features=(64, 128, 256, 512, 1024, 128))

            # for spatial 2D, with deep supervision enabled
            >>> net = BasicUNetPlusPlus(spatial_dims=2, features=(64, 128, 256, 512, 1024, 128), deep_supervision=True)

            # for spatial 2D, with group norm
            >>> net = BasicUNetPlusPlus(spatial_dims=2, features=(64, 128, 256, 512, 1024, 128), norm=("group", {"num_groups": 4}))

            # for spatial 3D
            >>> net = BasicUNetPlusPlus(spatial_dims=3, features=(32, 32, 64, 128, 256, 32))

        See Also
            - :py:class:`monai.networks.nets.BasicUNet`
            - :py:class:`monai.networks.nets.DynUNet`
            - :py:class:`monai.networks.nets.UNet`

        """
        super().__init__()

        self.deep_supervision = deep_supervision

        fea = ensure_tuple_rep(features, 6)
        print(f"BasicUNetPlusPlus features: {fea}.")

        # Encoder backbone: X_{i,0} nodes (one per resolution level).
        self.conv_0_0 = TwoConv(spatial_dims, in_channels, fea[0], act, norm, bias, dropout)
        self.conv_1_0 = Down(spatial_dims, fea[0], fea[1], act, norm, bias, dropout)
        self.conv_2_0 = Down(spatial_dims, fea[1], fea[2], act, norm, bias, dropout)
        self.conv_3_0 = Down(spatial_dims, fea[2], fea[3], act, norm, bias, dropout)
        self.conv_4_0 = Down(spatial_dims, fea[3], fea[4], act, norm, bias, dropout)

        # Nested decoder nodes X_{i,j}: the skip-channel argument grows with j
        # because each node concatenates all previous same-level outputs.
        self.upcat_0_1 = UpCat(spatial_dims, fea[1], fea[0], fea[0], act, norm, bias, dropout, upsample, halves=False)
        self.upcat_1_1 = UpCat(spatial_dims, fea[2], fea[1], fea[1], act, norm, bias, dropout, upsample)
        self.upcat_2_1 = UpCat(spatial_dims, fea[3], fea[2], fea[2], act, norm, bias, dropout, upsample)
        self.upcat_3_1 = UpCat(spatial_dims, fea[4], fea[3], fea[3], act, norm, bias, dropout, upsample)

        self.upcat_0_2 = UpCat(
            spatial_dims, fea[1], fea[0] * 2, fea[0], act, norm, bias, dropout, upsample, halves=False
        )
        self.upcat_1_2 = UpCat(spatial_dims, fea[2], fea[1] * 2, fea[1], act, norm, bias, dropout, upsample)
        self.upcat_2_2 = UpCat(spatial_dims, fea[3], fea[2] * 2, fea[2], act, norm, bias, dropout, upsample)

        self.upcat_0_3 = UpCat(
            spatial_dims, fea[1], fea[0] * 3, fea[0], act, norm, bias, dropout, upsample, halves=False
        )
        self.upcat_1_3 = UpCat(spatial_dims, fea[2], fea[1] * 3, fea[1], act, norm, bias, dropout, upsample)

        self.upcat_0_4 = UpCat(
            spatial_dims, fea[1], fea[0] * 4, fea[5], act, norm, bias, dropout, upsample, halves=False
        )


    def forward(self, x: torch.Tensor):
        """
        Args:
            x: input should have spatially N dimensions
                ``(Batch, in_channels, dim_0[, dim_1, ..., dim_N-1])``, N is defined by `dimensions`.
                It is recommended to have ``dim_n % 16 == 0`` to ensure all maxpooling inputs have
                even edge lengths.

        Returns:
            A tuple ``(x_4_0, x_0_4)``: the deepest encoder feature map and the
            final nested-decoder feature map at input resolution.
        """
        # Column j=0: encoder path.
        x_0_0 = self.conv_0_0(x)
        x_1_0 = self.conv_1_0(x_0_0)
        x_0_1 = self.upcat_0_1(x_1_0, x_0_0)

        # Each deeper level unlocks one more nested-decoder column.
        x_2_0 = self.conv_2_0(x_1_0)
        x_1_1 = self.upcat_1_1(x_2_0, x_1_0)
        x_0_2 = self.upcat_0_2(x_1_1, torch.cat([x_0_0, x_0_1], dim=1))

        x_3_0 = self.conv_3_0(x_2_0)
        x_2_1 = self.upcat_2_1(x_3_0, x_2_0)
        x_1_2 = self.upcat_1_2(x_2_1, torch.cat([x_1_0, x_1_1], dim=1))
        x_0_3 = self.upcat_0_3(x_1_2, torch.cat([x_0_0, x_0_1, x_0_2], dim=1))

        x_4_0 = self.conv_4_0(x_3_0)
        x_3_1 = self.upcat_3_1(x_4_0, x_3_0)
        x_2_2 = self.upcat_2_2(x_3_1, torch.cat([x_2_0, x_2_1], dim=1))
        x_1_3 = self.upcat_1_3(x_2_2, torch.cat([x_1_0, x_1_1, x_1_2], dim=1))
        x_0_4 = self.upcat_0_4(x_1_3, torch.cat([x_0_0, x_0_1, x_0_2, x_0_3], dim=1))

        return x_4_0, x_0_4
my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/DiNTS.cpython-38.pyc ADDED
Binary file (22.4 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/SENet.cpython-37.pyc ADDED
Binary file (7.74 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/SENet.cpython-38.pyc ADDED
Binary file (7.87 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/SwinUNETR.cpython-38.pyc ADDED
Binary file (27.9 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/Unetpp.cpython-38.pyc ADDED
Binary file (5.89 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/model/__pycache__/Universal_model.cpython-37.pyc ADDED
Binary file (5.36 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/organ_test.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from tqdm import tqdm
7
+ import os
8
+ import argparse
9
+ import time
10
+ from monai.inferers import sliding_window_inference
11
+
12
+ from model.Universal_model import Universal_model
13
+ from dataset.dataloader import get_loader
14
+ from utils.utils import visualize_label
15
+ from utils.utils import TEMPLATE, ORGAN_NAME, NUM_CLASS
16
+
17
+ torch.multiprocessing.set_sharing_strategy('file_system')
18
+
19
def cal_dice(pred, true):
    """Compute the Dice coefficient between two binary masks.

    Args:
        pred: binary prediction mask (numpy array, values 0/1).
        true: binary ground-truth mask, same shape as ``pred``.

    Returns:
        Dice score in [0, 1]. Returns 1.0 when both masks are empty
        (the original computed 0/0, yielding NaN).
    """
    denom = np.sum(pred) + np.sum(true)
    if denom == 0:
        # Both masks empty: perfect agreement by convention; avoids 0/0 -> NaN.
        return 1.0
    intersection = np.sum(pred[true == 1]) * 2.0
    return intersection / denom
23
+
24
def validation(model, ValLoader, val_transforms, args):
    """Run sliding-window inference over the loader and save organ masks.

    For every case: predict per-class probability maps, threshold at 0.5,
    extract liver / pancreas / kidney (right+left merged) masks, write them
    under ``args.save_path/<name>/predictions``, and copy the source CT
    alongside.

    Args:
        model: segmentation network (already on GPU).
        ValLoader: DataLoader yielding dicts with at least "image" and "name".
        val_transforms: transforms passed through to ``visualize_label``.
        args: parsed CLI namespace (roi sizes, data/save paths).

    Returns:
        None. All results are written to disk.
    """
    import shutil  # stdlib; used to copy the source CT next to the predictions

    model.eval()
    # NOTE(review): dice_list is allocated but never filled in this script —
    # kept for parity with the original pipeline; confirm before removing.
    dice_list = {}
    for key in TEMPLATE.keys():
        dice_list[key] = np.zeros((2, NUM_CLASS))  # 1st row for dice, 2nd row for count

    for index, batch in enumerate(tqdm(ValLoader)):
        image, name = batch["image"].cuda(), batch["name"]
        print(batch["name"])
        name = name[0]
        save_dir = os.path.join(args.save_path, name, 'predictions')
        os.makedirs(save_dir, exist_ok=True)

        with torch.no_grad():
            pred = sliding_window_inference(image, (args.roi_x, args.roi_y, args.roi_z), 1, model, overlap=0.5, mode='gaussian')
            # torch.sigmoid replaces the deprecated F.sigmoid alias.
            pred_sigmoid = torch.sigmoid(pred)

        pred_hard = (pred_sigmoid > 0.5).to(pred_sigmoid)
        pred_hard = pred_hard.cpu()
        torch.cuda.empty_cache()

        # Channel indices follow the PAOT organ list: 5=liver, 10=pancreas,
        # 1=right kidney, 2=left kidney (merged into one kidney mask).
        liver_mask = pred_hard[0][5]
        pancreas_mask = pred_hard[0][10]
        kidney_mask = pred_hard[0][1]
        left_kidney_mask = pred_hard[0][2]
        kidney_mask[left_kidney_mask == 1] = 1
        torch.cuda.empty_cache()

        batch['liver'] = liver_mask[None, None, :]
        batch['pancreas'] = pancreas_mask[None, None, :]
        batch['kidney'] = kidney_mask[None, None, :]

        visualize_label(batch, save_dir, val_transforms)

        # Copy the original CT next to the predictions, preserving its extension.
        if os.path.exists(os.path.join(args.data_root_path, name+'.nii.gz')):
            shutil.copyfile(os.path.join(args.data_root_path, name+'.nii.gz'), os.path.join(args.save_path, name, 'ct.nii.gz'))
        elif os.path.exists(os.path.join(args.data_root_path, name+'.nii')):
            shutil.copyfile(os.path.join(args.data_root_path, name+'.nii'), os.path.join(args.save_path, name, 'ct.nii'))

        # Normalize prediction filenames (strip the case-name prefix).
        os.rename(os.path.join(save_dir, name+'_liver.nii.gz'), os.path.join(save_dir, 'liver.nii.gz'))
        os.rename(os.path.join(save_dir, name+'_pancreas.nii.gz'), os.path.join(save_dir, 'pancreas.nii.gz'))
        os.rename(os.path.join(save_dir, name+'_kidney.nii.gz'), os.path.join(save_dir, 'kidney.nii.gz'))
        # Removed a stray breakpoint() that dropped into the debugger after the
        # first case and prevented unattended batch processing.
78
+
79
def main():
    """Parse CLI args, load the pretrained universal model, and run inference."""
    parser = argparse.ArgumentParser()

    ## dataset
    parser.add_argument('--data_root_path', default='/workspace/inputs/', help='data root path')
    parser.add_argument('--save_path', default='/workspace/outputs/', help='data root path')
    parser.add_argument('--batch_size', default=1, type=int, help='batch size')
    parser.add_argument('--num_workers', default=8, type=int, help='workers numebr for DataLoader')
    # Intensity window and normalization range for ScaleIntensityRanged.
    parser.add_argument('--a_min', default=-175, type=float, help='a_min in ScaleIntensityRanged')
    parser.add_argument('--a_max', default=250, type=float, help='a_max in ScaleIntensityRanged')
    parser.add_argument('--b_min', default=0.0, type=float, help='b_min in ScaleIntensityRanged')
    parser.add_argument('--b_max', default=1.0, type=float, help='b_max in ScaleIntensityRanged')
    # Resampling spacing (mm) and sliding-window ROI size (voxels).
    parser.add_argument('--space_x', default=1.5, type=float, help='spacing in x direction')
    parser.add_argument('--space_y', default=1.5, type=float, help='spacing in y direction')
    parser.add_argument('--space_z', default=1.5, type=float, help='spacing in z direction')
    parser.add_argument('--roi_x', default=96, type=int, help='roi size in x direction')
    parser.add_argument('--roi_y', default=96, type=int, help='roi size in y direction')
    parser.add_argument('--roi_z', default=96, type=int, help='roi size in z direction')
    parser.add_argument('--num_samples', default=1, type=int, help='sample number in each ct')

    args = parser.parse_args()

    # prepare the 3D model
    model = Universal_model(img_size=(args.roi_x, args.roi_y, args.roi_z),
                    in_channels=1,
                    out_channels=NUM_CLASS,
                    backbone='swinunetr',
                    encoding='word_embedding'
                    )

    # Load pre-trained weights. Checkpoint keys are remapped: the leading
    # module prefix is stripped, and swinViT/encoder/decoder weights are
    # re-rooted under 'backbone.' to match Universal_model's layout.
    store_dict = model.state_dict()
    checkpoint = torch.load('organ_mask_access/swinunetr.pth')
    load_dict = checkpoint['net']

    for key, value in load_dict.items():
        if 'swinViT' in key or 'encoder' in key or 'decoder' in key:
            name = '.'.join(key.split('.')[1:])
            name = 'backbone.' + name
        else:
            name = '.'.join(key.split('.')[1:])
        store_dict[name] = value


    model.load_state_dict(store_dict)
    print('Use pretrained weights')

    model.cuda()

    # Build the test loader from every file in the input directory, sorted
    # for deterministic case order.
    test_data_list = os.listdir(args.data_root_path)
    test_data_list.sort()
    test_loader, val_transforms = get_loader(args, test_data_list)

    validation(model, test_loader, val_transforms, args)
133
+
134
if __name__ == "__main__":
    # Script entry point: run CLI parsing, model loading, and inference.
    main()
my_container_sandbox/workspace/difftumor/organ_mask_access/pretrained_weights/.DS_Store ADDED
Binary file (6.15 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/pretrained_weights/clip_embedding.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Generate CLIP text embeddings for each PAOT organ/tumor class and save them
# to 'txt_encoding.pth' for use as class encodings by the segmentation model.
import os
import clip
import torch


## PAOT class names. Order matters: the row index of each embedding is the
## class index used elsewhere. (Spellings such as 'Arota' are kept as-is —
## they are part of the prompt strings the embeddings were generated from.)
ORGAN_NAME = ['Spleen', 'Right Kidney', 'Left Kidney', 'Gall Bladder', 'Esophagus',
                'Liver', 'Stomach', 'Arota', 'Postcava', 'Portal Vein and Splenic Vein',
                'Pancreas', 'Right Adrenal Gland', 'Left Adrenal Gland', 'Duodenum', 'Hepatic Vessel',
                'Right Lung', 'Left Lung', 'Colon', 'Intestine', 'Rectum',
                'Bladder', 'Prostate', 'Left Head of Femur', 'Right Head of Femur', 'Celiac Truck',
                'Kidney Tumor', 'Liver Tumor', 'Pancreas Tumor', 'Hepatic Vessel Tumor', 'Lung Tumor',
                'Colon Tumor', 'Kidney Cyst']

# Load the model (preprocess is returned by clip.load but unused here).
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load('ViT-B/32', device)


# One tokenized prompt per class, stacked into a single batch.
text_inputs = torch.cat([clip.tokenize(f'A computerized tomography of a {item}') for item in ORGAN_NAME]).to(device)

# Calculate text embedding features
with torch.no_grad():
    text_features = model.encode_text(text_inputs)
    print(text_features.shape, text_features.dtype)
    torch.save(text_features, 'txt_encoding.pth')
27
+
my_container_sandbox/workspace/difftumor/organ_mask_access/requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ connected-components-3d
2
+ h5py==3.6.0
3
+ monai==0.9.0
4
+ torch==1.11.0
5
+ tqdm
6
+ fastremap
7
+ simpleitk
my_container_sandbox/workspace/difftumor/organ_mask_access/utils/__pycache__/utils.cpython-37.pyc ADDED
Binary file (12.2 kB). View file
 
my_container_sandbox/workspace/difftumor/organ_mask_access/utils/label_transfer.py ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from monai.transforms import (
2
+ AsDiscrete,
3
+ AddChanneld,
4
+ Compose,
5
+ CropForegroundd,
6
+ LoadImaged,
7
+ Orientationd,
8
+ RandFlipd,
9
+ RandCropByPosNegLabeld,
10
+ RandShiftIntensityd,
11
+ ScaleIntensityRanged,
12
+ Spacingd,
13
+ RandRotate90d,
14
+ ToTensord,
15
+ CenterSpatialCropd,
16
+ Resized,
17
+ SpatialPadd,
18
+ apply_transform,
19
+ )
20
+
21
+ import collections.abc
22
+ import math
23
+ import pickle
24
+ import shutil
25
+ import sys
26
+ import tempfile
27
+ import threading
28
+ import time
29
+ from copy import copy, deepcopy
30
+ import cc3d
31
+ import argparse
32
+ import os
33
+ import h5py
34
+ import warnings
35
+ warnings.filterwarnings("ignore")
36
+
37
+ import numpy as np
38
+ import torch
39
+ from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
40
+
41
+ torch.multiprocessing.set_sharing_strategy('file_system')
42
+
43
+ from monai.data import DataLoader, Dataset, list_data_collate, DistributedSampler
44
+ from monai.config import DtypeLike, KeysCollection
45
+ from monai.transforms.transform import Transform, MapTransform
46
+ from monai.utils.enums import TransformBackends
47
+ from monai.config.type_definitions import NdarrayOrTensor
48
+
49
+ from utils.utils import get_key
50
+
51
# Paths and dataset constants for the label-transfer preprocessing step.
ORGAN_DATASET_DIR = '/home/jliu288/data/whole_organ/'
ORGAN_LIST = 'dataset/dataset_list/PAOT.txt'
NUM_WORKER = 8
NUM_CLASS = 32
# Dataset subsets to process in this run.
TRANSFER_LIST = ['10_08']
## full list
# TRANSFER_LIST = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10_03', '10_06', '10_07', '10_08', '10_09', '10_10', '12', '13', '14']

# Per-dataset mapping from local label index (1-based position in the list)
# to the universal 32-class template index. 0 entries are unused slots;
# "post process" marks datasets needing rl_split / tumor folding afterwards.
TEMPLATE={
    '01': [1,2,3,4,5,6,7,8,9,10,11,12,13,14],
    '02': [1,0,3,4,5,6,7,0,0,0,11,0,0,14],
    '03': [6],
    '04': [6,27], # post process
    '05': [2,26,32], # post process
    '07': [6,1,3,2,7,4,5,11,14,18,19,12,20,21,23,24],
    '08': [6, 2, 1, 11],
    '09': [1,2,3,4,5,6,7,8,9,11,12,13,14,21,22],
    '12': [6,21,16,2],
    '13': [6,2,1,11,8,9,7,4,5,12,13,25],
    '14': [11,11,28,28,28], # Felix data, post process
    '10_03': [6, 27], # post process
    '10_06': [30],
    '10_07': [11, 28], # post process
    '10_08': [15, 29], # post process
    '10_09': [1],
    '10_10': [31]
}

# Datasets whose (local label, template tumor index) pairs need tumor labels
# folded back into the primary organ label during post-processing.
POST_TUMOR_DICT = {
    '04': [(2,27)],
    '05': [(2,26), (3,32)],
    '10_03': [(2,27)],
    '10_07': [(2,28)]
}
85
+
86
def rl_split(input_data, organ_index, right_index, left_index, name):
    '''
    Split a paired organ label (e.g. kidneys) into right/left template labels
    via 3D connected components.

    input_data: 3-d tensor [w,h,d], after transform 'Orientationd(keys=["label"], axcodes="RAS")'
    organ_index: the organ index of interest
    right_index and left_index: the corresponding index in template
    return [1, w, h, d]
    '''
    RIGHT_ORGAN = right_index
    LEFT_ORGAN = left_index
    label_raw = input_data.copy()
    # Binarize: 1 where the organ of interest is, 0 elsewhere.
    label_in = np.zeros(label_raw.shape)
    label_in[label_raw == organ_index] = 1

    # 26-connectivity components; expected result: background 0 + two organ
    # components (labels 1 and 2).
    label_out = cc3d.connected_components(label_in, connectivity=26)
    # print('label_out', organ_index, np.unique(label_out), np.unique(label_in), label_out.shape, np.sum(label_raw == organ_index))
    # assert len(np.unique(label_out)) == 3, f'more than 2 component in this ct for {name} with {np.unique(label_out)} component'
    if len(np.unique(label_out)) > 3:
        # More than two components: keep the three largest labels (background
        # plus the two organ halves) and discard the small noise regions.
        # NOTE(review): counts is indexed by label value — valid because cc3d
        # produces contiguous labels 0..N, so np.unique order equals label value.
        count_sum = 0
        values, counts = np.unique(label_out, return_counts=True)
        num_list_sorted = sorted(values, key=lambda x: counts[x])[::-1]
        for i in num_list_sorted[3:]:
            label_out[label_out==i] = 0
            count_sum += counts[i]
        # Relabel survivors to 0/1/2 by decreasing size.
        label_new = np.zeros(label_out.shape)
        for tgt, src in enumerate(num_list_sorted[:3]):
            label_new[label_out==src] = tgt
        label_out = label_new
        print(f'In {name}. Delete {len(num_list_sorted[3:])} small regions with {count_sum} voxels')
    # Decide right vs left by comparing mean position along the first axis.
    a1,b1,c1 = np.where(label_out==1)
    a2,b2,c2 = np.where(label_out==2)

    label_new = np.zeros(label_out.shape)
    # NOTE(review): the smaller first-axis mean is assigned LEFT — depends on
    # the RAS orientation applied upstream; confirm axis polarity.
    if np.mean(a1) < np.mean(a2):
        label_new[label_out==1] = LEFT_ORGAN
        label_new[label_out==2] = RIGHT_ORGAN
    else:
        label_new[label_out==1] = RIGHT_ORGAN
        label_new[label_out==2] = LEFT_ORGAN

    # Add a leading channel dimension: [1, w, h, d].
    return label_new[None]
126
+
127
class ToTemplatelabel(Transform):
    """Remap a dataset-specific label volume onto the unified template indices."""

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __call__(self, lbl: NdarrayOrTensor, totemplate: List, tumor=False, tumor_list=None) -> NdarrayOrTensor:
        """
        Dataset class k (1-based) becomes template class ``totemplate[k-1]``.
        When ``tumor`` is True, every template tumor index listed in
        ``tumor_list`` is folded back into the first template organ so the
        organ mask stays whole (raw tumor labels are preserved elsewhere).
        """
        remapped = np.zeros(lbl.shape)
        for local_idx, template_idx in enumerate(totemplate, start=1):
            remapped[lbl == local_idx] = template_idx
        if tumor:
            for _, tumor_idx in tumor_list:
                remapped[remapped == tumor_idx] = totemplate[0]
        return remapped
144
+
145
class ToTemplatelabeld(MapTransform):
    '''
    Dictionary wrapper around ToTemplatelabel.
    Comment: spleen to 1
    '''
    backend = ToTemplatelabel.backend

    def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
        super().__init__(keys, allow_missing_keys)
        self.totemplate = ToTemplatelabel()

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        dataset_index = int(d['name'][0:2])
        # Dataset 10 has sub-datasets distinguished by characters 17:19 of the
        # case name; every other dataset is keyed by its two-digit prefix.
        if dataset_index == 10:
            template_key = d['name'][0:2] + '_' + d['name'][17:19]
        else:
            template_key = d['name'][0:2]
        # Datasets whose labels include tumor classes that must be folded back
        # into the host organ.
        # NOTE(review): '14' has no entry in POST_TUMOR_DICT, so dataset 14
        # raises KeyError here — confirm the intended mapping.
        needs_tumor = template_key in ['04', '05', '10_03', '10_07', '14']
        tumor_pairs = POST_TUMOR_DICT[template_key] if needs_tumor else None
        d['label'] = self.totemplate(d['label'], TEMPLATE[template_key], tumor=needs_tumor, tumor_list=tumor_pairs)
        return d
171
+
172
class RL_Split(Transform):
    """Split merged right/left organs into separate template labels."""

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __call__(self, lbl: NdarrayOrTensor, organ_list: List, name) -> NdarrayOrTensor:
        """For each merged index, the right side keeps the original index and
        the left side is moved to the next template index."""
        result = lbl.copy()
        for merged_idx in organ_list:
            split_mask = rl_split(result[0], merged_idx, merged_idx, merged_idx + 1, name)
            result[split_mask == merged_idx + 1] = merged_idx + 1
        return result
184
+
185
class RL_Splitd(MapTransform):
    """Dictionary wrapper around RL_Split for datasets with merged R/L organs."""
    backend = ToTemplatelabel.backend

    def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
        super().__init__(keys, allow_missing_keys)
        self.spliter = RL_Split()

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        dataset_index = int(d['name'][0:2])
        # Template indices needing a right/left split, per dataset id; datasets
        # not listed are passed through untouched.
        split_targets = {5: [2], 8: [2], 13: [2], 7: [12], 12: [2, 16]}
        targets = split_targets.get(dataset_index)
        if targets is not None:
            d['label'] = self.spliter(d['label'], targets, d['name'])
        return d
206
+
207
def generate_label(input_lbl, num_classes, name, TEMPLATE, raw_lbl):
    """
    Convert class index tensor to one hot encoding tensor with -1 (ignored).
    Args:
        input_lbl: A tensor of shape [bs, 1, *] holding template label indices
        num_classes: An int of number of class
        name: per-sample case names; the dataset key is parsed from them
        TEMPLATE: dataset-key -> list of labeled template indices
        raw_lbl: the untouched dataset-native labels, used to recover tumors
    Returns:
        A tensor of shape [bs, num_classes, *] where channel i is 1/0 for
        labeled organ i+1 and -1 everywhere the organ was not annotated
    Comment: spleen to 0
    """
    shape = np.array(input_lbl.shape)
    shape[1] = num_classes
    shape = tuple(shape)
    result = torch.zeros(shape)
    input_lbl = input_lbl.long()

    ## generate binary cross entropy label and assign -1 to ignored organ
    B = result.shape[0]
    for b in range(B):
        dataset_index = int(name[b][0:2])
        if dataset_index == 10:
            # Dataset 10's sub-dataset id sits at characters 17:19 of the name.
            template_key = name[b][0:2] + '_' + name[b][17:19]
        else:
            template_key = name[b][0:2]

        # for organ split case
        # These datasets went through RL_Splitd, so their labeled set is wider
        # than the raw TEMPLATE entry (left organ indices were added).
        if dataset_index == 5:
            organ_list = [2,3,26,32]
        elif dataset_index == 7:
            organ_list = [6,1,3,2,7,4,5,11,14,18,19,12,13,20,21,23,24]
        elif dataset_index == 8:
            organ_list = [6, 2, 3, 1, 11]
        elif dataset_index == 12:
            organ_list = [6,21,16,17,2,3]
        elif dataset_index == 13:
            organ_list = [6,2,3,1,11,8,9,7,4,5,12,13,25]
        else:
            organ_list = TEMPLATE[template_key]

        # -1 for organ not labeled
        for i in range(num_classes):
            if (i+1) not in organ_list:
                result[b, i] = -1
            else:
                result[b, i] = (input_lbl[b][0] == (i+1))

        # for tumor case
        # Tumor channels come from the raw (pre-template) labels, because the
        # template label merged tumors back into the host organ.
        if template_key in ['04', '05', '10_03', '10_07']:
            tumor_list = POST_TUMOR_DICT[template_key]
            for src, item in tumor_list:
                result[b, item - 1] = (raw_lbl[b][0] == src)

        if template_key in ['14']:
            # Felix data: raw classes 3/4/5 are all tumor; organ_list[-1] is
            # the template tumor index (28).
            tumor_lbl = torch.zeros(raw_lbl.shape)
            tumor_lbl[raw_lbl == 3] = 1
            tumor_lbl[raw_lbl == 4] = 1
            tumor_lbl[raw_lbl == 5] = 1
            result[b, organ_list[-1] - 1] = tumor_lbl[b][0]
    return result
266
+
267
# Preprocessing pipeline: load image / template label / raw label, map the
# dataset-native label onto the unified template, split merged right/left
# organs, and resample everything to 1.5 mm isotropic spacing.
label_process = Compose(
    [
        LoadImaged(keys=["image", "label", "label_raw"]),
        AddChanneld(keys=["image", "label", "label_raw"]),
        Orientationd(keys=["image", "label", "label_raw"], axcodes="RAS"),
        ToTemplatelabeld(keys=['label']),
        RL_Splitd(keys=['label']),
        Spacingd(
            keys=["image", "label", "label_raw"],
            pixdim=(1.5, 1.5, 1.5),
            mode=("bilinear", "nearest", "nearest"),), # process h5 to here
    ]
)

# Collect image/label paths for every case whose dataset key is in
# TRANSFER_LIST; 'name' is the label path without its file extension.
train_img = []
train_lbl = []
train_name = []

for line in open(ORGAN_LIST):
    key = get_key(line.strip().split()[0])
    if key in TRANSFER_LIST:
        train_img.append(ORGAN_DATASET_DIR + line.strip().split()[0])
        train_lbl.append(ORGAN_DATASET_DIR + line.strip().split()[1])
        train_name.append(line.strip().split()[1].split('.')[0])
data_dicts_train = [{'image': image, 'label': label, 'label_raw': label, 'name': name}
            for image, label, name in zip(train_img, train_lbl, train_name)]
print('train len {}'.format(len(data_dicts_train)))

train_dataset = Dataset(data=data_dicts_train, transform=label_process)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=NUM_WORKER,
                        collate_fn=list_data_collate)

# Convert each case into a multi-channel 0/1/-1 supervision map and store it
# as a gzip-compressed uint8 dataset in an .h5 file beside the label file.
for index, batch in enumerate(train_loader):
    x, y, y_raw, name = batch["image"], batch["label"], batch['label_raw'], batch['name']
    y = generate_label(y, NUM_CLASS, name, TEMPLATE, y_raw)
    name = batch['name'][0].replace('label', 'post_label')
    print(name)
    post_dir = ORGAN_DATASET_DIR + '/'.join(name.split('/')[:-1])
    # NOTE(review): -1 ("ignored") wraps to 255 in uint8 — confirm consumers
    # of the .h5 treat 255 as the ignore value.
    store_y = y.numpy().astype(np.uint8)
    if not os.path.exists(post_dir):
        os.makedirs(post_dir)
    with h5py.File(ORGAN_DATASET_DIR + name + '.h5', 'w') as f:
        f.create_dataset('post_label', data=store_y, compression='gzip', compression_opts=9)
        f.close()  # redundant: the with-block already closes the file
my_container_sandbox/workspace/difftumor/organ_mask_access/utils/utils.py ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, sys
2
+ import cc3d
3
+ import fastremap
4
+ import csv
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ import numpy as np
10
+ import pandas as pd
11
+ import matplotlib.pyplot as plt
12
+ from sklearn.ensemble import IsolationForest
13
+ #from pyod.models.knn import KNN
14
+ from math import ceil
15
+ from scipy.ndimage.filters import gaussian_filter
16
+ import warnings
17
+ from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union
18
+ from scipy import ndimage
19
+
20
+ from monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size
21
+ from monai.transforms import Resize, Compose
22
+ from monai.utils import (
23
+ BlendMode,
24
+ PytorchPadMode,
25
+ convert_data_type,
26
+ ensure_tuple,
27
+ fall_back_tuple,
28
+ look_up_option,
29
+ optional_import,
30
+ )
31
+
32
+ from monai.data import decollate_batch
33
+ from monai.transforms import Invertd, SaveImaged
34
+
35
# Number of classes in the unified label template (25 organs + tumors/cyst).
NUM_CLASS = 32


# TEMPLATE[key]: unified template indices annotated by dataset `key`
# (entries marked "post process" need tumor/right-left handling).
TEMPLATE={
    '01': [1,2,3,4,5,6,7,8,9,10,11,12,13,14],
    '01_2': [1,3,4,5,6,7,11,14],
    '02': [1,3,4,5,6,7,11,14],
    '03': [6],
    '04': [6,27], # post process
    '05': [2,3,26,32], # post process
    '06': [1,2,3,4,6,7,11,16,17],
    '07': [6,1,3,2,7,4,5,11,14,18,19,12,13,20,21,23,24],
    '08': [6, 2, 3, 1, 11],
    '09': [1,2,3,4,5,6,7,8,9,11,12,13,14,21,22],
    '12': [6,21,16,17,2,3],
    '13': [6,2,3,1,11,8,9,7,4,5,12,13,25],
    '14': [11, 28],
    '10_03': [6, 27], # post process
    '10_06': [30],
    '10_07': [11, 28], # post process
    '10_08': [15, 29], # post process
    '10_09': [1],
    # '10_10': [31],
    '10_10': [18],
    '15': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17] ## total segmentation
}

# ORGAN_NAME[i-1] is the human-readable name of template index i.
ORGAN_NAME = ['Spleen', 'Right Kidney', 'Left Kidney', 'Gall Bladder', 'Esophagus',
            'Liver', 'Stomach', 'Aorta', 'Postcava', 'Portal Vein and Splenic Vein',
            'Pancreas', 'Right Adrenal Gland', 'Left Adrenal Gland', 'Duodenum', 'Hepatic Vessel',
            'Right Lung', 'Left Lung', 'Colon', 'Intestine', 'Rectum',
            'Bladder', 'Prostate', 'Left Head of Femur', 'Right Head of Femur', 'Celiac Truck',
            'Kidney Tumor', 'Liver Tumor', 'Pancreas Tumor', 'Hepatic Vessel Tumor', 'Lung Tumor', 'Colon Tumor', 'Kidney Cyst']

## mapping to original setting
# (template index, dataset-native index) pairs used to project unified
# predictions back to each dataset's own label space.
MERGE_MAPPING_v1 = {
    '01': [(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10,10), (11,11), (12,12), (13,13), (14,14)],
    '02': [(1,1), (3,3), (4,4), (5,5), (6,6), (7,7), (11,11), (14,14)],
    '03': [(6,1)],
    '04': [(6,1), (27,2)],
    '05': [(2,1), (3,1), (26, 2), (32,3)],
    '06': [(1,1), (2,2), (3,3), (4,4), (6,5), (7,6), (11,7), (16,8), (17,9)],
    '07': [(1,2), (2,4), (3,3), (4,6), (5,7), (6,1), (7,5), (11,8), (12,12), (13,12), (14,9), (18,10), (19,11), (20,13), (21,14), (23,15), (24,16)],
    '08': [(1,3), (2,2), (3,2), (6,1), (11,4)],
    '09': [(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (11,10), (12,11), (13,12), (14,13), (21,14), (22,15)],
    '10_03': [(6,1), (27,2)],
    '10_06': [(30,1)],
    '10_07': [(11,1), (28,2)],
    '10_08': [(15,1), (29,2)],
    '10_09': [(1,1)],
    # '10_10': [(31,1)],
    '10_10': [(18,1)],
    '12': [(2,4), (3,4), (21,2), (6,1), (16,3), (17,3)],
    '13': [(1,3), (2,2), (3,2), (4,8), (5,9), (6,1), (7,7), (8,5), (9,6), (11,4), (12,10), (13,11), (25,12)],
    '15': [(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10,10), (11,11), (12,12), (13,13), (14,14), (16,16), (17,17), (18,18)],
}

## split left and right organ more than dataset defined
## expand on the original class number
MERGE_MAPPING_v2 = {
    '01': [(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10,10), (11,11), (12,12), (13,13), (14,14)],
    '02': [(1,1), (3,3), (4,4), (5,5), (6,6), (7,7), (11,11), (14,14)],
    '03': [(6,1)],
    '04': [(6,1), (27,2)],
    '05': [(2,1), (3,3), (26, 2), (32,3)],
    '06': [(1,1), (2,2), (3,3), (4,4), (6,5), (7,6), (11,7), (16,8), (17,9)],
    '07': [(1,2), (2,4), (3,3), (4,6), (5,7), (6,1), (7,5), (11,8), (12,12), (13,17), (14,9), (18,10), (19,11), (20,13), (21,14), (23,15), (24,16)],
    '08': [(1,3), (2,2), (3,5), (6,1), (11,4)],
    '09': [(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (11,10), (12,11), (13,12), (14,13), (21,14), (22,15)],
    '10_03': [(6,1), (27,2)],
    '10_06': [(30,1)],
    '10_07': [(11,1), (28,2)],
    '10_08': [(15,1), (29,2)],
    '10_09': [(1,1)],
    '10_10': [(31,1)],
    '12': [(2,4), (3,5), (21,2), (6,1), (16,3), (17,6)],
    '13': [(1,3), (2,2), (3,13), (4,8), (5,9), (6,1), (7,7), (8,5), (9,6), (11,4), (12,10), (13,11), (25,12)],
    '15': [(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10,10), (11,11), (12,12), (13,13), (14,14), (16,16), (17,17), (18,18)],
}

# Per-class binarization thresholds applied to predicted probabilities.
# NOTE(review): key 'Arota' (sic) does not match ORGAN_NAME's 'Aorta' —
# lookups keyed by organ name would miss it; confirm before renaming.
THRESHOLD_DIC = {
    'Spleen': 0.5,
    'Right Kidney': 0.5,
    'Left Kidney': 0.5,
    'Gall Bladder': 0.5,
    'Esophagus': 0.5,
    'Liver': 0.5,
    'Stomach': 0.5,
    'Arota': 0.5,
    'Postcava': 0.5,
    'Portal Vein and Splenic Vein': 0.5,
    'Pancreas': 0.5,
    'Right Adrenal Gland': 0.5,
    'Left Adrenal Gland': 0.5,
    'Duodenum': 0.5,
    'Hepatic Vessel': 0.5,
    'Right Lung': 0.5,
    'Left Lung': 0.5,
    'Colon': 0.5,
    'Intestine': 0.5,
    'Rectum': 0.5,
    'Bladder': 0.5,
    'Prostate': 0.5,
    'Left Head of Femur': 0.5,
    'Right Head of Femur': 0.5,
    'Celiac Truck': 0.5,
    'Kidney Tumor': 0.5,
    'Liver Tumor': 0.5,
    'Pancreas Tumor': 0.5,
    'Hepatic Vessel Tumor': 0.5,
    'Lung Tumor': 0.5,
    'Colon Tumor': 0.5,
    'Kidney Cyst': 0.5
}

# Minimum connected-component size (voxels) kept per tumor class.
TUMOR_SIZE = {
    'Kidney Tumor': 80,
    'Liver Tumor': 20,
    'Pancreas Tumor': 100,
    'Hepatic Vessel Tumor': 80,
    'Lung Tumor': 30,
    'Colon Tumor': 100,
    'Kidney Cyst': 20
}

# Maximum number of connected components kept per tumor class.
TUMOR_NUM = {
    'Kidney Tumor': 5,
    'Liver Tumor': 20,
    'Pancreas Tumor': 1,
    'Hepatic Vessel Tumor': 10,
    'Lung Tumor': 10,
    'Colon Tumor': 3,
    'Kidney Cyst': 20
}

# Host-organ template indices for each tumor class (tumors are constrained
# to lie inside these organ masks).
TUMOR_ORGAN = {
    'Kidney Tumor': [2,3],
    'Liver Tumor': [6],
    'Pancreas Tumor': [11],
    'Hepatic Vessel Tumor': [15],
    'Lung Tumor': [16,17],
    'Colon Tumor': [18],
    'Kidney Cyst': [2,3]
}
180
+
181
+
182
def organ_post_process(pred_mask, organ_list, save_dir, args):
    """
    Clean up per-organ binary prediction channels.

    pred_mask: [B, num_classes, w, h, d] binary predictions
    organ_list: 1-based template indices to post-process
    save_dir: directory for anomaly-detection plots; its last two path
        components are reused as dataset id / case id for the anomaly CSV
    args: only args.log_name (log root for anomaly.csv) is read
    Returns a same-shaped array; channels not in organ_list stay zero.
    """
    post_pred_mask = np.zeros(pred_mask.shape)
    plot_save_path = save_dir
    log_path = args.log_name
    dataset_id = save_dir.split('/')[-2]
    case_id = save_dir.split('/')[-1]
    if not os.path.isdir(plot_save_path):
        os.makedirs(plot_save_path)
    for b in range(pred_mask.shape[0]):
        for organ in organ_list:
            if organ == 11: # both process pancreas and Portal vein and splenic vein
                post_pred_mask[b,10] = extract_topk_largest_candidates(pred_mask[b,10], 1) # for pancreas
                if 10 in organ_list:
                    post_pred_mask[b,9] = PSVein_post_process(pred_mask[b,9], post_pred_mask[b,10])
            elif organ == 16:
                # Lungs: channels 15 (right) and 16 (left, 0-based) handled together.
                try:
                    left_lung_mask, right_lung_mask = lung_post_process(pred_mask[b])
                    post_pred_mask[b,16] = left_lung_mask
                    post_pred_mask[b,15] = right_lung_mask
                except IndexError:
                    # No lung components found: zero both channels and log the case.
                    print('this case does not have lungs!')
                    shape_temp = post_pred_mask[b,16].shape
                    post_pred_mask[b,16] = np.zeros(shape_temp)
                    post_pred_mask[b,15] = np.zeros(shape_temp)
                    with open(log_path + '/' + dataset_id +'/anomaly.csv','a',newline='') as f:
                        writer = csv.writer(f)
                        content = case_id
                        writer.writerow([content])

                right_lung_size = np.sum(post_pred_mask[b,15],axis=(0,1,2))
                left_lung_size = np.sum(post_pred_mask[b,16],axis=(0,1,2))

                print('left lung size: '+str(left_lung_size))
                print('right lung size: '+str(right_lung_size))

                #knn_model = KNN(n_neighbors=5,contamination=0.00001)
                right_lung_save_path = plot_save_path+'/right_lung.png'
                left_lung_save_path = plot_save_path+'/left_lung.png'
                total_anomly_slice_number=0

                # A >4x size ratio means either a single lung, the two lungs
                # merged into one component, or anomalous slices to repair.
                if right_lung_size>left_lung_size:
                    if right_lung_size/left_lung_size > 4:
                        mid_point = int(right_lung_mask.shape[0]/2)
                        left_region = np.sum(right_lung_mask[:mid_point,:,:],axis=(0,1,2))
                        right_region = np.sum(right_lung_mask[mid_point:,:,:],axis=(0,1,2))

                        if (right_region+1)/(left_region+1)>4:
                            print('this case only has right lung')
                            post_pred_mask[b,15] = right_lung_mask
                            post_pred_mask[b,16] = np.zeros(right_lung_mask.shape)
                        elif (left_region+1)/(right_region+1)>4:
                            print('this case only has left lung')
                            post_pred_mask[b,16] = right_lung_mask
                            post_pred_mask[b,15] = np.zeros(right_lung_mask.shape)
                        else:
                            print('need anomly detection')
                            print('start anomly detection at right lung')
                            try:
                                # Repeat anomaly repair until the two lungs are balanced.
                                left_lung_mask,right_lung_mask,total_anomly_slice_number = anomly_detection(pred_mask,post_pred_mask[b,15],right_lung_save_path,b,total_anomly_slice_number)
                                post_pred_mask[b,16] = left_lung_mask
                                post_pred_mask[b,15] = right_lung_mask
                                right_lung_size = np.sum(post_pred_mask[b,15],axis=(0,1,2))
                                left_lung_size = np.sum(post_pred_mask[b,16],axis=(0,1,2))
                                while right_lung_size/left_lung_size>4 or left_lung_size/right_lung_size>4:
                                    print('still need anomly detection')
                                    if right_lung_size>left_lung_size:
                                        left_lung_mask,right_lung_mask,total_anomly_slice_number = anomly_detection(pred_mask,post_pred_mask[b,15],right_lung_save_path,b,total_anomly_slice_number)
                                    else:
                                        left_lung_mask,right_lung_mask,total_anomly_slice_number = anomly_detection(pred_mask,post_pred_mask[b,16],right_lung_save_path,b,total_anomly_slice_number)
                                    post_pred_mask[b,16] = left_lung_mask
                                    post_pred_mask[b,15] = right_lung_mask
                                    right_lung_size = np.sum(post_pred_mask[b,15],axis=(0,1,2))
                                    left_lung_size = np.sum(post_pred_mask[b,16],axis=(0,1,2))
                                print('lung seperation complete')
                            except IndexError:
                                # Separation failed: fall back to the raw split and log the case.
                                left_lung_mask, right_lung_mask = lung_post_process(pred_mask[b])
                                post_pred_mask[b,16] = left_lung_mask
                                post_pred_mask[b,15] = right_lung_mask
                                print("cannot seperate two lungs, writing csv")
                                with open(log_path + '/' + dataset_id +'/anomaly.csv','a',newline='') as f:
                                    writer = csv.writer(f)
                                    content = case_id
                                    writer.writerow([content])
                else:
                    # Mirror of the branch above, keyed on the left lung.
                    if left_lung_size/right_lung_size > 4:
                        mid_point = int(left_lung_mask.shape[0]/2)
                        left_region = np.sum(left_lung_mask[:mid_point,:,:],axis=(0,1,2))
                        right_region = np.sum(left_lung_mask[mid_point:,:,:],axis=(0,1,2))
                        if (right_region+1)/(left_region+1)>4:
                            print('this case only has right lung')
                            post_pred_mask[b,15] = left_lung_mask
                            post_pred_mask[b,16] = np.zeros(left_lung_mask.shape)
                        elif (left_region+1)/(right_region+1)>4:
                            print('this case only has left lung')
                            post_pred_mask[b,16] = left_lung_mask
                            post_pred_mask[b,15] = np.zeros(left_lung_mask.shape)
                        else:

                            print('need anomly detection')
                            print('start anomly detection at left lung')
                            try:
                                left_lung_mask,right_lung_mask,total_anomly_slice_number = anomly_detection(pred_mask,post_pred_mask[b,16],left_lung_save_path,b,total_anomly_slice_number)
                                post_pred_mask[b,16] = left_lung_mask
                                post_pred_mask[b,15] = right_lung_mask
                                right_lung_size = np.sum(post_pred_mask[b,15],axis=(0,1,2))
                                left_lung_size = np.sum(post_pred_mask[b,16],axis=(0,1,2))
                                while right_lung_size/left_lung_size>4 or left_lung_size/right_lung_size>4:
                                    print('still need anomly detection')
                                    if right_lung_size>left_lung_size:
                                        left_lung_mask,right_lung_mask,total_anomly_slice_number = anomly_detection(pred_mask,post_pred_mask[b,15],right_lung_save_path,b,total_anomly_slice_number)
                                    else:
                                        left_lung_mask,right_lung_mask,total_anomly_slice_number = anomly_detection(pred_mask,post_pred_mask[b,16],right_lung_save_path,b,total_anomly_slice_number)
                                    post_pred_mask[b,16] = left_lung_mask
                                    post_pred_mask[b,15] = right_lung_mask
                                    right_lung_size = np.sum(post_pred_mask[b,15],axis=(0,1,2))
                                    left_lung_size = np.sum(post_pred_mask[b,16],axis=(0,1,2))

                                print('lung seperation complete')
                            except IndexError:
                                left_lung_mask, right_lung_mask = lung_post_process(pred_mask[b])
                                post_pred_mask[b,16] = left_lung_mask
                                post_pred_mask[b,15] = right_lung_mask
                                print("cannot seperate two lungs, writing csv")
                                with open(log_path + '/' + dataset_id +'/anomaly.csv','a',newline='') as f:
                                    writer = csv.writer(f)
                                    content = case_id
                                    writer.writerow([content])
                print('find number of anomaly slice: '+str(total_anomly_slice_number))
            elif organ == 17:
                continue ## the le
            elif organ in [1,2,3,4,5,6,7,8,9,12,13,14,18,19,20,21,22,23,24,25]: ## rest organ index
                # Single-instance organs: keep only the largest component.
                post_pred_mask[b,organ-1] = extract_topk_largest_candidates(pred_mask[b,organ-1], 1)
            # elif organ in [28,29,30,31,32]:
            #     post_pred_mask[b,organ-1] = extract_topk_largest_candidates(pred_mask[b,organ-1], TUMOR_NUM[ORGAN_NAME[organ-1]], area_least=TUMOR_SIZE[ORGAN_NAME[organ-1]])
            elif organ in [26,27]:
                # Kidney/liver tumors: restrict the tumor to its host organ region.
                organ_mask = merge_and_top_organ(pred_mask[b], TUMOR_ORGAN[ORGAN_NAME[organ-1]])
                post_pred_mask[b,organ-1] = organ_region_filter_out(pred_mask[b,organ-1], organ_mask)
                # post_pred_mask[b,organ-1] = extract_topk_largest_candidates(post_pred_mask[b,organ-1], TUMOR_NUM[ORGAN_NAME[organ-1]], area_least=TUMOR_SIZE[ORGAN_NAME[organ-1]])
            else:
                post_pred_mask[b,organ-1] = pred_mask[b,organ-1]
    return post_pred_mask
329
+
330
def lung_overlap_post_process(pred_mask):
    """
    Try to split a (possibly merged) binary lung mask via 26-connectivity
    connected components.

    Returns (num_components, left_mask, right_mask) when more than one
    component is found, otherwise (1, merged_mask). Note the differing tuple
    arity — callers must check element 0 before unpacking.
    """
    new_mask = np.zeros(pred_mask.shape, np.uint8)
    new_mask[pred_mask==1] = 1
    label_out = cc3d.connected_components(new_mask, connectivity=26)

    # Voxel count of every foreground component, largest first.
    areas = {}
    for label, extracted in cc3d.each(label_out, binary=True, in_place=True):
        areas[label] = fastremap.foreground(extracted)
    candidates = sorted(areas.items(), key=lambda item: item[1], reverse=True)
    num_candidates = len(candidates)
    if num_candidates!=1:
        print('start separating two lungs!')
        ONE = int(candidates[0][0])
        TWO = int(candidates[1][0])


        print('number of connected components:'+str(len(candidates)))
        a1,b1,c1 = np.where(label_out==ONE)
        a2,b2,c2 = np.where(label_out==TWO)

        left_lung_mask = np.zeros(label_out.shape)
        right_lung_mask = np.zeros(label_out.shape)

        # Smaller mean index along the first axis -> left lung.
        if np.mean(a1) < np.mean(a2):
            left_lung_mask[label_out==ONE] = 1
            right_lung_mask[label_out==TWO] = 1
        else:
            right_lung_mask[label_out==ONE] = 1
            left_lung_mask[label_out==TWO] = 1
        erosion_left_lung_size = np.sum(left_lung_mask,axis=(0,1,2))
        erosion_right_lung_size = np.sum(right_lung_mask,axis=(0,1,2))
        print('erosion left lung size:'+str(erosion_left_lung_size))
        print('erosion right lung size:'+ str(erosion_right_lung_size))
        return num_candidates,left_lung_mask, right_lung_mask
    else:
        # Still a single component: the caller will erode further and retry.
        print('current iteration cannot separate lungs, erosion iteration + 1')
        ONE = int(candidates[0][0])
        print('number of connected components:'+str(len(candidates)))
        lung_mask = np.zeros(label_out.shape)
        lung_mask[label_out == ONE]=1
        lung_overlapped_mask_size = np.sum(lung_mask,axis=(0,1,2))
        print('lung overlapped mask size:' + str(lung_overlapped_mask_size))

        return num_candidates,lung_mask
374
+
375
def find_best_iter_and_masks(lung_mask):
    """
    Separate a merged lung mask by repeated binary erosion until two balanced
    components appear, then grow the eroded halves back by assigning each
    eroded-away voxel to the nearer component (distance transform) and fill
    holes. Returns (left_mask, right_mask).
    """
    iter=1
    print('current iteration:' + str(iter))
    # Full 3x3x3 (26-connected) structuring element.
    struct2 = ndimage.generate_binary_structure(3, 3)
    erosion_mask= ndimage.binary_erosion(lung_mask, structure=struct2,iterations=iter)
    candidates_and_masks = lung_overlap_post_process(erosion_mask)
    # Erode until the mask splits into at least two components.
    while candidates_and_masks[0]==1:
        iter +=1
        print('current iteration:' + str(iter))
        erosion_mask= ndimage.binary_erosion(lung_mask, structure=struct2,iterations=iter)
        candidates_and_masks = lung_overlap_post_process(erosion_mask)
    print('check if components are valid')
    left_lung_erosion_mask = candidates_and_masks[1]
    right_lung_erosion_mask = candidates_and_masks[2]
    left_lung_erosion_mask_size = np.sum(left_lung_erosion_mask,axis = (0,1,2))
    right_lung_erosion_mask_size = np.sum(right_lung_erosion_mask,axis = (0,1,2))
    # Keep eroding while the two components are badly unbalanced (>4x).
    while left_lung_erosion_mask_size/right_lung_erosion_mask_size>4 or right_lung_erosion_mask_size/left_lung_erosion_mask_size>4:
        print('components still have large difference, erosion interation + 1')
        iter +=1
        print('current iteration:' + str(iter))
        erosion_mask= ndimage.binary_erosion(lung_mask, structure=struct2,iterations=iter)
        candidates_and_masks = lung_overlap_post_process(erosion_mask)
        while candidates_and_masks[0]==1:
            iter +=1
            print('current iteration:' + str(iter))
            erosion_mask= ndimage.binary_erosion(lung_mask, structure=struct2,iterations=iter)
            candidates_and_masks = lung_overlap_post_process(erosion_mask)
        left_lung_erosion_mask = candidates_and_masks[1]
        right_lung_erosion_mask = candidates_and_masks[2]
        left_lung_erosion_mask_size = np.sum(left_lung_erosion_mask,axis = (0,1,2))
        right_lung_erosion_mask_size = np.sum(right_lung_erosion_mask,axis = (0,1,2))
    print('erosion done, best iteration: '+str(iter))



    print('start dilation')
    left_lung_erosion_mask = candidates_and_masks[1]
    right_lung_erosion_mask = candidates_and_masks[2]

    # Voxels removed by the erosion; each is given back to the component whose
    # eroded core is strictly nearer (euclidean distance transform).
    erosion_part_mask = lung_mask - left_lung_erosion_mask - right_lung_erosion_mask
    left_lung_dist = np.ones(left_lung_erosion_mask.shape)
    right_lung_dist = np.ones(right_lung_erosion_mask.shape)
    left_lung_dist[left_lung_erosion_mask==1]=0
    right_lung_dist[right_lung_erosion_mask==1]=0
    left_lung_dist_map = ndimage.distance_transform_edt(left_lung_dist)
    right_lung_dist_map = ndimage.distance_transform_edt(right_lung_dist)
    left_lung_dist_map[erosion_part_mask==0]=0
    right_lung_dist_map[erosion_part_mask==0]=0
    left_lung_adding_map = left_lung_dist_map < right_lung_dist_map
    right_lung_adding_map = right_lung_dist_map < left_lung_dist_map

    left_lung_erosion_mask[left_lung_adding_map==1]=1
    right_lung_erosion_mask[right_lung_adding_map==1]=1

    left_lung_mask = left_lung_erosion_mask
    right_lung_mask = right_lung_erosion_mask
    # left_lung_mask = ndimage.binary_dilation(left_lung_erosion_mask, structure=struct2,iterations=iter)
    # right_lung_mask = ndimage.binary_dilation(right_lung_erosion_mask, structure=struct2,iterations=iter)
    print('dilation complete')
    left_lung_mask_fill_hole = ndimage.binary_fill_holes(left_lung_mask)
    right_lung_mask_fill_hole = ndimage.binary_fill_holes(right_lung_mask)
    left_lung_size = np.sum(left_lung_mask_fill_hole,axis=(0,1,2))
    right_lung_size = np.sum(right_lung_mask_fill_hole,axis=(0,1,2))
    print('new left lung size:'+str(left_lung_size))
    print('new right lung size:' + str(right_lung_size))
    return left_lung_mask_fill_hole,right_lung_mask_fill_hole
441
+
442
+
443
+ # def anomly_detection(pred_mask,post_pred_mask,model,save_path,batch):
444
+ # lung_df = get_dataframe(post_pred_mask)
445
+ # lung_pred_df = fit_model(model,lung_df)
446
+ # plot_anomalies(lung_pred_df,save_dir=save_path)
447
+ # anomly_df = lung_pred_df[lung_pred_df['Predictions']==1]
448
+ # anomly_slice = anomly_df['slice_index'].to_numpy()
449
+ # for s in anomly_slice:
450
+ # pred_mask[batch,15,:,:,s]=0
451
+ # pred_mask[batch,16,:,:,s]=0
452
+ # left_lung_mask, right_lung_mask = lung_post_process(pred_mask[batch])
453
+ # return left_lung_mask, right_lung_mask
454
+
455
def anomly_detection(pred_mask, post_pred_mask, save_path, batch, anomly_num):
    """
    Detect anomalous axial slices in one lung channel via a rolling-mean /
    rolling-std upper bound on the per-slice foreground counts; zero out the
    confirmed anomalous slices in both lung channels of `pred_mask`, then
    re-split the lungs. If no real anomaly is found, fall back to
    erosion-based separation of the overlapping mask.

    Returns (left_lung_mask, right_lung_mask, total_anomly_slice_number).
    Side effect: mutates pred_mask[batch, 15/16] on anomalous slices.
    """
    total_anomly_slice_number = anomly_num
    df = get_dataframe(post_pred_mask)
    # lung_pred_df = fit_model(model,lung_df)
    # NOTE(review): lung_df is a filtered view of df; the column assignments
    # below trigger pandas' SettingWithCopyWarning — confirm intent.
    lung_df = df[df['array_sum']!=0]
    # A slice is flagged when its count exceeds SMA20 + 2*STD20.
    lung_df['SMA20'] = lung_df['array_sum'].rolling(20,min_periods=1,center=True).mean()
    lung_df['STD20'] = lung_df['array_sum'].rolling(20,min_periods=1,center=True).std()
    lung_df['SMA7'] = lung_df['array_sum'].rolling(7,min_periods=1,center=True).mean()
    lung_df['upper_bound'] = lung_df['SMA20']+2*lung_df['STD20']
    lung_df['Predictions'] = lung_df['array_sum']>lung_df['upper_bound']
    lung_df['Predictions'] = lung_df['Predictions'].astype(int)
    lung_df.dropna(inplace=True)
    anomly_df = lung_df[lung_df['Predictions']==1]
    anomly_slice = anomly_df['slice_index'].to_numpy()
    anomly_value = anomly_df['array_sum'].to_numpy()
    anomly_SMA7 = anomly_df['SMA7'].to_numpy()

    print('decision made')
    if len(anomly_df)!=0:
        print('anomaly point detected')
        print('check if the anomaly points are real')
        # A flagged slice is "real" only if it also exceeds its short moving
        # average by more than 200 voxels.
        real_anomly_slice = []
        for i in range(len(anomly_df)):
            if anomly_value[i] > anomly_SMA7[i]+200:
                print('the anomaly point is real')
                real_anomly_slice.append(anomly_slice[i])
                total_anomly_slice_number+=1

        if len(real_anomly_slice)!=0:


            plot_anomalies(lung_df,save_dir=save_path)
            print('anomaly detection plot created')
            # Blank the anomalous slices in both lung channels and re-split.
            for s in real_anomly_slice:
                pred_mask[batch,15,:,:,s]=0
                pred_mask[batch,16,:,:,s]=0
            left_lung_mask, right_lung_mask = lung_post_process(pred_mask[batch])
            left_lung_size = np.sum(left_lung_mask,axis=(0,1,2))
            right_lung_size = np.sum(right_lung_mask,axis=(0,1,2))
            print('new left lung size:'+str(left_lung_size))
            print('new right lung size:' + str(right_lung_size))
            return left_lung_mask,right_lung_mask,total_anomly_slice_number
        else:
            print('the anomaly point is not real, start separate overlapping')
            left_lung_mask,right_lung_mask = find_best_iter_and_masks(post_pred_mask)
            return left_lung_mask,right_lung_mask,total_anomly_slice_number


    # No flagged slices at all: treat the mask as two overlapping lungs.
    print('overlap detected, start erosion and dilation')
    left_lung_mask,right_lung_mask = find_best_iter_and_masks(post_pred_mask)

    return left_lung_mask,right_lung_mask,total_anomly_slice_number
507
+
508
def get_dataframe(post_pred_mask):
    """Build a per-slice summary table for a 3-D mask.

    Each row corresponds to one slice along the last axis; 'array_sum'
    is the mask value summed over the first two axes of that slice.
    """
    per_slice_sum = np.sum(post_pred_mask, axis=(0, 1))
    return pd.DataFrame({
        'slice_index': np.arange(post_pred_mask.shape[-1]),
        'array_sum': per_slice_sum,
    })
514
+
515
def plot_anomalies(df, x='slice_index', y='array_sum', save_dir=None):
    """Visualize per-slice sums with the rolling mean, upper bound and
    anomaly predictions (red = anomaly, green = normal).

    Expects columns 'Predictions' (0/1), 'SMA20' and 'upper_bound' in df.
    """
    # Map prediction labels {0, 1} onto scatter colors.
    point_colors = np.array(['g', 'r'])[df['Predictions'].to_numpy()]

    plt.figure(figsize=(12, 4))
    plt.plot(df[x], df['SMA20'], 'b')
    plt.plot(df[x], df['upper_bound'], 'y')
    plt.scatter(df[x], df[y], c=point_colors, alpha=0.3)
    plt.xlabel(x)
    plt.ylabel(y)
    plt.legend(['Simple moving average', 'upper bound', 'predictions'])
    if save_dir is not None:
        plt.savefig(save_dir)
    plt.clf()
531
+
532
def merge_and_top_organ(pred_mask, organ_list):
    """Union the channels listed in organ_list (1-based channel indices)
    and keep only the len(organ_list) largest connected components,
    e.g. both components for a left/right organ pair."""
    merged = np.zeros(pred_mask.shape[1:], np.uint8)
    for organ in organ_list:
        merged = np.logical_or(merged, pred_mask[organ - 1])
    return extract_topk_largest_candidates(merged, len(organ_list))
541
+
542
def organ_region_filter_out(tumor_mask, organ_mask):
    """Restrict a tumor mask to a slightly grown version of its organ.

    The organ mask is morphologically closed and then dilated with a
    5x5x5 structuring element before being used as a multiplicative
    gate, so tumor voxels just outside the raw organ boundary survive.
    """
    struct = np.ones((5, 5, 5))
    grown_organ = ndimage.binary_closing(organ_mask, structure=struct)
    grown_organ = ndimage.binary_dilation(grown_organ, structure=struct)
    return grown_organ * tumor_mask
550
+
551
+
552
def PSVein_post_process(PSVein_mask, pancreas_mask):
    """Remove portal/splenic-vein voxels below the pancreas.

    Finds the lowest z-slice containing pancreas and zeroes every PSVein
    voxel in slices strictly below it. Like the original, this raises
    ValueError (via np.min on an empty selection) if pancreas_mask is empty.
    """
    pancreas_per_slice = pancreas_mask.sum(axis=(0, 1))
    lowest_z = np.min(np.nonzero(pancreas_per_slice))
    cleaned = PSVein_mask.copy()
    cleaned[:, :, :lowest_z] = 0
    return cleaned
559
+
560
def lung_post_process(pred_mask):
    """Split predicted lung voxels into left and right lung masks.

    pred_mask: multi-channel prediction volume; channels 15 and 16 are
    merged before connected-component analysis (presumably the two lung
    channels of the label template -- TODO confirm).

    Returns (left_lung_mask, right_lung_mask) as float arrays with the
    single-channel spatial shape of pred_mask.

    NOTE(review): assumes the merged mask has at least two connected
    components; otherwise candidates[1] raises IndexError.
    """
    # Merge both lung channels into one binary volume.
    new_mask = np.zeros(pred_mask.shape[1:], np.uint8)
    new_mask[pred_mask[15] == 1] = 1
    new_mask[pred_mask[16] == 1] = 1
    label_out = cc3d.connected_components(new_mask, connectivity=26)

    # Rank 26-connected components by foreground voxel count.
    areas = {}
    for label, extracted in cc3d.each(label_out, binary=True, in_place=True):
        areas[label] = fastremap.foreground(extracted)
    candidates = sorted(areas.items(), key=lambda item: item[1], reverse=True)

    # The two largest components are taken to be the two lungs.
    ONE = int(candidates[0][0])
    TWO = int(candidates[1][0])

    a1,b1,c1 = np.where(label_out==ONE)
    a2,b2,c2 = np.where(label_out==TWO)

    left_lung_mask = np.zeros(label_out.shape)
    right_lung_mask = np.zeros(label_out.shape)

    # Assign left/right by mean coordinate along the first axis.
    if np.mean(a1) < np.mean(a2):
        left_lung_mask[label_out==ONE] = 1
        right_lung_mask[label_out==TWO] = 1
    else:
        right_lung_mask[label_out==ONE] = 1
        left_lung_mask[label_out==TWO] = 1

    return left_lung_mask, right_lung_mask
588
+
589
def extract_topk_largest_candidates(npy_mask, organ_num, area_least=0):
    """Return a binary (uint8) mask keeping only the organ_num largest
    connected components of npy_mask (w, h, d) whose foreground size
    exceeds area_least."""
    result = np.zeros(npy_mask.shape, np.uint8)
    keep_topk_largest_connected_object(npy_mask.copy(), organ_num, area_least, result, 1)
    return result
597
+
598
+
599
def keep_topk_largest_connected_object(npy_mask, k, area_least, out_mask, out_label):
    """Write out_label into out_mask for the k largest 26-connected
    components of npy_mask whose foreground size is greater than
    area_least. Mutates out_mask in place; returns None."""
    labels_out = cc3d.connected_components(npy_mask, connectivity=26)
    sizes = {}
    for comp_label, comp in cc3d.each(labels_out, binary=True, in_place=True):
        sizes[comp_label] = fastremap.foreground(comp)
    ranked = sorted(sizes.items(), key=lambda kv: kv[1], reverse=True)

    for comp_label, comp_size in ranked[:k]:
        if comp_size > area_least:
            out_mask[labels_out == int(comp_label)] = out_label
609
+
610
def threshold_organ(data, organ=None, threshold=None):
    """Binarize sigmoid predictions with per-organ thresholds.

    data: (B, C, ...) sigmoid probabilities; channel order follows the
    iteration order of the module-level THRESHOLD_DIC. If organ and
    threshold are given, that organ's threshold is overridden first.

    NOTE(review): the override mutates THRESHOLD_DIC itself, so it
    persists across subsequent calls.
    """
    if organ:
        THRESHOLD_DIC[organ] = threshold
    thresholds = list(THRESHOLD_DIC.values())

    batch = data.shape[0]
    # One threshold per channel, broadcast over the spatial dimensions.
    bounds = torch.tensor(thresholds).repeat(batch, 1).reshape(batch, len(thresholds), 1, 1, 1).cuda()
    return data > bounds
623
+
624
+
625
def visualize_label(batch, save_dir, input_transform):
    """Invert the preprocessing transforms on the predicted organ masks
    and save them to disk.

    batch: batch dict produced by the monai dataloader, containing the
        prediction keys 'liver', 'pancreas' and 'kidney' plus 'image'.
    save_dir: output directory for the saved masks.
    input_transform: the dataloader transform chain to invert.
    """
    organ_keys = ['liver', 'pancreas', 'kidney']

    # First map every predicted mask back to the original image space...
    inverters = [
        Invertd(
            keys=[key],
            transform=input_transform,
            orig_keys="image",
            nearest_interp=True,
            to_tensor=True,
        )
        for key in organ_keys
    ]
    # ...then write each one under save_dir with the organ name as suffix.
    savers = [
        SaveImaged(
            keys=key,
            meta_keys="image_meta_dict",
            output_dir=save_dir,
            output_postfix=key,
            resample=False,
            separate_folder=False,
            output_dtype=np.uint8,
        )
        for key in organ_keys
    ]
    post_transforms = Compose(inverters + savers)

    batch = [post_transforms(i) for i in decollate_batch(batch)]
681
+
682
def get_key(name):
    """Derive the template key from a case name.

    The first two characters identify the dataset; dataset 10
    additionally encodes a sub-task in characters 17:19
    (key becomes '10_<sub>').
    """
    prefix = name[0:2]
    if int(prefix) == 10:
        return prefix + '_' + name[17:19]
    return prefix
691
+
692
+
693
def dice_score(preds, labels, spe_sen=False):  # on GPU
    """Compute Dice, recall and precision (optionally also specificity)
    between a prediction volume and a label volume.

    preds is binarized at 0.5; both tensors are flattened before the
    confusion-matrix terms are accumulated. The Dice denominator carries
    a +1 smoothing term.
    """
    assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match"
    hard_pred = torch.where(preds > 0.5, 1., 0.).contiguous().view(1, -1)
    flat_target = labels.contiguous().view(1, -1)

    tp = torch.sum(hard_pred * flat_target)
    fn = torch.sum((hard_pred != 1) * flat_target)
    fp = torch.sum(hard_pred * (flat_target != 1))
    tn = torch.sum((hard_pred != 1) * (flat_target != 1))

    # +1 smoothing keeps the Dice denominator non-zero.
    smooth_den = torch.sum(hard_pred) + torch.sum(flat_target) + 1

    dice = 2 * tp / smooth_den
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    specificity = tn / (fp + tn)

    if spe_sen:
        return dice, recall, precision, specificity
    return dice, recall, precision
718
+
719
+
720
+ def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:
721
+ tmp = np.zeros(patch_size)
722
+ center_coords = [i // 2 for i in patch_size]
723
+ sigmas = [i * sigma_scale for i in patch_size]
724
+ tmp[tuple(center_coords)] = 1
725
+ gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)
726
+ gaussian_importance_map = gaussian_importance_map / np.max(gaussian_importance_map) * 1
727
+ gaussian_importance_map = gaussian_importance_map.astype(np.float32)
728
+
729
+ # gaussian_importance_map cannot be 0, otherwise we may end up with nans!
730
+ gaussian_importance_map[gaussian_importance_map == 0] = np.min(
731
+ gaussian_importance_map[gaussian_importance_map != 0])
732
+
733
+ return gaussian_importance_map
734
+
735
+
736
def multi_net(net_list, img, task_id):
    """Ensemble prediction: average the sigmoid outputs of every network
    in net_list on the same (img, task_id) input.

    net_list: non-empty sequence of callables net(img, task_id) -> logits.
    Returns the mean of the per-network sigmoid probabilities as a tensor.
    """
    # torch.sigmoid replaces F.sigmoid, which has been deprecated since
    # PyTorch 0.4.1; numerically identical.
    padded_prediction = torch.sigmoid(net_list[0](img, task_id))
    for net in net_list[1:]:
        padded_prediction += torch.sigmoid(net(img, task_id))
    padded_prediction /= len(net_list)
    return padded_prediction
747
+
748
+
749
def check_data(dataset_check):
    """Sanity-check the first dataset sample: print its name, image/label
    shapes and the unique label values on slice 150, then display that
    slice of the image and label side by side."""
    sample = dataset_check[0]
    img = sample["image"]
    label = sample["label"]
    print(sample["name"])
    print(f"image shape: {img.shape}, label shape: {label.shape}")
    print(torch.unique(label[0, :, :, 150]))

    plt.figure("image", (18, 6))
    plt.subplot(1, 2, 1)
    plt.title("image")
    plt.imshow(img[0, :, :, 150].detach().cpu(), cmap="gray")
    plt.subplot(1, 2, 2)
    plt.title("label")
    plt.imshow(label[0, :, :, 150].detach().cpu())
    plt.show()
765
+
766
if __name__ == "__main__":
    # Smoke test: threshold a dummy all-zero prediction.
    # NOTE(review): threshold_organ calls .cuda(), so this requires a GPU.
    threshold_organ(torch.zeros(1,12,1))
my_container_sandbox/workspace/difftumor/tumor_mask_access/.DS_Store ADDED
Binary file (6.15 kB). View file
 
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/__pycache__/task_params.cpython-38.pyc ADDED
Binary file (1.43 kB). View file
 
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/calculate_task_params.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ import os
13
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
14
+
15
+ from monai.data import (
16
+ Dataset,
17
+ DatasetSummary,
18
+ load_decathlon_datalist,
19
+ load_decathlon_properties,
20
+ )
21
+ from monai.transforms import LoadImaged
22
+
23
+ from task_params import task_name
24
+
25
+
26
def get_task_params(args):
    """Print dataset statistics for one decathlon task.

    Reports the target spacing of the task's training data and, when the
    modality is CT, also the intensity mean/std (used for normalization)
    and the 0.5 / 99.5 intensity percentiles (used for clipping).
    """
    task_id = args.task_id
    dataset_path = os.path.join(args.root_dir, task_name[task_id])
    datalist_file = os.path.join(args.datalist_path, "dataset_task{}.json".format(task_id))

    # All training image/label pairs for this task.
    datalist = load_decathlon_datalist(datalist_file, True, "training", dataset_path)

    # Modality decides whether intensity statistics are meaningful.
    properties = load_decathlon_properties(datalist_file, "modality")

    dataset = Dataset(
        data=datalist,
        transform=LoadImaged(keys=["image", "label"]),
    )

    calculator = DatasetSummary(dataset, num_workers=4)
    print("spacing: ", calculator.get_target_spacing())

    if properties["modality"]["0"] == "CT":
        print("CT input, calculate statistics:")
        calculator.calculate_statistics()
        print("mean: ", calculator.data_mean, " std: ", calculator.data_std)
        calculator.calculate_percentiles(sampling_flag=True, interval=10, min_percentile=0.5, max_percentile=99.5)
        print(
            "min: ",
            calculator.data_min_percentile,
            " max: ",
            calculator.data_max_percentile,
        )
    else:
        print("non CT input, skip calculating.")
67
+
68
+
69
if __name__ == "__main__":
    # CLI: choose the decathlon task and where the data and datalist
    # JSON files live, then print the task's statistics.
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-task_id", "--task_id", type=str, default="04", help="task 01 to 10")
    parser.add_argument(
        "-root_dir",
        "--root_dir",
        type=str,
        default="/home/v-qichen2/qic/data/CT/10_Decathlon/",
        help="dataset path",
    )
    # Directory containing the dataset_taskXX.json datalist files.
    parser.add_argument(
        "-datalist_path",
        "--datalist_path",
        type=str,
        default="config/",
    )

    args = parser.parse_args()
    get_task_params(args)
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task01/finetune_multi_gpu.sh ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+
15
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
lr=1e-2
fold=0
weight=model.pt

# Two-GPU distributed finetuning for task 01.
python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 --node_rank=0 \
    --master_addr="localhost" --master_port=1234 \
    train.py -fold $fold -train_num_workers 4 -interval 10 -num_samples 1 \
    -learning_rate $lr -max_epochs 1000 -task_id 01 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -multi_gpu True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task02/finetune.sh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+
15
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
lr=1e-2
fold=0
weight=model.pt

# Single-GPU deterministic finetuning for task 02.
python train.py -fold $fold -train_num_workers 4 -interval 1 -num_samples 4 \
    -learning_rate $lr -max_epochs 500 -task_id 02 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -determinism_flag True \
    -determinism_seed 0
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task03/finetune_multi_gpu.sh ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+
15
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
lr=1e-2
fold=0
weight=model.pt

# Two-GPU distributed finetuning for task 03.
python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 --node_rank=0 \
    --master_addr="localhost" --master_port=1234 \
    train.py -fold $fold -train_num_workers 8 -interval 20 -num_samples 1 \
    -learning_rate $lr -max_epochs 2000 -task_id 03 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -multi_gpu True \
    -eval_overlap 0.5 -sw_batch_size 2 -batch_dice True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task04/train.sh ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 1, with large learning rate
13
+
14
# Step-1 training hyper-parameters (large learning rate).
lr=1e-1
fold=0

# Single-GPU deterministic training for task 04.
python train.py -fold $fold -train_num_workers 4 -interval 1 -num_samples 1 \
    -learning_rate $lr -max_epochs 500 -task_id 04 -pos_sample_num 2 \
    -expr_name baseline -tta_val True -determinism_flag True -determinism_seed 0
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task04/train_multi_gpu.sh ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 1, with large learning rate
13
+
14
# Step-1 training hyper-parameters (large learning rate).
lr=1e-1
fold=0

# Two-GPU distributed training for task 04.
python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 --node_rank=0 \
    --master_addr="localhost" --master_port=1234 \
    train.py -fold $fold -train_num_workers 4 -interval 1 -num_samples 1 \
    -learning_rate $lr -max_epochs 500 -task_id 04 -pos_sample_num 2 \
    -expr_name baseline -tta_val True -multi_gpu True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task04/val_multi_gpu.sh ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # please replace the weight variable into your actual weight
13
+
14
# weight must point to the trained checkpoint to evaluate.
weight=model.pt
fold=0

# Two-GPU distributed validation for task 04.
python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 --node_rank=0 \
    --master_addr="localhost" --master_port=1234 \
    train.py -fold $fold -expr_name baseline -task_id 04 -tta_val True \
    -checkpoint $weight -mode val -multi_gpu True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task05/finetune.sh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+
15
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
lr=1e-2
fold=0
weight=model.pt

# Single-GPU deterministic finetuning for task 05.
python train.py -fold $fold -train_num_workers 4 -interval 1 -num_samples 4 \
    -learning_rate $lr -max_epochs 1000 -task_id 05 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -determinism_flag True \
    -determinism_seed 0
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task06/finetune.sh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+
15
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
lr=1e-3
fold=0
weight=model.pt

# Single-GPU deterministic finetuning for task 06 with batch dice.
python train.py -fold $fold -train_num_workers 4 -interval 5 -num_samples 1 \
    -learning_rate $lr -max_epochs 1000 -task_id 06 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -determinism_flag True \
    -determinism_seed 0 -batch_dice True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task07/finetune_multi_gpu.sh ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+ # since this task uses lr scheduler, please set the lr and max epochs
15
+ # here according to the step 1 training results. The value of max epochs equals
16
+ # to 2000 minus the best epoch in step 1.
17
+
18
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
# max_epochs should be 2000 minus the best epoch reached in step 1
# (this task uses an lr scheduler; see the header comment above).
lr=5e-3
max_epochs=1000
fold=0
weight=model.pt

# Two-GPU distributed finetuning for task 07 with lr decay.
python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 --node_rank=0 \
    --master_addr="localhost" --master_port=1234 \
    train.py -fold $fold -train_num_workers 4 -interval 10 -num_samples 1 \
    -learning_rate $lr -max_epochs $max_epochs -task_id 07 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -multi_gpu True \
    -lr_decay True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/commands/task08/finetune_multi_gpu.sh ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # train step 2, finetune with small learning rate
13
+ # please replace the weight variable into your actual weight
14
+ # since this task uses lr scheduler, please set the lr and max epochs
15
+ # here according to the step 1 training results. The value of max epochs equals
16
+ # to 2000 minus the best epoch in step 1.
17
+
18
# Finetuning hyper-parameters; weight must point to the step-1 checkpoint.
# max_epochs should be 2000 minus the best epoch reached in step 1
# (this task uses an lr scheduler; see the header comment above).
lr=5e-3
max_epochs=1000
fold=0
weight=model.pt

# Two-GPU distributed finetuning for task 08 with lr decay.
python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 --node_rank=0 \
    --master_addr="localhost" --master_port=1234 \
    train.py -fold $fold -train_num_workers 4 -interval 10 -num_samples 1 \
    -learning_rate $lr -max_epochs $max_epochs -task_id 08 -pos_sample_num 1 \
    -expr_name baseline -tta_val True -checkpoint $weight -multi_gpu True \
    -lr_decay True
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/config/dataset_task02.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"name": "LeftAtrium", "description": "Left atrium segmentation", "tensorImageSize": "3D", "reference": "King\u2019s College London", "licence": "CC-BY-SA 4.0", "relase": "1.0 04/05/2018", "modality": {"0": "MRI"}, "labels": {"0": "background", "1": "left atrium"}, "numTraining": 20, "numTest": 10, "training": [{"image": "./imagesTr/la_007.nii.gz", "label": "./labelsTr/la_007.nii.gz"}, {"image": "./imagesTr/la_019.nii.gz", "label": "./labelsTr/la_019.nii.gz"}, {"image": "./imagesTr/la_023.nii.gz", "label": "./labelsTr/la_023.nii.gz"}, {"image": "./imagesTr/la_005.nii.gz", "label": "./labelsTr/la_005.nii.gz"}, {"image": "./imagesTr/la_009.nii.gz", "label": "./labelsTr/la_009.nii.gz"}, {"image": "./imagesTr/la_017.nii.gz", "label": "./labelsTr/la_017.nii.gz"}, {"image": "./imagesTr/la_021.nii.gz", "label": "./labelsTr/la_021.nii.gz"}, {"image": "./imagesTr/la_029.nii.gz", "label": "./labelsTr/la_029.nii.gz"}, {"image": "./imagesTr/la_003.nii.gz", "label": "./labelsTr/la_003.nii.gz"}, {"image": "./imagesTr/la_011.nii.gz", "label": "./labelsTr/la_011.nii.gz"}, {"image": "./imagesTr/la_030.nii.gz", "label": "./labelsTr/la_030.nii.gz"}, {"image": "./imagesTr/la_022.nii.gz", "label": "./labelsTr/la_022.nii.gz"}, {"image": "./imagesTr/la_014.nii.gz", "label": "./labelsTr/la_014.nii.gz"}, {"image": "./imagesTr/la_018.nii.gz", "label": "./labelsTr/la_018.nii.gz"}, {"image": "./imagesTr/la_020.nii.gz", "label": "./labelsTr/la_020.nii.gz"}, {"image": "./imagesTr/la_004.nii.gz", "label": "./labelsTr/la_004.nii.gz"}, {"image": "./imagesTr/la_016.nii.gz", "label": "./labelsTr/la_016.nii.gz"}, {"image": "./imagesTr/la_024.nii.gz", "label": "./labelsTr/la_024.nii.gz"}, {"image": "./imagesTr/la_010.nii.gz", "label": "./labelsTr/la_010.nii.gz"}, {"image": "./imagesTr/la_026.nii.gz", "label": "./labelsTr/la_026.nii.gz"}], "test": ["./imagesTs/la_015.nii.gz", "./imagesTs/la_025.nii.gz", "./imagesTs/la_013.nii.gz", "./imagesTs/la_001.nii.gz", "./imagesTs/la_027.nii.gz", 
"./imagesTs/la_006.nii.gz", "./imagesTs/la_008.nii.gz", "./imagesTs/la_012.nii.gz", "./imagesTs/la_028.nii.gz", "./imagesTs/la_002.nii.gz"], "validation_fold0": [{"image": "./imagesTr/la_007.nii.gz", "label": "./labelsTr/la_007.nii.gz"}, {"image": "./imagesTr/la_016.nii.gz", "label": "./labelsTr/la_016.nii.gz"}, {"image": "./imagesTr/la_021.nii.gz", "label": "./labelsTr/la_021.nii.gz"}, {"image": "./imagesTr/la_024.nii.gz", "label": "./labelsTr/la_024.nii.gz"}], "train_fold0": [{"image": "./imagesTr/la_003.nii.gz", "label": "./labelsTr/la_003.nii.gz"}, {"image": "./imagesTr/la_004.nii.gz", "label": "./labelsTr/la_004.nii.gz"}, {"image": "./imagesTr/la_005.nii.gz", "label": "./labelsTr/la_005.nii.gz"}, {"image": "./imagesTr/la_009.nii.gz", "label": "./labelsTr/la_009.nii.gz"}, {"image": "./imagesTr/la_010.nii.gz", "label": "./labelsTr/la_010.nii.gz"}, {"image": "./imagesTr/la_011.nii.gz", "label": "./labelsTr/la_011.nii.gz"}, {"image": "./imagesTr/la_014.nii.gz", "label": "./labelsTr/la_014.nii.gz"}, {"image": "./imagesTr/la_017.nii.gz", "label": "./labelsTr/la_017.nii.gz"}, {"image": "./imagesTr/la_018.nii.gz", "label": "./labelsTr/la_018.nii.gz"}, {"image": "./imagesTr/la_019.nii.gz", "label": "./labelsTr/la_019.nii.gz"}, {"image": "./imagesTr/la_020.nii.gz", "label": "./labelsTr/la_020.nii.gz"}, {"image": "./imagesTr/la_022.nii.gz", "label": "./labelsTr/la_022.nii.gz"}, {"image": "./imagesTr/la_023.nii.gz", "label": "./labelsTr/la_023.nii.gz"}, {"image": "./imagesTr/la_026.nii.gz", "label": "./labelsTr/la_026.nii.gz"}, {"image": "./imagesTr/la_029.nii.gz", "label": "./labelsTr/la_029.nii.gz"}, {"image": "./imagesTr/la_030.nii.gz", "label": "./labelsTr/la_030.nii.gz"}], "validation_fold1": [{"image": "./imagesTr/la_003.nii.gz", "label": "./labelsTr/la_003.nii.gz"}, {"image": "./imagesTr/la_018.nii.gz", "label": "./labelsTr/la_018.nii.gz"}, {"image": "./imagesTr/la_020.nii.gz", "label": "./labelsTr/la_020.nii.gz"}, {"image": "./imagesTr/la_023.nii.gz", "label": 
"./labelsTr/la_023.nii.gz"}], "train_fold1": [{"image": "./imagesTr/la_004.nii.gz", "label": "./labelsTr/la_004.nii.gz"}, {"image": "./imagesTr/la_005.nii.gz", "label": "./labelsTr/la_005.nii.gz"}, {"image": "./imagesTr/la_007.nii.gz", "label": "./labelsTr/la_007.nii.gz"}, {"image": "./imagesTr/la_009.nii.gz", "label": "./labelsTr/la_009.nii.gz"}, {"image": "./imagesTr/la_010.nii.gz", "label": "./labelsTr/la_010.nii.gz"}, {"image": "./imagesTr/la_011.nii.gz", "label": "./labelsTr/la_011.nii.gz"}, {"image": "./imagesTr/la_014.nii.gz", "label": "./labelsTr/la_014.nii.gz"}, {"image": "./imagesTr/la_016.nii.gz", "label": "./labelsTr/la_016.nii.gz"}, {"image": "./imagesTr/la_017.nii.gz", "label": "./labelsTr/la_017.nii.gz"}, {"image": "./imagesTr/la_019.nii.gz", "label": "./labelsTr/la_019.nii.gz"}, {"image": "./imagesTr/la_021.nii.gz", "label": "./labelsTr/la_021.nii.gz"}, {"image": "./imagesTr/la_022.nii.gz", "label": "./labelsTr/la_022.nii.gz"}, {"image": "./imagesTr/la_024.nii.gz", "label": "./labelsTr/la_024.nii.gz"}, {"image": "./imagesTr/la_026.nii.gz", "label": "./labelsTr/la_026.nii.gz"}, {"image": "./imagesTr/la_029.nii.gz", "label": "./labelsTr/la_029.nii.gz"}, {"image": "./imagesTr/la_030.nii.gz", "label": "./labelsTr/la_030.nii.gz"}], "validation_fold2": [{"image": "./imagesTr/la_011.nii.gz", "label": "./labelsTr/la_011.nii.gz"}, {"image": "./imagesTr/la_014.nii.gz", "label": "./labelsTr/la_014.nii.gz"}, {"image": "./imagesTr/la_019.nii.gz", "label": "./labelsTr/la_019.nii.gz"}, {"image": "./imagesTr/la_026.nii.gz", "label": "./labelsTr/la_026.nii.gz"}], "train_fold2": [{"image": "./imagesTr/la_003.nii.gz", "label": "./labelsTr/la_003.nii.gz"}, {"image": "./imagesTr/la_004.nii.gz", "label": "./labelsTr/la_004.nii.gz"}, {"image": "./imagesTr/la_005.nii.gz", "label": "./labelsTr/la_005.nii.gz"}, {"image": "./imagesTr/la_007.nii.gz", "label": "./labelsTr/la_007.nii.gz"}, {"image": "./imagesTr/la_009.nii.gz", "label": "./labelsTr/la_009.nii.gz"}, {"image": 
"./imagesTr/la_010.nii.gz", "label": "./labelsTr/la_010.nii.gz"}, {"image": "./imagesTr/la_016.nii.gz", "label": "./labelsTr/la_016.nii.gz"}, {"image": "./imagesTr/la_017.nii.gz", "label": "./labelsTr/la_017.nii.gz"}, {"image": "./imagesTr/la_018.nii.gz", "label": "./labelsTr/la_018.nii.gz"}, {"image": "./imagesTr/la_020.nii.gz", "label": "./labelsTr/la_020.nii.gz"}, {"image": "./imagesTr/la_021.nii.gz", "label": "./labelsTr/la_021.nii.gz"}, {"image": "./imagesTr/la_022.nii.gz", "label": "./labelsTr/la_022.nii.gz"}, {"image": "./imagesTr/la_023.nii.gz", "label": "./labelsTr/la_023.nii.gz"}, {"image": "./imagesTr/la_024.nii.gz", "label": "./labelsTr/la_024.nii.gz"}, {"image": "./imagesTr/la_029.nii.gz", "label": "./labelsTr/la_029.nii.gz"}, {"image": "./imagesTr/la_030.nii.gz", "label": "./labelsTr/la_030.nii.gz"}], "validation_fold3": [{"image": "./imagesTr/la_017.nii.gz", "label": "./labelsTr/la_017.nii.gz"}, {"image": "./imagesTr/la_022.nii.gz", "label": "./labelsTr/la_022.nii.gz"}, {"image": "./imagesTr/la_029.nii.gz", "label": "./labelsTr/la_029.nii.gz"}, {"image": "./imagesTr/la_030.nii.gz", "label": "./labelsTr/la_030.nii.gz"}], "train_fold3": [{"image": "./imagesTr/la_003.nii.gz", "label": "./labelsTr/la_003.nii.gz"}, {"image": "./imagesTr/la_004.nii.gz", "label": "./labelsTr/la_004.nii.gz"}, {"image": "./imagesTr/la_005.nii.gz", "label": "./labelsTr/la_005.nii.gz"}, {"image": "./imagesTr/la_007.nii.gz", "label": "./labelsTr/la_007.nii.gz"}, {"image": "./imagesTr/la_009.nii.gz", "label": "./labelsTr/la_009.nii.gz"}, {"image": "./imagesTr/la_010.nii.gz", "label": "./labelsTr/la_010.nii.gz"}, {"image": "./imagesTr/la_011.nii.gz", "label": "./labelsTr/la_011.nii.gz"}, {"image": "./imagesTr/la_014.nii.gz", "label": "./labelsTr/la_014.nii.gz"}, {"image": "./imagesTr/la_016.nii.gz", "label": "./labelsTr/la_016.nii.gz"}, {"image": "./imagesTr/la_018.nii.gz", "label": "./labelsTr/la_018.nii.gz"}, {"image": "./imagesTr/la_019.nii.gz", "label": 
"./labelsTr/la_019.nii.gz"}, {"image": "./imagesTr/la_020.nii.gz", "label": "./labelsTr/la_020.nii.gz"}, {"image": "./imagesTr/la_021.nii.gz", "label": "./labelsTr/la_021.nii.gz"}, {"image": "./imagesTr/la_023.nii.gz", "label": "./labelsTr/la_023.nii.gz"}, {"image": "./imagesTr/la_024.nii.gz", "label": "./labelsTr/la_024.nii.gz"}, {"image": "./imagesTr/la_026.nii.gz", "label": "./labelsTr/la_026.nii.gz"}], "validation_fold4": [{"image": "./imagesTr/la_004.nii.gz", "label": "./labelsTr/la_004.nii.gz"}, {"image": "./imagesTr/la_005.nii.gz", "label": "./labelsTr/la_005.nii.gz"}, {"image": "./imagesTr/la_009.nii.gz", "label": "./labelsTr/la_009.nii.gz"}, {"image": "./imagesTr/la_010.nii.gz", "label": "./labelsTr/la_010.nii.gz"}], "train_fold4": [{"image": "./imagesTr/la_003.nii.gz", "label": "./labelsTr/la_003.nii.gz"}, {"image": "./imagesTr/la_007.nii.gz", "label": "./labelsTr/la_007.nii.gz"}, {"image": "./imagesTr/la_011.nii.gz", "label": "./labelsTr/la_011.nii.gz"}, {"image": "./imagesTr/la_014.nii.gz", "label": "./labelsTr/la_014.nii.gz"}, {"image": "./imagesTr/la_016.nii.gz", "label": "./labelsTr/la_016.nii.gz"}, {"image": "./imagesTr/la_017.nii.gz", "label": "./labelsTr/la_017.nii.gz"}, {"image": "./imagesTr/la_018.nii.gz", "label": "./labelsTr/la_018.nii.gz"}, {"image": "./imagesTr/la_019.nii.gz", "label": "./labelsTr/la_019.nii.gz"}, {"image": "./imagesTr/la_020.nii.gz", "label": "./labelsTr/la_020.nii.gz"}, {"image": "./imagesTr/la_021.nii.gz", "label": "./labelsTr/la_021.nii.gz"}, {"image": "./imagesTr/la_022.nii.gz", "label": "./labelsTr/la_022.nii.gz"}, {"image": "./imagesTr/la_023.nii.gz", "label": "./labelsTr/la_023.nii.gz"}, {"image": "./imagesTr/la_024.nii.gz", "label": "./labelsTr/la_024.nii.gz"}, {"image": "./imagesTr/la_026.nii.gz", "label": "./labelsTr/la_026.nii.gz"}, {"image": "./imagesTr/la_029.nii.gz", "label": "./labelsTr/la_029.nii.gz"}, {"image": "./imagesTr/la_030.nii.gz", "label": "./labelsTr/la_030.nii.gz"}]}
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/config/dataset_task04.json ADDED
The diff for this file is too large to render. See raw diff
 
my_container_sandbox/workspace/difftumor/tumor_mask_access/dynunet_pipeline/config/dataset_task06.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"name": "Lung", "description": "Lung and cancer segmentation", "reference": "The Cancer Imaging Archive", "licence": "CC-BY-SA 4.0", "relase": "1.0 04/05/2018", "tensorImageSize": "3D", "modality": {"0": "CT"}, "labels": {"0": "background", "1": "cancer"}, "numTraining": 63, "numTest": 32, "training": [{"image": "./imagesTr/lung_053.nii.gz", "label": "./labelsTr/lung_053.nii.gz"}, {"image": "./imagesTr/lung_022.nii.gz", "label": "./labelsTr/lung_022.nii.gz"}, {"image": "./imagesTr/lung_041.nii.gz", "label": "./labelsTr/lung_041.nii.gz"}, {"image": "./imagesTr/lung_069.nii.gz", "label": "./labelsTr/lung_069.nii.gz"}, {"image": "./imagesTr/lung_014.nii.gz", "label": "./labelsTr/lung_014.nii.gz"}, {"image": "./imagesTr/lung_006.nii.gz", "label": "./labelsTr/lung_006.nii.gz"}, {"image": "./imagesTr/lung_065.nii.gz", "label": "./labelsTr/lung_065.nii.gz"}, {"image": "./imagesTr/lung_018.nii.gz", "label": "./labelsTr/lung_018.nii.gz"}, {"image": "./imagesTr/lung_096.nii.gz", "label": "./labelsTr/lung_096.nii.gz"}, {"image": "./imagesTr/lung_084.nii.gz", "label": "./labelsTr/lung_084.nii.gz"}, {"image": "./imagesTr/lung_086.nii.gz", "label": "./labelsTr/lung_086.nii.gz"}, {"image": "./imagesTr/lung_043.nii.gz", "label": "./labelsTr/lung_043.nii.gz"}, {"image": "./imagesTr/lung_020.nii.gz", "label": "./labelsTr/lung_020.nii.gz"}, {"image": "./imagesTr/lung_051.nii.gz", "label": "./labelsTr/lung_051.nii.gz"}, {"image": "./imagesTr/lung_079.nii.gz", "label": "./labelsTr/lung_079.nii.gz"}, {"image": "./imagesTr/lung_004.nii.gz", "label": "./labelsTr/lung_004.nii.gz"}, {"image": "./imagesTr/lung_075.nii.gz", "label": "./labelsTr/lung_075.nii.gz"}, {"image": "./imagesTr/lung_016.nii.gz", "label": "./labelsTr/lung_016.nii.gz"}, {"image": "./imagesTr/lung_071.nii.gz", "label": "./labelsTr/lung_071.nii.gz"}, {"image": "./imagesTr/lung_028.nii.gz", "label": "./labelsTr/lung_028.nii.gz"}, {"image": "./imagesTr/lung_055.nii.gz", "label": "./labelsTr/lung_055.nii.gz"}, {"image": 
"./imagesTr/lung_036.nii.gz", "label": "./labelsTr/lung_036.nii.gz"}, {"image": "./imagesTr/lung_047.nii.gz", "label": "./labelsTr/lung_047.nii.gz"}, {"image": "./imagesTr/lung_059.nii.gz", "label": "./labelsTr/lung_059.nii.gz"}, {"image": "./imagesTr/lung_061.nii.gz", "label": "./labelsTr/lung_061.nii.gz"}, {"image": "./imagesTr/lung_010.nii.gz", "label": "./labelsTr/lung_010.nii.gz"}, {"image": "./imagesTr/lung_073.nii.gz", "label": "./labelsTr/lung_073.nii.gz"}, {"image": "./imagesTr/lung_026.nii.gz", "label": "./labelsTr/lung_026.nii.gz"}, {"image": "./imagesTr/lung_038.nii.gz", "label": "./labelsTr/lung_038.nii.gz"}, {"image": "./imagesTr/lung_045.nii.gz", "label": "./labelsTr/lung_045.nii.gz"}, {"image": "./imagesTr/lung_034.nii.gz", "label": "./labelsTr/lung_034.nii.gz"}, {"image": "./imagesTr/lung_049.nii.gz", "label": "./labelsTr/lung_049.nii.gz"}, {"image": "./imagesTr/lung_057.nii.gz", "label": "./labelsTr/lung_057.nii.gz"}, {"image": "./imagesTr/lung_080.nii.gz", "label": "./labelsTr/lung_080.nii.gz"}, {"image": "./imagesTr/lung_092.nii.gz", "label": "./labelsTr/lung_092.nii.gz"}, {"image": "./imagesTr/lung_015.nii.gz", "label": "./labelsTr/lung_015.nii.gz"}, {"image": "./imagesTr/lung_064.nii.gz", "label": "./labelsTr/lung_064.nii.gz"}, {"image": "./imagesTr/lung_031.nii.gz", "label": "./labelsTr/lung_031.nii.gz"}, {"image": "./imagesTr/lung_023.nii.gz", "label": "./labelsTr/lung_023.nii.gz"}, {"image": "./imagesTr/lung_005.nii.gz", "label": "./labelsTr/lung_005.nii.gz"}, {"image": "./imagesTr/lung_078.nii.gz", "label": "./labelsTr/lung_078.nii.gz"}, {"image": "./imagesTr/lung_066.nii.gz", "label": "./labelsTr/lung_066.nii.gz"}, {"image": "./imagesTr/lung_009.nii.gz", "label": "./labelsTr/lung_009.nii.gz"}, {"image": "./imagesTr/lung_074.nii.gz", "label": "./labelsTr/lung_074.nii.gz"}, {"image": "./imagesTr/lung_042.nii.gz", "label": "./labelsTr/lung_042.nii.gz"}, {"image": "./imagesTr/lung_033.nii.gz", "label": "./labelsTr/lung_033.nii.gz"}, {"image": 
"./imagesTr/lung_095.nii.gz", "label": "./labelsTr/lung_095.nii.gz"}, {"image": "./imagesTr/lung_037.nii.gz", "label": "./labelsTr/lung_037.nii.gz"}, {"image": "./imagesTr/lung_054.nii.gz", "label": "./labelsTr/lung_054.nii.gz"}, {"image": "./imagesTr/lung_029.nii.gz", "label": "./labelsTr/lung_029.nii.gz"}, {"image": "./imagesTr/lung_058.nii.gz", "label": "./labelsTr/lung_058.nii.gz"}, {"image": "./imagesTr/lung_025.nii.gz", "label": "./labelsTr/lung_025.nii.gz"}, {"image": "./imagesTr/lung_046.nii.gz", "label": "./labelsTr/lung_046.nii.gz"}, {"image": "./imagesTr/lung_070.nii.gz", "label": "./labelsTr/lung_070.nii.gz"}, {"image": "./imagesTr/lung_001.nii.gz", "label": "./labelsTr/lung_001.nii.gz"}, {"image": "./imagesTr/lung_062.nii.gz", "label": "./labelsTr/lung_062.nii.gz"}, {"image": "./imagesTr/lung_083.nii.gz", "label": "./labelsTr/lung_083.nii.gz"}, {"image": "./imagesTr/lung_081.nii.gz", "label": "./labelsTr/lung_081.nii.gz"}, {"image": "./imagesTr/lung_093.nii.gz", "label": "./labelsTr/lung_093.nii.gz"}, {"image": "./imagesTr/lung_044.nii.gz", "label": "./labelsTr/lung_044.nii.gz"}, {"image": "./imagesTr/lung_027.nii.gz", "label": "./labelsTr/lung_027.nii.gz"}, {"image": "./imagesTr/lung_048.nii.gz", "label": "./labelsTr/lung_048.nii.gz"}, {"image": "./imagesTr/lung_003.nii.gz", "label": "./labelsTr/lung_003.nii.gz"}], "test": ["./imagesTs/lung_030.nii.gz", "./imagesTs/lung_077.nii.gz", "./imagesTs/lung_088.nii.gz", "./imagesTs/lung_032.nii.gz", "./imagesTs/lung_067.nii.gz", "./imagesTs/lung_008.nii.gz", "./imagesTs/lung_090.nii.gz", "./imagesTs/lung_082.nii.gz", "./imagesTs/lung_012.nii.gz", "./imagesTs/lung_063.nii.gz", "./imagesTs/lung_024.nii.gz", "./imagesTs/lung_002.nii.gz", "./imagesTs/lung_089.nii.gz", "./imagesTs/lung_085.nii.gz", "./imagesTs/lung_076.nii.gz", "./imagesTs/lung_068.nii.gz", "./imagesTs/lung_019.nii.gz", "./imagesTs/lung_007.nii.gz", "./imagesTs/lung_052.nii.gz", "./imagesTs/lung_040.nii.gz", "./imagesTs/lung_017.nii.gz", 
"./imagesTs/lung_021.nii.gz", "./imagesTs/lung_050.nii.gz", "./imagesTs/lung_087.nii.gz", "./imagesTs/lung_013.nii.gz", "./imagesTs/lung_091.nii.gz", "./imagesTs/lung_039.nii.gz", "./imagesTs/lung_056.nii.gz", "./imagesTs/lung_035.nii.gz", "./imagesTs/lung_060.nii.gz", "./imagesTs/lung_072.nii.gz", "./imagesTs/lung_011.nii.gz"], "validation_fold0": [{"image": "./imagesTr/lung_006.nii.gz", "label": "./labelsTr/lung_006.nii.gz"}, {"image": "./imagesTr/lung_010.nii.gz", "label": "./labelsTr/lung_010.nii.gz"}, {"image": "./imagesTr/lung_033.nii.gz", "label": "./labelsTr/lung_033.nii.gz"}, {"image": "./imagesTr/lung_034.nii.gz", "label": "./labelsTr/lung_034.nii.gz"}, {"image": "./imagesTr/lung_041.nii.gz", "label": "./labelsTr/lung_041.nii.gz"}, {"image": "./imagesTr/lung_042.nii.gz", "label": "./labelsTr/lung_042.nii.gz"}, {"image": "./imagesTr/lung_046.nii.gz", "label": "./labelsTr/lung_046.nii.gz"}, {"image": "./imagesTr/lung_048.nii.gz", "label": "./labelsTr/lung_048.nii.gz"}, {"image": "./imagesTr/lung_059.nii.gz", "label": "./labelsTr/lung_059.nii.gz"}, {"image": "./imagesTr/lung_065.nii.gz", "label": "./labelsTr/lung_065.nii.gz"}, {"image": "./imagesTr/lung_066.nii.gz", "label": "./labelsTr/lung_066.nii.gz"}, {"image": "./imagesTr/lung_070.nii.gz", "label": "./labelsTr/lung_070.nii.gz"}, {"image": "./imagesTr/lung_079.nii.gz", "label": "./labelsTr/lung_079.nii.gz"}], "train_fold0": [{"image": "./imagesTr/lung_001.nii.gz", "label": "./labelsTr/lung_001.nii.gz"}, {"image": "./imagesTr/lung_003.nii.gz", "label": "./labelsTr/lung_003.nii.gz"}, {"image": "./imagesTr/lung_004.nii.gz", "label": "./labelsTr/lung_004.nii.gz"}, {"image": "./imagesTr/lung_005.nii.gz", "label": "./labelsTr/lung_005.nii.gz"}, {"image": "./imagesTr/lung_009.nii.gz", "label": "./labelsTr/lung_009.nii.gz"}, {"image": "./imagesTr/lung_014.nii.gz", "label": "./labelsTr/lung_014.nii.gz"}, {"image": "./imagesTr/lung_015.nii.gz", "label": "./labelsTr/lung_015.nii.gz"}, {"image": 
"./imagesTr/lung_016.nii.gz", "label": "./labelsTr/lung_016.nii.gz"}, {"image": "./imagesTr/lung_018.nii.gz", "label": "./labelsTr/lung_018.nii.gz"}, {"image": "./imagesTr/lung_020.nii.gz", "label": "./labelsTr/lung_020.nii.gz"}, {"image": "./imagesTr/lung_022.nii.gz", "label": "./labelsTr/lung_022.nii.gz"}, {"image": "./imagesTr/lung_023.nii.gz", "label": "./labelsTr/lung_023.nii.gz"}, {"image": "./imagesTr/lung_025.nii.gz", "label": "./labelsTr/lung_025.nii.gz"}, {"image": "./imagesTr/lung_026.nii.gz", "label": "./labelsTr/lung_026.nii.gz"}, {"image": "./imagesTr/lung_027.nii.gz", "label": "./labelsTr/lung_027.nii.gz"}, {"image": "./imagesTr/lung_028.nii.gz", "label": "./labelsTr/lung_028.nii.gz"}, {"image": "./imagesTr/lung_029.nii.gz", "label": "./labelsTr/lung_029.nii.gz"}, {"image": "./imagesTr/lung_031.nii.gz", "label": "./labelsTr/lung_031.nii.gz"}, {"image": "./imagesTr/lung_036.nii.gz", "label": "./labelsTr/lung_036.nii.gz"}, {"image": "./imagesTr/lung_037.nii.gz", "label": "./labelsTr/lung_037.nii.gz"}, {"image": "./imagesTr/lung_038.nii.gz", "label": "./labelsTr/lung_038.nii.gz"}, {"image": "./imagesTr/lung_043.nii.gz", "label": "./labelsTr/lung_043.nii.gz"}, {"image": "./imagesTr/lung_044.nii.gz", "label": "./labelsTr/lung_044.nii.gz"}, {"image": "./imagesTr/lung_045.nii.gz", "label": "./labelsTr/lung_045.nii.gz"}, {"image": "./imagesTr/lung_047.nii.gz", "label": "./labelsTr/lung_047.nii.gz"}, {"image": "./imagesTr/lung_049.nii.gz", "label": "./labelsTr/lung_049.nii.gz"}, {"image": "./imagesTr/lung_051.nii.gz", "label": "./labelsTr/lung_051.nii.gz"}, {"image": "./imagesTr/lung_053.nii.gz", "label": "./labelsTr/lung_053.nii.gz"}, {"image": "./imagesTr/lung_054.nii.gz", "label": "./labelsTr/lung_054.nii.gz"}, {"image": "./imagesTr/lung_055.nii.gz", "label": "./labelsTr/lung_055.nii.gz"}, {"image": "./imagesTr/lung_057.nii.gz", "label": "./labelsTr/lung_057.nii.gz"}, {"image": "./imagesTr/lung_058.nii.gz", "label": "./labelsTr/lung_058.nii.gz"}, {"image": 
"./imagesTr/lung_061.nii.gz", "label": "./labelsTr/lung_061.nii.gz"}, {"image": "./imagesTr/lung_062.nii.gz", "label": "./labelsTr/lung_062.nii.gz"}, {"image": "./imagesTr/lung_064.nii.gz", "label": "./labelsTr/lung_064.nii.gz"}, {"image": "./imagesTr/lung_069.nii.gz", "label": "./labelsTr/lung_069.nii.gz"}, {"image": "./imagesTr/lung_071.nii.gz", "label": "./labelsTr/lung_071.nii.gz"}, {"image": "./imagesTr/lung_073.nii.gz", "label": "./labelsTr/lung_073.nii.gz"}, {"image": "./imagesTr/lung_074.nii.gz", "label": "./labelsTr/lung_074.nii.gz"}, {"image": "./imagesTr/lung_075.nii.gz", "label": "./labelsTr/lung_075.nii.gz"}, {"image": "./imagesTr/lung_078.nii.gz", "label": "./labelsTr/lung_078.nii.gz"}, {"image": "./imagesTr/lung_080.nii.gz", "label": "./labelsTr/lung_080.nii.gz"}, {"image": "./imagesTr/lung_081.nii.gz", "label": "./labelsTr/lung_081.nii.gz"}, {"image": "./imagesTr/lung_083.nii.gz", "label": "./labelsTr/lung_083.nii.gz"}, {"image": "./imagesTr/lung_084.nii.gz", "label": "./labelsTr/lung_084.nii.gz"}, {"image": "./imagesTr/lung_086.nii.gz", "label": "./labelsTr/lung_086.nii.gz"}, {"image": "./imagesTr/lung_092.nii.gz", "label": "./labelsTr/lung_092.nii.gz"}, {"image": "./imagesTr/lung_093.nii.gz", "label": "./labelsTr/lung_093.nii.gz"}, {"image": "./imagesTr/lung_095.nii.gz", "label": "./labelsTr/lung_095.nii.gz"}, {"image": "./imagesTr/lung_096.nii.gz", "label": "./labelsTr/lung_096.nii.gz"}], "validation_fold1": [{"image": "./imagesTr/lung_004.nii.gz", "label": "./labelsTr/lung_004.nii.gz"}, {"image": "./imagesTr/lung_015.nii.gz", "label": "./labelsTr/lung_015.nii.gz"}, {"image": "./imagesTr/lung_022.nii.gz", "label": "./labelsTr/lung_022.nii.gz"}, {"image": "./imagesTr/lung_031.nii.gz", "label": "./labelsTr/lung_031.nii.gz"}, {"image": "./imagesTr/lung_036.nii.gz", "label": "./labelsTr/lung_036.nii.gz"}, {"image": "./imagesTr/lung_038.nii.gz", "label": "./labelsTr/lung_038.nii.gz"}, {"image": "./imagesTr/lung_053.nii.gz", "label": 
"./labelsTr/lung_053.nii.gz"}, {"image": "./imagesTr/lung_062.nii.gz", "label": "./labelsTr/lung_062.nii.gz"}, {"image": "./imagesTr/lung_064.nii.gz", "label": "./labelsTr/lung_064.nii.gz"}, {"image": "./imagesTr/lung_069.nii.gz", "label": "./labelsTr/lung_069.nii.gz"}, {"image": "./imagesTr/lung_071.nii.gz", "label": "./labelsTr/lung_071.nii.gz"}, {"image": "./imagesTr/lung_075.nii.gz", "label": "./labelsTr/lung_075.nii.gz"}, {"image": "./imagesTr/lung_081.nii.gz", "label": "./labelsTr/lung_081.nii.gz"}], "train_fold1": [{"image": "./imagesTr/lung_001.nii.gz", "label": "./labelsTr/lung_001.nii.gz"}, {"image": "./imagesTr/lung_003.nii.gz", "label": "./labelsTr/lung_003.nii.gz"}, {"image": "./imagesTr/lung_005.nii.gz", "label": "./labelsTr/lung_005.nii.gz"}, {"image": "./imagesTr/lung_006.nii.gz", "label": "./labelsTr/lung_006.nii.gz"}, {"image": "./imagesTr/lung_009.nii.gz", "label": "./labelsTr/lung_009.nii.gz"}, {"image": "./imagesTr/lung_010.nii.gz", "label": "./labelsTr/lung_010.nii.gz"}, {"image": "./imagesTr/lung_014.nii.gz", "label": "./labelsTr/lung_014.nii.gz"}, {"image": "./imagesTr/lung_016.nii.gz", "label": "./labelsTr/lung_016.nii.gz"}, {"image": "./imagesTr/lung_018.nii.gz", "label": "./labelsTr/lung_018.nii.gz"}, {"image": "./imagesTr/lung_020.nii.gz", "label": "./labelsTr/lung_020.nii.gz"}, {"image": "./imagesTr/lung_023.nii.gz", "label": "./labelsTr/lung_023.nii.gz"}, {"image": "./imagesTr/lung_025.nii.gz", "label": "./labelsTr/lung_025.nii.gz"}, {"image": "./imagesTr/lung_026.nii.gz", "label": "./labelsTr/lung_026.nii.gz"}, {"image": "./imagesTr/lung_027.nii.gz", "label": "./labelsTr/lung_027.nii.gz"}, {"image": "./imagesTr/lung_028.nii.gz", "label": "./labelsTr/lung_028.nii.gz"}, {"image": "./imagesTr/lung_029.nii.gz", "label": "./labelsTr/lung_029.nii.gz"}, {"image": "./imagesTr/lung_033.nii.gz", "label": "./labelsTr/lung_033.nii.gz"}, {"image": "./imagesTr/lung_034.nii.gz", "label": "./labelsTr/lung_034.nii.gz"}, {"image": 
"./imagesTr/lung_037.nii.gz", "label": "./labelsTr/lung_037.nii.gz"}, {"image": "./imagesTr/lung_041.nii.gz", "label": "./labelsTr/lung_041.nii.gz"}, {"image": "./imagesTr/lung_042.nii.gz", "label": "./labelsTr/lung_042.nii.gz"}, {"image": "./imagesTr/lung_043.nii.gz", "label": "./labelsTr/lung_043.nii.gz"}, {"image": "./imagesTr/lung_044.nii.gz", "label": "./labelsTr/lung_044.nii.gz"}, {"image": "./imagesTr/lung_045.nii.gz", "label": "./labelsTr/lung_045.nii.gz"}, {"image": "./imagesTr/lung_046.nii.gz", "label": "./labelsTr/lung_046.nii.gz"}, {"image": "./imagesTr/lung_047.nii.gz", "label": "./labelsTr/lung_047.nii.gz"}, {"image": "./imagesTr/lung_048.nii.gz", "label": "./labelsTr/lung_048.nii.gz"}, {"image": "./imagesTr/lung_049.nii.gz", "label": "./labelsTr/lung_049.nii.gz"}, {"image": "./imagesTr/lung_051.nii.gz", "label": "./labelsTr/lung_051.nii.gz"}, {"image": "./imagesTr/lung_054.nii.gz", "label": "./labelsTr/lung_054.nii.gz"}, {"image": "./imagesTr/lung_055.nii.gz", "label": "./labelsTr/lung_055.nii.gz"}, {"image": "./imagesTr/lung_057.nii.gz", "label": "./labelsTr/lung_057.nii.gz"}, {"image": "./imagesTr/lung_058.nii.gz", "label": "./labelsTr/lung_058.nii.gz"}, {"image": "./imagesTr/lung_059.nii.gz", "label": "./labelsTr/lung_059.nii.gz"}, {"image": "./imagesTr/lung_061.nii.gz", "label": "./labelsTr/lung_061.nii.gz"}, {"image": "./imagesTr/lung_065.nii.gz", "label": "./labelsTr/lung_065.nii.gz"}, {"image": "./imagesTr/lung_066.nii.gz", "label": "./labelsTr/lung_066.nii.gz"}, {"image": "./imagesTr/lung_070.nii.gz", "label": "./labelsTr/lung_070.nii.gz"}, {"image": "./imagesTr/lung_073.nii.gz", "label": "./labelsTr/lung_073.nii.gz"}, {"image": "./imagesTr/lung_074.nii.gz", "label": "./labelsTr/lung_074.nii.gz"}, {"image": "./imagesTr/lung_078.nii.gz", "label": "./labelsTr/lung_078.nii.gz"}, {"image": "./imagesTr/lung_079.nii.gz", "label": "./labelsTr/lung_079.nii.gz"}, {"image": "./imagesTr/lung_080.nii.gz", "label": "./labelsTr/lung_080.nii.gz"}, {"image": 
"./imagesTr/lung_083.nii.gz", "label": "./labelsTr/lung_083.nii.gz"}, {"image": "./imagesTr/lung_084.nii.gz", "label": "./labelsTr/lung_084.nii.gz"}, {"image": "./imagesTr/lung_086.nii.gz", "label": "./labelsTr/lung_086.nii.gz"}, {"image": "./imagesTr/lung_092.nii.gz", "label": "./labelsTr/lung_092.nii.gz"}, {"image": "./imagesTr/lung_093.nii.gz", "label": "./labelsTr/lung_093.nii.gz"}, {"image": "./imagesTr/lung_095.nii.gz", "label": "./labelsTr/lung_095.nii.gz"}, {"image": "./imagesTr/lung_096.nii.gz", "label": "./labelsTr/lung_096.nii.gz"}], "validation_fold2": [{"image": "./imagesTr/lung_001.nii.gz", "label": "./labelsTr/lung_001.nii.gz"}, {"image": "./imagesTr/lung_005.nii.gz", "label": "./labelsTr/lung_005.nii.gz"}, {"image": "./imagesTr/lung_009.nii.gz", "label": "./labelsTr/lung_009.nii.gz"}, {"image": "./imagesTr/lung_026.nii.gz", "label": "./labelsTr/lung_026.nii.gz"}, {"image": "./imagesTr/lung_037.nii.gz", "label": "./labelsTr/lung_037.nii.gz"}, {"image": "./imagesTr/lung_044.nii.gz", "label": "./labelsTr/lung_044.nii.gz"}, {"image": "./imagesTr/lung_047.nii.gz", "label": "./labelsTr/lung_047.nii.gz"}, {"image": "./imagesTr/lung_049.nii.gz", "label": "./labelsTr/lung_049.nii.gz"}, {"image": "./imagesTr/lung_074.nii.gz", "label": "./labelsTr/lung_074.nii.gz"}, {"image": "./imagesTr/lung_078.nii.gz", "label": "./labelsTr/lung_078.nii.gz"}, {"image": "./imagesTr/lung_080.nii.gz", "label": "./labelsTr/lung_080.nii.gz"}, {"image": "./imagesTr/lung_083.nii.gz", "label": "./labelsTr/lung_083.nii.gz"}, {"image": "./imagesTr/lung_086.nii.gz", "label": "./labelsTr/lung_086.nii.gz"}], "train_fold2": [{"image": "./imagesTr/lung_003.nii.gz", "label": "./labelsTr/lung_003.nii.gz"}, {"image": "./imagesTr/lung_004.nii.gz", "label": "./labelsTr/lung_004.nii.gz"}, {"image": "./imagesTr/lung_006.nii.gz", "label": "./labelsTr/lung_006.nii.gz"}, {"image": "./imagesTr/lung_010.nii.gz", "label": "./labelsTr/lung_010.nii.gz"}, {"image": "./imagesTr/lung_014.nii.gz", "label": 
"./labelsTr/lung_014.nii.gz"}, {"image": "./imagesTr/lung_015.nii.gz", "label": "./labelsTr/lung_015.nii.gz"}, {"image": "./imagesTr/lung_016.nii.gz", "label": "./labelsTr/lung_016.nii.gz"}, {"image": "./imagesTr/lung_018.nii.gz", "label": "./labelsTr/lung_018.nii.gz"}, {"image": "./imagesTr/lung_020.nii.gz", "label": "./labelsTr/lung_020.nii.gz"}, {"image": "./imagesTr/lung_022.nii.gz", "label": "./labelsTr/lung_022.nii.gz"}, {"image": "./imagesTr/lung_023.nii.gz", "label": "./labelsTr/lung_023.nii.gz"}, {"image": "./imagesTr/lung_025.nii.gz", "label": "./labelsTr/lung_025.nii.gz"}, {"image": "./imagesTr/lung_027.nii.gz", "label": "./labelsTr/lung_027.nii.gz"}, {"image": "./imagesTr/lung_028.nii.gz", "label": "./labelsTr/lung_028.nii.gz"}, {"image": "./imagesTr/lung_029.nii.gz", "label": "./labelsTr/lung_029.nii.gz"}, {"image": "./imagesTr/lung_031.nii.gz", "label": "./labelsTr/lung_031.nii.gz"}, {"image": "./imagesTr/lung_033.nii.gz", "label": "./labelsTr/lung_033.nii.gz"}, {"image": "./imagesTr/lung_034.nii.gz", "label": "./labelsTr/lung_034.nii.gz"}, {"image": "./imagesTr/lung_036.nii.gz", "label": "./labelsTr/lung_036.nii.gz"}, {"image": "./imagesTr/lung_038.nii.gz", "label": "./labelsTr/lung_038.nii.gz"}, {"image": "./imagesTr/lung_041.nii.gz", "label": "./labelsTr/lung_041.nii.gz"}, {"image": "./imagesTr/lung_042.nii.gz", "label": "./labelsTr/lung_042.nii.gz"}, {"image": "./imagesTr/lung_043.nii.gz", "label": "./labelsTr/lung_043.nii.gz"}, {"image": "./imagesTr/lung_045.nii.gz", "label": "./labelsTr/lung_045.nii.gz"}, {"image": "./imagesTr/lung_046.nii.gz", "label": "./labelsTr/lung_046.nii.gz"}, {"image": "./imagesTr/lung_048.nii.gz", "label": "./labelsTr/lung_048.nii.gz"}, {"image": "./imagesTr/lung_051.nii.gz", "label": "./labelsTr/lung_051.nii.gz"}, {"image": "./imagesTr/lung_053.nii.gz", "label": "./labelsTr/lung_053.nii.gz"}, {"image": "./imagesTr/lung_054.nii.gz", "label": "./labelsTr/lung_054.nii.gz"}, {"image": "./imagesTr/lung_055.nii.gz", "label": 
"./labelsTr/lung_055.nii.gz"}, {"image": "./imagesTr/lung_057.nii.gz", "label": "./labelsTr/lung_057.nii.gz"}, {"image": "./imagesTr/lung_058.nii.gz", "label": "./labelsTr/lung_058.nii.gz"}, {"image": "./imagesTr/lung_059.nii.gz", "label": "./labelsTr/lung_059.nii.gz"}, {"image": "./imagesTr/lung_061.nii.gz", "label": "./labelsTr/lung_061.nii.gz"}, {"image": "./imagesTr/lung_062.nii.gz", "label": "./labelsTr/lung_062.nii.gz"}, {"image": "./imagesTr/lung_064.nii.gz", "label": "./labelsTr/lung_064.nii.gz"}, {"image": "./imagesTr/lung_065.nii.gz", "label": "./labelsTr/lung_065.nii.gz"}, {"image": "./imagesTr/lung_066.nii.gz", "label": "./labelsTr/lung_066.nii.gz"}, {"image": "./imagesTr/lung_069.nii.gz", "label": "./labelsTr/lung_069.nii.gz"}, {"image": "./imagesTr/lung_070.nii.gz", "label": "./labelsTr/lung_070.nii.gz"}, {"image": "./imagesTr/lung_071.nii.gz", "label": "./labelsTr/lung_071.nii.gz"}, {"image": "./imagesTr/lung_073.nii.gz", "label": "./labelsTr/lung_073.nii.gz"}, {"image": "./imagesTr/lung_075.nii.gz", "label": "./labelsTr/lung_075.nii.gz"}, {"image": "./imagesTr/lung_079.nii.gz", "label": "./labelsTr/lung_079.nii.gz"}, {"image": "./imagesTr/lung_081.nii.gz", "label": "./labelsTr/lung_081.nii.gz"}, {"image": "./imagesTr/lung_084.nii.gz", "label": "./labelsTr/lung_084.nii.gz"}, {"image": "./imagesTr/lung_092.nii.gz", "label": "./labelsTr/lung_092.nii.gz"}, {"image": "./imagesTr/lung_093.nii.gz", "label": "./labelsTr/lung_093.nii.gz"}, {"image": "./imagesTr/lung_095.nii.gz", "label": "./labelsTr/lung_095.nii.gz"}, {"image": "./imagesTr/lung_096.nii.gz", "label": "./labelsTr/lung_096.nii.gz"}], "validation_fold3": [{"image": "./imagesTr/lung_014.nii.gz", "label": "./labelsTr/lung_014.nii.gz"}, {"image": "./imagesTr/lung_016.nii.gz", "label": "./labelsTr/lung_016.nii.gz"}, {"image": "./imagesTr/lung_018.nii.gz", "label": "./labelsTr/lung_018.nii.gz"}, {"image": "./imagesTr/lung_020.nii.gz", "label": "./labelsTr/lung_020.nii.gz"}, {"image": 
"./imagesTr/lung_023.nii.gz", "label": "./labelsTr/lung_023.nii.gz"}, {"image": "./imagesTr/lung_027.nii.gz", "label": "./labelsTr/lung_027.nii.gz"}, {"image": "./imagesTr/lung_028.nii.gz", "label": "./labelsTr/lung_028.nii.gz"}, {"image": "./imagesTr/lung_029.nii.gz", "label": "./labelsTr/lung_029.nii.gz"}, {"image": "./imagesTr/lung_043.nii.gz", "label": "./labelsTr/lung_043.nii.gz"}, {"image": "./imagesTr/lung_057.nii.gz", "label": "./labelsTr/lung_057.nii.gz"}, {"image": "./imagesTr/lung_058.nii.gz", "label": "./labelsTr/lung_058.nii.gz"}, {"image": "./imagesTr/lung_084.nii.gz", "label": "./labelsTr/lung_084.nii.gz"}], "train_fold3": [{"image": "./imagesTr/lung_001.nii.gz", "label": "./labelsTr/lung_001.nii.gz"}, {"image": "./imagesTr/lung_003.nii.gz", "label": "./labelsTr/lung_003.nii.gz"}, {"image": "./imagesTr/lung_004.nii.gz", "label": "./labelsTr/lung_004.nii.gz"}, {"image": "./imagesTr/lung_005.nii.gz", "label": "./labelsTr/lung_005.nii.gz"}, {"image": "./imagesTr/lung_006.nii.gz", "label": "./labelsTr/lung_006.nii.gz"}, {"image": "./imagesTr/lung_009.nii.gz", "label": "./labelsTr/lung_009.nii.gz"}, {"image": "./imagesTr/lung_010.nii.gz", "label": "./labelsTr/lung_010.nii.gz"}, {"image": "./imagesTr/lung_015.nii.gz", "label": "./labelsTr/lung_015.nii.gz"}, {"image": "./imagesTr/lung_022.nii.gz", "label": "./labelsTr/lung_022.nii.gz"}, {"image": "./imagesTr/lung_025.nii.gz", "label": "./labelsTr/lung_025.nii.gz"}, {"image": "./imagesTr/lung_026.nii.gz", "label": "./labelsTr/lung_026.nii.gz"}, {"image": "./imagesTr/lung_031.nii.gz", "label": "./labelsTr/lung_031.nii.gz"}, {"image": "./imagesTr/lung_033.nii.gz", "label": "./labelsTr/lung_033.nii.gz"}, {"image": "./imagesTr/lung_034.nii.gz", "label": "./labelsTr/lung_034.nii.gz"}, {"image": "./imagesTr/lung_036.nii.gz", "label": "./labelsTr/lung_036.nii.gz"}, {"image": "./imagesTr/lung_037.nii.gz", "label": "./labelsTr/lung_037.nii.gz"}, {"image": "./imagesTr/lung_038.nii.gz", "label": 
"./labelsTr/lung_038.nii.gz"}, {"image": "./imagesTr/lung_041.nii.gz", "label": "./labelsTr/lung_041.nii.gz"}, {"image": "./imagesTr/lung_042.nii.gz", "label": "./labelsTr/lung_042.nii.gz"}, {"image": "./imagesTr/lung_044.nii.gz", "label": "./labelsTr/lung_044.nii.gz"}, {"image": "./imagesTr/lung_045.nii.gz", "label": "./labelsTr/lung_045.nii.gz"}, {"image": "./imagesTr/lung_046.nii.gz", "label": "./labelsTr/lung_046.nii.gz"}, {"image": "./imagesTr/lung_047.nii.gz", "label": "./labelsTr/lung_047.nii.gz"}, {"image": "./imagesTr/lung_048.nii.gz", "label": "./labelsTr/lung_048.nii.gz"}, {"image": "./imagesTr/lung_049.nii.gz", "label": "./labelsTr/lung_049.nii.gz"}, {"image": "./imagesTr/lung_051.nii.gz", "label": "./labelsTr/lung_051.nii.gz"}, {"image": "./imagesTr/lung_053.nii.gz", "label": "./labelsTr/lung_053.nii.gz"}, {"image": "./imagesTr/lung_054.nii.gz", "label": "./labelsTr/lung_054.nii.gz"}, {"image": "./imagesTr/lung_055.nii.gz", "label": "./labelsTr/lung_055.nii.gz"}, {"image": "./imagesTr/lung_059.nii.gz", "label": "./labelsTr/lung_059.nii.gz"}, {"image": "./imagesTr/lung_061.nii.gz", "label": "./labelsTr/lung_061.nii.gz"}, {"image": "./imagesTr/lung_062.nii.gz", "label": "./labelsTr/lung_062.nii.gz"}, {"image": "./imagesTr/lung_064.nii.gz", "label": "./labelsTr/lung_064.nii.gz"}, {"image": "./imagesTr/lung_065.nii.gz", "label": "./labelsTr/lung_065.nii.gz"}, {"image": "./imagesTr/lung_066.nii.gz", "label": "./labelsTr/lung_066.nii.gz"}, {"image": "./imagesTr/lung_069.nii.gz", "label": "./labelsTr/lung_069.nii.gz"}, {"image": "./imagesTr/lung_070.nii.gz", "label": "./labelsTr/lung_070.nii.gz"}, {"image": "./imagesTr/lung_071.nii.gz", "label": "./labelsTr/lung_071.nii.gz"}, {"image": "./imagesTr/lung_073.nii.gz", "label": "./labelsTr/lung_073.nii.gz"}, {"image": "./imagesTr/lung_074.nii.gz", "label": "./labelsTr/lung_074.nii.gz"}, {"image": "./imagesTr/lung_075.nii.gz", "label": "./labelsTr/lung_075.nii.gz"}, {"image": "./imagesTr/lung_078.nii.gz", "label": 
"./labelsTr/lung_078.nii.gz"}, {"image": "./imagesTr/lung_079.nii.gz", "label": "./labelsTr/lung_079.nii.gz"}, {"image": "./imagesTr/lung_080.nii.gz", "label": "./labelsTr/lung_080.nii.gz"}, {"image": "./imagesTr/lung_081.nii.gz", "label": "./labelsTr/lung_081.nii.gz"}, {"image": "./imagesTr/lung_083.nii.gz", "label": "./labelsTr/lung_083.nii.gz"}, {"image": "./imagesTr/lung_086.nii.gz", "label": "./labelsTr/lung_086.nii.gz"}, {"image": "./imagesTr/lung_092.nii.gz", "label": "./labelsTr/lung_092.nii.gz"}, {"image": "./imagesTr/lung_093.nii.gz", "label": "./labelsTr/lung_093.nii.gz"}, {"image": "./imagesTr/lung_095.nii.gz", "label": "./labelsTr/lung_095.nii.gz"}, {"image": "./imagesTr/lung_096.nii.gz", "label": "./labelsTr/lung_096.nii.gz"}], "validation_fold4": [{"image": "./imagesTr/lung_003.nii.gz", "label": "./labelsTr/lung_003.nii.gz"}, {"image": "./imagesTr/lung_025.nii.gz", "label": "./labelsTr/lung_025.nii.gz"}, {"image": "./imagesTr/lung_045.nii.gz", "label": "./labelsTr/lung_045.nii.gz"}, {"image": "./imagesTr/lung_051.nii.gz", "label": "./labelsTr/lung_051.nii.gz"}, {"image": "./imagesTr/lung_054.nii.gz", "label": "./labelsTr/lung_054.nii.gz"}, {"image": "./imagesTr/lung_055.nii.gz", "label": "./labelsTr/lung_055.nii.gz"}, {"image": "./imagesTr/lung_061.nii.gz", "label": "./labelsTr/lung_061.nii.gz"}, {"image": "./imagesTr/lung_073.nii.gz", "label": "./labelsTr/lung_073.nii.gz"}, {"image": "./imagesTr/lung_092.nii.gz", "label": "./labelsTr/lung_092.nii.gz"}, {"image": "./imagesTr/lung_093.nii.gz", "label": "./labelsTr/lung_093.nii.gz"}, {"image": "./imagesTr/lung_095.nii.gz", "label": "./labelsTr/lung_095.nii.gz"}, {"image": "./imagesTr/lung_096.nii.gz", "label": "./labelsTr/lung_096.nii.gz"}], "train_fold4": [{"image": "./imagesTr/lung_001.nii.gz", "label": "./labelsTr/lung_001.nii.gz"}, {"image": "./imagesTr/lung_004.nii.gz", "label": "./labelsTr/lung_004.nii.gz"}, {"image": "./imagesTr/lung_005.nii.gz", "label": "./labelsTr/lung_005.nii.gz"}, {"image": 
"./imagesTr/lung_006.nii.gz", "label": "./labelsTr/lung_006.nii.gz"}, {"image": "./imagesTr/lung_009.nii.gz", "label": "./labelsTr/lung_009.nii.gz"}, {"image": "./imagesTr/lung_010.nii.gz", "label": "./labelsTr/lung_010.nii.gz"}, {"image": "./imagesTr/lung_014.nii.gz", "label": "./labelsTr/lung_014.nii.gz"}, {"image": "./imagesTr/lung_015.nii.gz", "label": "./labelsTr/lung_015.nii.gz"}, {"image": "./imagesTr/lung_016.nii.gz", "label": "./labelsTr/lung_016.nii.gz"}, {"image": "./imagesTr/lung_018.nii.gz", "label": "./labelsTr/lung_018.nii.gz"}, {"image": "./imagesTr/lung_020.nii.gz", "label": "./labelsTr/lung_020.nii.gz"}, {"image": "./imagesTr/lung_022.nii.gz", "label": "./labelsTr/lung_022.nii.gz"}, {"image": "./imagesTr/lung_023.nii.gz", "label": "./labelsTr/lung_023.nii.gz"}, {"image": "./imagesTr/lung_026.nii.gz", "label": "./labelsTr/lung_026.nii.gz"}, {"image": "./imagesTr/lung_027.nii.gz", "label": "./labelsTr/lung_027.nii.gz"}, {"image": "./imagesTr/lung_028.nii.gz", "label": "./labelsTr/lung_028.nii.gz"}, {"image": "./imagesTr/lung_029.nii.gz", "label": "./labelsTr/lung_029.nii.gz"}, {"image": "./imagesTr/lung_031.nii.gz", "label": "./labelsTr/lung_031.nii.gz"}, {"image": "./imagesTr/lung_033.nii.gz", "label": "./labelsTr/lung_033.nii.gz"}, {"image": "./imagesTr/lung_034.nii.gz", "label": "./labelsTr/lung_034.nii.gz"}, {"image": "./imagesTr/lung_036.nii.gz", "label": "./labelsTr/lung_036.nii.gz"}, {"image": "./imagesTr/lung_037.nii.gz", "label": "./labelsTr/lung_037.nii.gz"}, {"image": "./imagesTr/lung_038.nii.gz", "label": "./labelsTr/lung_038.nii.gz"}, {"image": "./imagesTr/lung_041.nii.gz", "label": "./labelsTr/lung_041.nii.gz"}, {"image": "./imagesTr/lung_042.nii.gz", "label": "./labelsTr/lung_042.nii.gz"}, {"image": "./imagesTr/lung_043.nii.gz", "label": "./labelsTr/lung_043.nii.gz"}, {"image": "./imagesTr/lung_044.nii.gz", "label": "./labelsTr/lung_044.nii.gz"}, {"image": "./imagesTr/lung_046.nii.gz", "label": "./labelsTr/lung_046.nii.gz"}, {"image": 
"./imagesTr/lung_047.nii.gz", "label": "./labelsTr/lung_047.nii.gz"}, {"image": "./imagesTr/lung_048.nii.gz", "label": "./labelsTr/lung_048.nii.gz"}, {"image": "./imagesTr/lung_049.nii.gz", "label": "./labelsTr/lung_049.nii.gz"}, {"image": "./imagesTr/lung_053.nii.gz", "label": "./labelsTr/lung_053.nii.gz"}, {"image": "./imagesTr/lung_057.nii.gz", "label": "./labelsTr/lung_057.nii.gz"}, {"image": "./imagesTr/lung_058.nii.gz", "label": "./labelsTr/lung_058.nii.gz"}, {"image": "./imagesTr/lung_059.nii.gz", "label": "./labelsTr/lung_059.nii.gz"}, {"image": "./imagesTr/lung_062.nii.gz", "label": "./labelsTr/lung_062.nii.gz"}, {"image": "./imagesTr/lung_064.nii.gz", "label": "./labelsTr/lung_064.nii.gz"}, {"image": "./imagesTr/lung_065.nii.gz", "label": "./labelsTr/lung_065.nii.gz"}, {"image": "./imagesTr/lung_066.nii.gz", "label": "./labelsTr/lung_066.nii.gz"}, {"image": "./imagesTr/lung_069.nii.gz", "label": "./labelsTr/lung_069.nii.gz"}, {"image": "./imagesTr/lung_070.nii.gz", "label": "./labelsTr/lung_070.nii.gz"}, {"image": "./imagesTr/lung_071.nii.gz", "label": "./labelsTr/lung_071.nii.gz"}, {"image": "./imagesTr/lung_074.nii.gz", "label": "./labelsTr/lung_074.nii.gz"}, {"image": "./imagesTr/lung_075.nii.gz", "label": "./labelsTr/lung_075.nii.gz"}, {"image": "./imagesTr/lung_078.nii.gz", "label": "./labelsTr/lung_078.nii.gz"}, {"image": "./imagesTr/lung_079.nii.gz", "label": "./labelsTr/lung_079.nii.gz"}, {"image": "./imagesTr/lung_080.nii.gz", "label": "./labelsTr/lung_080.nii.gz"}, {"image": "./imagesTr/lung_081.nii.gz", "label": "./labelsTr/lung_081.nii.gz"}, {"image": "./imagesTr/lung_083.nii.gz", "label": "./labelsTr/lung_083.nii.gz"}, {"image": "./imagesTr/lung_084.nii.gz", "label": "./labelsTr/lung_084.nii.gz"}, {"image": "./imagesTr/lung_086.nii.gz", "label": "./labelsTr/lung_086.nii.gz"}]}