import re
import time
import seaborn as sns
import networkx as nx  # this module requires networkx version 2.6.3
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import logging
from sklearn.cluster import AgglomerativeClustering
from .common_functions import *
from .thermal_upgrade_functions import define_xfmr_object
from disco import timer_stats_collector
from jade.utils.timing_utils import track_timing, Timer
logger = logging.getLogger(__name__)
NODE_COLORLEGEND = {'Load': {'node_color': 'blue', 'node_size': 20, "alpha": 1, "label": "Load"},
'PV': {'node_color': 'orange', 'node_size': 50, "alpha": 0.8, "label": "PV"},
'Transformer': {'node_color': 'purple', 'node_size': 250, "alpha": 0.75, "label": "Transformer"},
'Circuit Source': {'node_color': 'black', 'node_size': 500, "alpha": 1, "label": "Source"},
'Violation': {'node_color': 'red', 'node_size': 500, "alpha": 0.75, "label": "Violation"},
'Capacitor': {'node_color': 'green', 'node_size': 100, "alpha": 0.75, "label": "Capacitor"},
'Voltage Regulator': {'node_color': 'cyan', 'node_size': 1000, "alpha": 0.75, "label": "Voltage Regulator"},
}
EDGE_COLORLEGEND = {'Violation': {'edge_color': 'violet', 'edge_size': 75, 'alpha': 0.75, "label": "Line Violation"}}
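# Editor's sketch (hypothetical graph; not part of the original module): the keys in
# NODE_COLORLEGEND mirror keyword arguments of nx.draw_networkx_nodes, so a node
# category can be drawn straight from its legend entry.
def _draw_legend_nodes_example():
    graph = nx.path_graph(4)
    pos = nx.spring_layout(graph)
    style = NODE_COLORLEGEND['Load']
    nx.draw_networkx_nodes(graph, pos, nodelist=[0, 1],
                           node_color=style['node_color'], node_size=style['node_size'],
                           alpha=style['alpha'], label=style['label'])
    plt.legend()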
LENGTH_CONVERSION_TO_METRE = {
"mi": 1609.34,
"kft": 304.8,
"km": 1000,
"ft": 0.3048,
"in": 0.0254,
"cm": 0.01,
"m": 1,
}
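# Editor's note: line lengths in mixed units can be normalized through this table, e.g. for
# a hypothetical 1.5 kft segment:
#   length_m = 1.5 * LENGTH_CONVERSION_TO_METRE["kft"]  # -> 457.2 metres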
def edit_capacitor_settings_for_convergence(voltage_config=None, control_command=''):
"""This function edits the dss command string with new capacitor settings, in case of convergence issues
Parameters
----------
voltage_config
control_command
Returns
-------
string
"""
capacitor_settings = {}
capacitor_settings["new_capON"] = round(
(voltage_config["nominal_voltage"] - (voltage_config["cap_sweep_voltage_gap"] + 1) / 2), 1)
capacitor_settings["new_capOFF"] = round(
(voltage_config["nominal_voltage"] + (voltage_config["cap_sweep_voltage_gap"] + 1) / 2), 1)
capacitor_settings["new_deadtime"] = 50
capacitor_settings["new_delay"] = 50
logger.info("Changed Initial On and Off Cap settings to avoid convergence issues ")
new_control_command = control_command
control_command = control_command.replace('New', 'Edit')
control_command = re.sub("enabled=True", "enabled=False", control_command)
check_dss_run_command(control_command) # disable and run previous control command
new_control_command = re.sub("DeadTime=\d+", 'DeadTime=' +
str(capacitor_settings["new_deadtime"]), new_control_command)
new_control_command = re.sub("Delay=\d+", 'Delay=' + str(capacitor_settings["new_delay"]), new_control_command)
new_control_command = re.sub("ONsetting=\d+\.\d+", 'ONsetting=' +
str(capacitor_settings["new_capON"]), new_control_command)
new_control_command = re.sub("OFFsetting=\d+\.\d+", 'OFFsetting=' +
str(capacitor_settings["new_capOFF"]), new_control_command)
return new_control_command
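# Editor's sketch (hypothetical values; requires an active OpenDSS circuit, since the
# function also disables the previous command): for nominal_voltage=120 and
# cap_sweep_voltage_gap=2.0, the regex rewrites above widen the band and slow the control:
#
#   cfg = {"nominal_voltage": 120.0, "cap_sweep_voltage_gap": 2.0}
#   cmd = ("New CapControl.ctrl1 ONsetting=119.5 OFFsetting=120.5 "
#          "Delay=15 DeadTime=20 enabled=True")
#   edit_capacitor_settings_for_convergence(cfg, cmd)
#   # -> "New CapControl.ctrl1 ONsetting=118.5 OFFsetting=121.5 Delay=50 DeadTime=50 enabled=True"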
def correct_capacitor_parameters(default_capacitor_settings, orig_capacitors_df, nominal_voltage,
**kwargs):
"""Corrects cap control parameters: change to voltage controlled, correct PT ratio. Add cap control if not present
Parameters
----------
default_capacitor_settings
orig_capacitors_df
nominal_voltage
Returns
-------
"""
# correct capacitor settings
default_capcontrol_command = f"Type={default_capacitor_settings['cap_control']} " \
f"ONsetting={default_capacitor_settings['capON']} " \
f"OFFsetting={default_capacitor_settings['capOFF']} " \
f"PTphase={default_capacitor_settings['PTphase']} " \
f"Delay={default_capacitor_settings['capONdelay']} " \
f"DelayOFF={default_capacitor_settings['capOFFdelay']} " \
f"DeadTime={default_capacitor_settings['capdeadtime']} enabled=True"
# Correct settings of those cap banks for which cap control object is available
capacitors_commands_list = []
capcontrol_present_df = orig_capacitors_df.loc[orig_capacitors_df['capcontrol_present'] == 'capcontrol']
for index, row in capcontrol_present_df.iterrows():
# if capcontrol is present, change to voltage controlled and apply default settings. (this also adds re-computed PTratio)
if (row["capcontrol_type"].lower() != "voltage"):
logger.info(f"Capacitor control changed to voltage controlled for {row['capcontrol_name']}")
command_string = f"Edit CapControl.{row['capcontrol_name']} PTRatio={row['PTratio']} " \
f"{default_capcontrol_command}"
check_dss_run_command(command_string)
pass_flag = circuit_solve_and_check(raise_exception=False, **kwargs)
if not pass_flag:
# pass the command by keyword: positionally it would bind to voltage_config
command_string = edit_capacitor_settings_for_convergence(control_command=command_string)
check_dss_run_command(command_string)
# raise exception if no convergence even after change
circuit_solve_and_check(raise_exception=True, **kwargs)
capacitors_commands_list.append(command_string)
# if it is already voltage controlled, modify PT ratio if new is different after re-computation
if (row["capcontrol_type"].lower() == "voltage") and (round(row['PTratio'], 2) != round(row['old_PTratio'], 2)):
orig_string = ' !original, corrected PTratio only'
logger.info(f"PT ratio corrected for capcontrol {row['capcontrol_name']}.")
command_string = f"Edit CapControl.{row['capcontrol_name']} PTRatio={row['PTratio']}" + orig_string
check_dss_run_command(command_string)
# this does not change original settings, so should not cause convergence
circuit_solve_and_check(raise_exception=True, **kwargs)
capacitors_commands_list.append(command_string)
# if there are capacitors without cap control, add a voltage-controlled cap control
lines_df = get_thermal_equipment_info(compute_loading=False, equipment_type="line")
lines_df['bus1_extract'] = lines_df['bus1'].str.split(".").str[0]
no_capcontrol_present_df = orig_capacitors_df.loc[orig_capacitors_df['capcontrol_present'] != 'capcontrol']
for index, row in no_capcontrol_present_df.iterrows():
logger.info(f"Capacitor control (voltage controlled) added for {row['capcontrol_name']}.")
capcontrol_name = "capcontrol" + row['capacitor_name']
# extract line name that has the same bus as capacitor
line_name = lines_df.loc[lines_df['bus1_extract'] == row['bus1']]['name'].values[0]
default_pt_ratio = (row['kv'] * 1000) / nominal_voltage
command_string = f"New CapControl.{capcontrol_name} element=Line.{line_name} " \
f"terminal={default_capacitor_settings['terminal']} capacitor={row['capacitor_name']} " \
f"PTRatio={default_pt_ratio} {default_capcontrol_command}"
check_dss_run_command(command_string)
pass_flag = circuit_solve_and_check(raise_exception=False, **kwargs)
if not pass_flag:
# pass the command by keyword: positionally it would bind to voltage_config
command_string = edit_capacitor_settings_for_convergence(control_command=command_string)
check_dss_run_command(command_string)
# raise exception if no convergence even after change
circuit_solve_and_check(raise_exception=True, **kwargs)
capacitors_commands_list.append(command_string)
return capacitors_commands_list
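# Editor's note on the PT ratio above: for a hypothetical 12.47 kV capacitor bank on a
# 120 V nominal base, default_pt_ratio = (12.47 * 1000) / 120 ≈ 103.9.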
@track_timing(timer_stats_collector)
def sweep_capacitor_settings(voltage_config, initial_capacitors_df, default_capacitor_settings, voltage_upper_limit,
voltage_lower_limit, **kwargs):
"""This function sweeps through capacitor settings and returns dataframe of severity metrics for all the sweeps of capacitor controls with best settings.
This function increases differences between cap ON and OFF voltages in user defined increments,
default 1 volt, until upper and lower bounds are reached.
Parameters
----------
voltage_config
initial_capacitors_df
default_capacitor_settings
upper_limit
lower_limit
Returns
-------
DataFrame
"""
# This function increases differences between cap ON and OFF voltages in user defined increments,
# default 1 volt, until upper and lower bounds are reached.
capacitor_sweep_list = [] # this list will contain severity of each capacitor setting sweep
# get severity index for original/initial capacitor settings (i.e. before the settings sweep)
temp_dict = {'cap_on_setting': 'original setting', 'cap_off_setting': 'original setting'}
pass_flag = circuit_solve_and_check(raise_exception=False, **kwargs)
if not pass_flag:  # if there is a convergence issue at this setting, go on to the next setting and don't save
temp_dict['converged'] = False
else:
temp_dict['converged'] = True
severity_dict = compute_voltage_violation_severity(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit)
temp_dict.update(severity_dict)
capacitor_sweep_list.append(temp_dict)
# start settings sweep
cap_on_setting = default_capacitor_settings["capON"]
cap_off_setting = default_capacitor_settings["capOFF"]
cap_control_gap = voltage_config["capacitor_sweep_voltage_gap"]
# Apply same capacitor ON and OFF settings to all capacitor controls and determine their impact
# iterate over capacitor on and off settings while they are within voltage violation limits
while (cap_on_setting > (voltage_lower_limit * voltage_config["nominal_voltage"])) or \
(cap_off_setting < (voltage_upper_limit * voltage_config["nominal_voltage"])):
temp_dict = {'cap_on_setting': cap_on_setting, 'cap_off_setting': cap_off_setting}
for index, row in initial_capacitors_df.iterrows(): # apply settings to all capacitors
check_dss_run_command(f"Edit CapControl.{row['capcontrol_name']} ONsetting={cap_on_setting} "
f"OFFsetting={cap_off_setting}")
pass_flag = circuit_solve_and_check(raise_exception=False, **kwargs)
if not pass_flag:  # if there is a convergence issue at this setting, go on to the next setting and don't save
temp_dict['converged'] = False
break
else:
temp_dict['converged'] = True
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=False, **kwargs)
severity_dict = compute_voltage_violation_severity(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit)
temp_dict.update(severity_dict)
capacitor_sweep_list.append(temp_dict)
if (cap_on_setting - cap_control_gap / 2) <= (voltage_lower_limit * voltage_config["nominal_voltage"]):
cap_on_setting = voltage_lower_limit * voltage_config["nominal_voltage"]
else:
cap_on_setting = cap_on_setting - cap_control_gap / 2
if (cap_off_setting + cap_control_gap / 2) >= (voltage_upper_limit * voltage_config["nominal_voltage"]):
cap_off_setting = voltage_upper_limit * voltage_config["nominal_voltage"]
else:
cap_off_setting = cap_off_setting + cap_control_gap / 2
capacitor_sweep_df = pd.DataFrame(capacitor_sweep_list)
return capacitor_sweep_df
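# Editor's illustration (standalone, hypothetical numbers): the ON/OFF schedule that
# sweep_capacitor_settings walks through widens by half the configured gap per step and
# clamps at the voltage limits. For nominal_voltage=120, gap=1.0 and 0.95/1.05 pu limits:
def _sweep_schedule_example(cap_on=119.5, cap_off=120.5, gap=1.0,
                            v_lo=0.95 * 120.0, v_hi=1.05 * 120.0):
    schedule = []
    while cap_on > v_lo or cap_off < v_hi:
        schedule.append((cap_on, cap_off))
        cap_on = max(cap_on - gap / 2, v_lo)
        cap_off = min(cap_off + gap / 2, v_hi)
    return schedule  # [(119.5, 120.5), (119.0, 121.0), ..., (114.5, 125.5)]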
def choose_best_capacitor_sweep_setting(capacitor_sweep_df, initial_capacitors_df, deciding_field, **kwargs):
"""This function takes the dataframe containing severity metrics, identifies the best cap control setting out
of all the sweeps and returns dataframe of capacitor controls with best settings
Parameters
----------
capacitor_sweep_df
initial_capacitors_df
Returns
-------
DataFrame
"""
# start with assumption that original setting is best setting
original_setting = capacitor_sweep_df.loc[capacitor_sweep_df['cap_on_setting'] == 'original setting'].iloc[0]
min_severity_setting = capacitor_sweep_df.loc[capacitor_sweep_df[deciding_field].idxmin()]
setting_type = ''
# if min severity is greater than or same as severity of original setting,
# then just assign original setting as min_severity_setting
if min_severity_setting[deciding_field] >= original_setting[deciding_field]:
capacitors_df = initial_capacitors_df.copy() # here best_setting is initial settings
logger.info("Original capacitor settings are best. No need to change capacitor settings.")
setting_type = 'initial_setting'
else:
logger.info("Capacitor settings changed.")
# apply same best setting to all capacitors
capacitors_df = initial_capacitors_df.copy()
capacitors_df['ONsetting'] = min_severity_setting['cap_on_setting']
capacitors_df['OFFsetting'] = min_severity_setting['cap_off_setting']
properties_list = ["ONsetting", "OFFsetting"] # list of properties to be edited in commands
capacitor_settings_commands_list = create_capcontrol_settings_commands(properties_list=properties_list,
capacitors_df=capacitors_df,
creation_action='Edit')
for command_string in capacitor_settings_commands_list:
check_dss_run_command(command_string)
circuit_solve_and_check(raise_exception=True, **kwargs)
if setting_type == 'initial_setting': # if initial settings are best, no need to return command with settings
capacitor_settings_commands_list = []
return capacitors_df, capacitor_settings_commands_list
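# Editor's sketch of the selection rule above, with a hypothetical deciding_field
# ('deviation_severity') and made-up numbers:
#
#   sweep = pd.DataFrame([
#       {'cap_on_setting': 'original setting', 'cap_off_setting': 'original setting',
#        'deviation_severity': 12.0},
#       {'cap_on_setting': 118.5, 'cap_off_setting': 121.5, 'deviation_severity': 4.5},
#   ])
#   best = sweep.loc[sweep['deviation_severity'].idxmin()]  # the 4.5 row wins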
def create_capcontrol_settings_commands(properties_list, capacitors_df, creation_action='New'):
"""This function creates a list of capacitor control commands, based on the properties list and cap dataframe passed
Parameters
----------
properties_list
capacitors_df
creation_action
Returns
-------
list
"""
capacitor_commands_list = []
if properties_list is None:
properties_list = ["ONsetting", "OFFsetting"]
for index, row in capacitors_df.iterrows():
command_string = f"{creation_action} CapControl.{row['capcontrol_name']}"
for property_name in properties_list:
command_string = command_string + f" {property_name}={row[property_name]}"
capacitor_commands_list.append(command_string)
return capacitor_commands_list
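# Editor's example (hypothetical data): for a two-row capacitors_df the helper above
# yields one Edit command per controller:
#
#   df = pd.DataFrame({'capcontrol_name': ['ctrl1', 'ctrl2'],
#                      'ONsetting': [118.5, 118.5], 'OFFsetting': [121.5, 121.5]})
#   create_capcontrol_settings_commands(['ONsetting', 'OFFsetting'], df, creation_action='Edit')
#   # -> ['Edit CapControl.ctrl1 ONsetting=118.5 OFFsetting=121.5',
#   #     'Edit CapControl.ctrl2 ONsetting=118.5 OFFsetting=121.5']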
def determine_capacitor_upgrades(voltage_upper_limit, voltage_lower_limit, default_capacitor_settings, orig_capacitors_df,
voltage_config, deciding_field, **kwargs):
"""This function corrects capacitor parameters, sweeps through capacitor settings and determines the best capacitor setting.
It returns the dss commands associated with all these actions
"""
fig_folder = kwargs.get("fig_folder", None)
create_plots = kwargs.get("create_plots", False)
circuit_source = kwargs.get("circuit_source", None)
title = kwargs.get("title", "Bus violations after existing capacitor sweep module_")
capacitor_dss_commands = []
logger.info("Capacitors are present in the network. Perform capacitor bank control modifications.")
# correct cap control parameters: change to voltage controlled, correct PT ratio. Add cap control if not present
capcontrol_parameter_commands_list = correct_capacitor_parameters(
default_capacitor_settings=default_capacitor_settings, orig_capacitors_df=orig_capacitors_df,
nominal_voltage=voltage_config['nominal_voltage'], **kwargs)
capacitor_dss_commands = capacitor_dss_commands + capcontrol_parameter_commands_list
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, **kwargs)
if len(buses_with_violations) > 0:
# get capacitors dataframe before any settings changes are made
nosetting_changes_capacitors_df = get_capacitor_info(correct_PT_ratio=False)
# sweep through capacitor settings
"""
The lightning training loop handles everything except the actual computations of your model.
To decide what will happen in your training loop, define the `training_step` function.
Below are all the things lightning automates for you in the training loop.
Accumulated gradients
---------------------
Accumulated gradients runs K small batches of size N before doing a backward pass.
The effect is an effective batch size of KxN.
.. code-block:: python
# DEFAULT (ie: no accumulated grads)
trainer = Trainer(accumulate_grad_batches=1)
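For intuition, ``accumulate_grad_batches=K`` behaves roughly like this hand-written
loop (editor's sketch, simplified; the real loop also handles scaling, clipping and schedulers):
.. code-block:: python
    for i, batch in enumerate(train_dataloader):
        loss = model.training_step(batch, i)
        (loss / K).backward()  # gradients accumulate in param.grad
        if (i + 1) % K == 0:
            optimizer.step()
            optimizer.zero_grad()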
Force training for min or max epochs
------------------------------------
It can be useful to force training for a minimum number of epochs or limit to a max number
.. code-block:: python
# DEFAULT
trainer = Trainer(min_epochs=1, max_epochs=1000)
Force disable early stop
------------------------
To disable early stopping pass None to the early_stop_callback
.. code-block:: python
# DEFAULT
trainer = Trainer(early_stop_callback=None)
Gradient Clipping
-----------------
Gradient clipping may be enabled to avoid exploding gradients.
Specifically, this will clip the gradient norm computed over all model parameters
`together <https://pytorch.org/docs/stable/nn.html#torch.nn.utils.clip_grad_norm_>`_.
.. code-block:: python
# DEFAULT (ie: don't clip)
trainer = Trainer(gradient_clip_val=0)
# clip gradients with norm above 0.5
trainer = Trainer(gradient_clip_val=0.5)
Inspect gradient norms
----------------------
Looking at grad norms can help you figure out where training might be going wrong.
.. code-block:: python
# DEFAULT (-1 doesn't track norms)
trainer = Trainer(track_grad_norm=-1)
# track the LP norm (P=2 here)
trainer = Trainer(track_grad_norm=2)
Set how much of the training set to check
-----------------------------------------
If you don't want to check 100% of the training set (for debugging or if it's huge), set this flag.
limit_train_batches will be overwritten by overfit_batches if `overfit_batches > 0`
.. code-block:: python
# DEFAULT
trainer = Trainer(limit_train_batches=1.0)
# check 10% only
trainer = Trainer(limit_train_batches=0.1)
# check 10 batches only
trainer = Trainer(limit_train_batches=10)
Packed sequences as inputs
--------------------------
When using PackedSequence, do 2 things:
1. return either a padded tensor from the dataset or a list of variable length tensors
in the dataloader collate_fn (the example below shows the list implementation).
2. Pack the sequence in forward or training and validation steps depending on use case.
.. code-block:: python
# For use in dataloader
def collate_fn(batch):
x = [item[0] for item in batch]
y = [item[1] for item in batch]
return x, y
# In module
def training_step(self, batch, batch_idx):
x = rnn.pack_sequence(batch[0], enforce_sorted=False)
y = rnn.pack_sequence(batch[1], enforce_sorted=False)
Truncated Backpropagation Through Time
--------------------------------------
There are times when multiple backwards passes are needed for each batch.
For example, it may save memory to use Truncated Backpropagation Through Time when training RNNs.
When this flag is enabled each batch is split into sequences of size truncated_bptt_steps
and passed to training_step(...) separately. A default splitting function is provided,
however, you can override it for more flexibility. See `tbptt_split_batch`.
.. code-block:: python
# DEFAULT (single backwards pass per batch)
trainer = Trainer(truncated_bptt_steps=None)
# (split batch into sequences of size 2)
trainer = Trainer(truncated_bptt_steps=2)
NaN detection and intervention
------------------------------
When the `terminate_on_nan` flag is enabled, after every forward pass during training, Lightning will
check that
1. the loss you return in `training_step` is finite (not NaN and not +/-inf)
2. the model parameters have finite values.
Lightning will terminate the training loop with an error message if NaN or infinite
values are detected. If this happens, you should investigate numerically unstable operations
in your model.
.. code-block:: python
# DEFAULT (won't perform the NaN check)
trainer = Trainer(terminate_on_nan=False)
# (NaN check each batch and terminate on NaN or infinite values)
trainer = Trainer(terminate_on_nan=True)
"""
import subprocess
from abc import ABC, abstractmethod
from typing import Callable
from typing import Union, List
import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.distributed as torch_distrib
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import rank_zero_warn, NATIVE_AMP_AVALAIBLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.memory import recursive_detach
import os
from transformers import BartTokenizer
from gensim.corpora import Dictionary
from gensim.test.utils import datapath
from data_utils import DocDataset
import re
from mlutils.exp import yaml_load, yaml_dump
from mlutils.pt.training import GSMTrainer, extend_config_reference
try:
from apex import amp
except ImportError:
APEX_AVAILABLE = False
else:
APEX_AVAILABLE = True
try:
import torch_xla.distributed.parallel_loader as xla_pl
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
try:
import horovod.torch as hvd
except (ModuleNotFoundError, ImportError):
HOROVOD_AVAILABLE = False
else:
HOROVOD_AVAILABLE = True
# constant listing which signals should be caught for graceful trainer shutdown
SIGNAL_TERMINATE = ('SIGTERM', 'SIGSEGV', 'SIGINT')
class TrainerTrainLoopMixin(ABC):
# this is just a summary on variables used in this abstract class,
# the proper values/initialisation should be done in child class
max_epochs: int
min_epochs: int
on_gpu: bool
use_ddp: bool
use_dp: bool
use_ddp2: bool
use_horovod: bool
single_gpu: bool
use_tpu: bool
data_parallel_device_ids: ...
check_val_every_n_epoch: ...
num_training_batches: int
val_check_batch: ...
disable_validation: bool
fast_dev_run: ...
accumulation_scheduler: ...
lr_schedulers: ...
early_stop_callback: ...
callback_metrics: ...
logger: Union[LightningLoggerBase, bool]
global_step: int
testing: bool
log_save_interval: float
global_rank: int
row_log_interval: float
truncated_bptt_steps: ...
optimizers: ...
optimizer_frequencies: ...
accumulate_grad_batches: int
track_grad_norm: ...
model: LightningModule
interrupted: bool
running_loss: ...
progress_bar_dict: ...
reduce_lr_on_plateau_scheduler: ...
profiler: ...
batch_idx: int
precision: ...
train_dataloader: DataLoader
reload_dataloaders_every_epoch: bool
max_steps: int
min_steps: int
total_batch_idx: int
terminate_on_nan: bool
tpu_id: int
interactive_ddp_procs: ...
# Callback system
callbacks: List[Callback]
on_train_start: Callable
on_train_end: Callable
on_batch_start: Callable
on_batch_end: Callable
on_epoch_start: Callable
on_epoch_end: Callable
on_validation_end: Callable
on_keyboard_interrupt: Callable
@abstractmethod
def get_model(self) -> LightningModule:
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def is_function_implemented(self, *args, **kwargs):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def run_evaluation(self, *args, **kwargs):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def transfer_batch_to_gpu(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def transfer_batch_to_tpu(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def clip_gradients(self):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def detect_nan_tensors(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def is_overridden(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def add_progress_bar_metrics(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def log_metrics(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def process_output(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def reset_train_dataloader(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def reset_val_dataloader(self, model):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def has_arg(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
def train(self):
# add signal handlers for process kills
# def _signal_kill_handler(*args):
# return TrainerTrainLoopMixin.run_training_teardown(self)
#
# orig_signal_handlers = {}
# for sig_name in SIGNAL_TERMINATE:
# orig_signal_handlers[sig_name] = signal.signal(getattr(signal, sig_name),
# _signal_kill_handler)
# get model
model = self.get_model()
# enable train mode
model.train()
# enable gradients
torch.set_grad_enabled(True)
# load data
# if reload_dataloaders_every_epoch, this is moved to the epoch loop
if not self.reload_dataloaders_every_epoch:
self.reset_train_dataloader(model)
self.reset_val_dataloader(model)
# Train start events
with self.profiler.profile('on_train_start'):
# callbacks
self.on_train_start()
# model hooks
model.on_train_start()
try:
# run all epochs
for epoch in range(self.current_epoch, self.max_epochs):
# reset train dataloader
if self.reload_dataloaders_every_epoch:
self.reset_train_dataloader(model)
# set seed for distributed sampler (enables shuffling for each epoch)
if (self.use_ddp or self.use_horovod) \
and hasattr(self.train_dataloader, 'sampler') \
and hasattr(self.train_dataloader.sampler, 'set_epoch'):
self.train_dataloader.sampler.set_epoch(epoch)
# update training progress in trainer and model
model.current_epoch = epoch
self.current_epoch = epoch
# changing gradient accumulation according to the accumulation_scheduler
self.accumulation_scheduler.on_epoch_start(self, self.get_model())
# stores accumulated grad fractions per batch
self.batch_loss_value = TensorRunningAccum(
window_length=self.accumulate_grad_batches
)
# -----------------
# RUN TNG EPOCH
# -----------------
self.run_training_epoch()
if self.max_steps and self.max_steps <= self.global_step:
self.run_training_teardown()
return
# update LR schedulers
self.update_learning_rates(interval='epoch')
# early stopping
met_min_epochs = epoch >= self.min_epochs - 1
met_min_steps = self.global_step >= self.min_steps if self.min_steps else True
if self.should_stop:
if (met_min_epochs and met_min_steps) or self.fast_dev_run:
self.run_training_teardown()
return
else:
log.info('Trainer was signaled to stop but required minimum epochs'
f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has'
' not been met. Training will continue...')
self.run_training_teardown()
except KeyboardInterrupt:
rank_zero_warn('Detected KeyboardInterrupt, attempting graceful shutdown...')
# user could press ctrl+c many times... only shutdown once
if not self.interrupted:
self.interrupted = True
self.on_keyboard_interrupt()
self.run_training_teardown()
def prepare_train_loop_dataloader(self, train_dataloader):
# on TPU we have to wrap it
database.h5.
Fe_Foil written to database.h5.
Ferrihydrite_20K written to database.h5.
Goethite_20K written to database.h5.
>>> # write selected group dataset
>>> write_collection_hdf5('database.h5', collection, names=['Fe_Foil'], replace=True)
Fe_Foil written to database.h5.
"""
# verifying that a valid Collection instance was given
if not isinstance(collection, Collection):
raise TypeError('%s is not a valid Collection instance.' % collection)
# verifying requested group names to write
all_names = collection.get_names()
if names == ['all']:
names = all_names
else:
for name in names:
if name not in all_names:
raise ValueError('%s group is not in the Collection.' % name)
# verifying existence of path:
# (a)ppend to existing file
# (w)rite to new file.
if isfile(fpath):
hdf5 = File(fpath, "a")
else:
hdf5 = File(fpath, "w")
for name in names:
# testing name on the HDF5 file
if name in hdf5:
# dataset present in the file
if replace:
del hdf5[name]
else:
hdf5.close()
raise ValueError("%s already exists in %s." % (name, fpath))
dataset = hdf5.create_group(name)
group = collection.get_group(name)
try:
write_recursive_hdf5(dataset, group)
print("%s written to %s." % (name, fpath))
except Exception:
del hdf5[name]
hdf5.close()
raise IOError("%s couldn't be written to %s." % (name, fpath))
hdf5.close()
return
def write_recursive_hdf5(dataset: Dataset, group: Group) -> None:
"""Utility function to write a Group recursively in an HDF5 file.
Parameters
----------
dataset
Dataset in the HDF5 file.
group
Group to write in the HDF5 file.
Returns
-------
:
Warning
-------
Only :class:`str`, :class:`float`, :class:`int` and :class:`~numpy.ndarray`
types are currently supported for recursive writing in an HDF5 :class:`~h5py.Dataset`.
:class:`dict` and :class:`list` types will be converted to :class:`str`, which is in
turn saved as :class:`bytes` in the HDF5 database.
If read with :func:`read_hdf5`, such records will be automatically converted to their
original type in the group.
"""
# accepted type variables for recursive writing
accepted_types = (str, float, int, ndarray)
converted_types = (dict, list)
for key in dir(group):
if '__' not in key:
record = getattr(group, key)
if isinstance(record, accepted_types):
dataset.create_dataset(key, data=record)
elif isinstance(record, converted_types):
dataset.create_dataset(key, data=str(record))
return
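# Editor's sketch (hypothetical names; a plain namespace stands in for an araucaria
# Group): only str/float/int/ndarray are written natively, while a dict such as
# ``tags`` below is stringified first and read back as bytes/str.
#
#   from types import SimpleNamespace
#   demo = SimpleNamespace(energy=7112.0, tags={'edge': 'K'})
#   with File('scratch.h5', 'w') as hdf5:
#       write_recursive_hdf5(hdf5.create_group('demo'), demo)
#       print(hdf5['demo']['tags'].asstr()[()])  # "{'edge': 'K'}"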
def rename_dataset_hdf5(fpath: Path, name: str, newname: str) -> None:
"""Renames a dataset in an HDF5 file.
Parameters
----------
fpath
Path to HDF5 file.
name
Name of Group dataset.
newname
New name for Group dataset.
Returns
-------
:
Raises
------
IOError
If the HDF5 file does not exist in the specified path.
ValueError
If ``name`` dataset does not exist in the HDF5 file.
ValueError
If ``newname`` dataset already exists in the HDF5 file.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_xmu, write_hdf5, rename_dataset_hdf5
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # saving a new hdf5 file
>>> write_hdf5('database.h5', group_mu, name='xmu_testfile', replace=True)
xmu_testfile written to database.h5.
>>> # renaming dataset
>>> rename_dataset_hdf5('database.h5', 'xmu_testfile', 'xmu_renamed')
xmu_testfile renamed to xmu_renamed in database.h5.
"""
# verifying existence of path:
if isfile(fpath):
hdf5 = File(fpath, "a")
else:
raise IOError("file %s does not exists." % fpath)
if newname in hdf5:
hdf5.close()
raise ValueError('%s already exists in %s' % (newname, fpath))
# verifying existence of datagroup
if name in hdf5:
hdf5[newname] = hdf5[name]
else:
hdf5.close()
raise ValueError("%s does not exist in %s." % (name, fpath))
del hdf5[name]
hdf5.close()
print("%s renamed to %s in %s." % (name, newname, fpath))
return
def delete_dataset_hdf5(fpath: Path, name: str) -> None:
"""Deletes a dataset from an HDF5 file.
Parameters
----------
fpath
Path to HDF5 file.
name
Name of dataset to delete.
Returns
-------
:
Raises
------
IOError
If the HDF5 file does not exist in the specified path.
ValueError
If ``name`` dataset does not exist in the HDF5 file.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_xmu, write_hdf5, rename_dataset_hdf5
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # saving a new hdf5 file
>>> write_hdf5('database.h5', group_mu, name='xmu_testfile', replace=True)
xmu_testfile written to database.h5.
>>> # deleting dataset
>>> delete_dataset_hdf5('database.h5', 'xmu_testfile')
xmu_testfile deleted from database.h5.
"""
# verifying existence of path:
if isfile(fpath):
hdf5 = File(fpath, "a")
else:
# nothing to close here; the file was never opened
raise IOError("File %s does not exist." % fpath)
# verifying existence of datagroup
if name in hdf5:
del hdf5[name]
hdf5.close()
print("%s deleted from %s." % (name, fpath))
else:
hdf5.close()
raise ValueError("%s does not exist in %s." % (name, fpath))
return
def summary_hdf5(fpath: Path, regex: str=None, optional: Optional[list]=None,
**pre_edge_kws:dict) -> Report:
"""Returns a summary report of datasets in an HDF5 file.
Parameters
----------
fpath
Path to HDF5 file.
regex
Search string to filter results by dataset name. See Notes for details.
The default is None.
optional
List with optional parameters. See Notes for details.
The default is None.
pre_edge_kws
Dictionary with arguments for :func:`~araucaria.xas.normalize.pre_edge`.
Returns
-------
:
Report for datasets in the HDF5 file.
Raises
------
IOError
If the HDF5 file does not exist in the specified path.
Notes
-----
Summary data includes the following:
1. Dataset index.
2. Dataset name.
3. Measurement mode.
4. Numbers of scans.
5. Absorption edge step :math:`\Delta\mu(E_0)`, if ``optional=['edge_step']``.
6. Absorption threshold energy :math:`E_0`, if ``optional=['e0']``.
7. Merged scans, if ``optional=['merged_scans']``.
8. Optional parameters if they exist as attributes in the dataset.
A ``regex`` value can be used to filter dataset names based
on a regular expression (regex). For valid regex syntax, please
check the documentation of the module :mod:`re`.
The number of scans and names of merged files are retrieved
from the ``merged_scans`` attribute of the HDF5 dataset.
The absorption threshold and the edge step are retrieved by
calling the function :func:`~araucaria.xas.normalize.pre_edge`.
Optional parameters will be retrieved from the dataset as
attributes. Currently only :class:`str`, :class:`float` or
:class:`int` will be retrieved. Otherwise an empty string
will be printed in the report.
See also
--------
:func:`read_hdf5`
:class:`~araucaria.main.report.Report`
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import summary_hdf5
>>> fpath = get_testpath('Fe_database.h5')
>>> # printing default summary
>>> report = summary_hdf5(fpath)
>>> report.show()
=================================
id dataset mode n
=================================
1 FeIISO4_20K mu 5
2 Fe_Foil mu_ref 5
3 Ferrihydrite_20K mu 5
4 Goethite_20K mu 5
=================================
>>> # printing summary with merged scans of Goethite groups
>>> report = summary_hdf5(fpath, regex='Goe', optional=['merged_scans'])
>>> report.show()
=======================================================
id dataset mode n merged_scans
=======================================================
1 Goethite_20K mu 5 20K_GOE_Fe_K_240.00000.xdi
20K_GOE_Fe_K_240.00001.xdi
20K_GOE_Fe_K_240.00002.xdi
20K_GOE_Fe_K_240.00003.xdi
20K_GOE_Fe_K_240.00004.xdi
=======================================================
>>> # printing custom parameters
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_xmu, write_hdf5
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # adding additional attributes
>>> group_mu.symbol = 'Zn'
>>> group_mu.temp = 25.0
>>> # saving a new hdf5 file
>>> write_hdf5('database2.h5', group_mu, name='xmu_testfile', replace=True)
xmu_testfile written to database2.h5.
>>> report = summary_hdf5('database2.h5', optional=['symbol','temp'])
>>> report.show()
=========================================
id dataset mode n symbol temp
=========================================
1 xmu_testfile mu 1 Zn 25
=========================================
"""
# verifying existence of path:
if isfile(fpath):
hdf5 = File(fpath, "r")
else:
raise IOError("file %s does not exists." % fpath)
# list with parameter names
field_names = ['id', 'dataset', 'mode', 'n']
opt_list = ['merged_scans', 'edge_step', 'e0']
if pre_edge_kws == {}:
# default values
pre_edge_kws = {'pre_range': [-150, -50], 'nnorm': 3, 'post_range': [150, inf]}
# verifying optional values
if optional is not None:
for opt_val in optional:
field_names.append(opt_val)
# instantiating report class
report = Report()
report.set_columns(field_names)
# number of records
keys = list(hdf5.keys())
if regex is not None:
keys = [key for key in keys if search(regex, key) is not None]
nkeys = len(keys)
for i, key in enumerate(keys):
data = read_hdf5(fpath, str(key))
scanval = data.get_mode()
extra_content = False # aux variable for 'merged_scans'
try:
# merged_scans is saved as a string, so counting commas gives the number of merged scans
nscans = hdf5[key]['merged_scans'].asstr()[()].count(',') + 1
except Exception:
nscans = 1
field_vals
"""
Provides the SpriteLoader class, that handles loading and caching of assets.
"""
import pathlib
import hashlib
from math import ceil
import os
from typing import Optional
from typing import Tuple
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from sdl2.ext import SpriteFactory
from sdl2.ext import TextureSprite
from sdl2 import endian
from sdl2 import surface
from sdl2 import pixels
from sdl2.ext import SDLError
from . import sdf
from . import vec2
from .common import SCALE
__author__ = '<NAME>'
__license__ = 'MIT'
__version__ = '0.1'
__copyright__ = """Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
COLOR = Tuple[int, int, int, int]
# This function is adapted directly from the PySDL2 package, to perform the
# conversion of a PIL/Pillow image into a SDL2 Sprite.
def _image2sprite(image, factory):
# pylint: disable=missing-docstring,bad-continuation,protected-access
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
mode = image.mode
width, height = image.size
rmask = gmask = bmask = amask = 0
if mode in ("1", "L", "P"):
# 1 = B/W, 1 bit per byte
# "L" = greyscale, 8-bit
# "P" = palette-based, 8-bit
pitch = width
depth = 8
elif mode == "RGB":
# 3x8-bit, 24bpp
if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:
rmask = 0x0000FF
gmask = 0x00FF00
bmask = 0xFF0000
else:
rmask = 0xFF0000
gmask = 0x00FF00
bmask = 0x0000FF
depth = 24
pitch = width * 3
elif mode in ("RGBA", "RGBX"):
# RGBX: 4x8-bit, no alpha
# RGBA: 4x8-bit, alpha
if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:
rmask = 0x000000FF
gmask = 0x0000FF00
bmask = 0x00FF0000
if mode == "RGBA":
amask = 0xFF000000
else:
rmask = 0xFF000000
gmask = 0x00FF0000
bmask = 0x0000FF00
if mode == "RGBA":
amask = 0x000000FF
depth = 32
pitch = width * 4
else:
# We do not support CMYK or YCbCr for now
raise TypeError("unsupported image format")
pxbuf = image.tobytes()
imgsurface = surface.SDL_CreateRGBSurfaceFrom(pxbuf, width, height,
depth, pitch, rmask,
gmask, bmask, amask)
if not imgsurface:
raise SDLError()
imgsurface = imgsurface.contents
# the pixel buffer must not be freed for the lifetime of the surface
imgsurface._pxbuf = pxbuf
if mode == "P":
# Create a SDL_Palette for the SDL_Surface
def _chunk(seq, size):
for x in range(0, len(seq), size):
yield seq[x:x + size]
rgbcolors = image.getpalette()
sdlpalette = pixels.SDL_AllocPalette(len(rgbcolors) // 3)
if not sdlpalette:
raise SDLError()
SDL_Color = pixels.SDL_Color
# pylint: disable=invalid-name
for idx, (r, g, b) in enumerate(_chunk(rgbcolors, 3)):
sdlpalette.contents.colors[idx] = SDL_Color(r, g, b)
ret = surface.SDL_SetSurfacePalette(imgsurface, sdlpalette)
# This will decrease the refcount on the palette, so it gets
# freed properly on releasing the SDL_Surface.
pixels.SDL_FreePalette(sdlpalette)
if ret != 0:
raise SDLError()
return factory.from_surface(imgsurface, free=True)
def parse_sdf_str(sdf_str):
"""
Parses SDF strings to get SDF type and keyword arguments for the function
call.
Args:
sdf_str: ``str`` -> SDF strings are formatted as follows:
"SDF:[sdf_type]:[I/F]:[parameter]=[value]:..." where value will be
converted to either I=int or F=float, where appropriate. Colors are
the exception, where always int is presumed in form of a tuple.
.. note::
SDF string Example:
``SDF:circle:I:radius=100:frame_color=(80,120,10):alpha=180``
This would produce a circle with a radius of 100 pixel filled
with the specified frame_color and alpha values.
Returns:
``Tuple[Dict, str]`` -> kwargs as dictionary and the SDF type as string.
"""
elements = sdf_str.split(':')
try:
isfloat = {'I': False, 'F': True}[elements[2]]
except KeyError:
raise ValueError(f'Expected either "I" or "F" for data type, got '
f'"{elements[2]}" instead.')
converter = {
'width': (float, int),
'height': (float, int),
'radius': (float, int),
'corner_radius': (float, int),
'border_thickness': (float, int),
'frame_color': (
lambda x: tuple([int(i) for i in x.strip()[1:-1].split(',')]),
),
'border_color': (
lambda x: tuple([int(i) for i in x.strip()[1:-1].split(',')]),
),
'multi_sampling': (int, ),
'alpha': (int, )
}  # per-parameter converters; float (F) mode uses the first entry, int (I) mode the last
kwargs = {}
w_unit = 0
for i in elements[3:]:
try:
k, value = i.split('=')
except ValueError:
raise ValueError(f'Expected "parameter=value", got "{i}" instead.')
if k in converter:
try:
kwargs[k] = converter[k][0 if isfloat else -1](value)
except (TypeError, ValueError):
raise ValueError(f'Unable to unpack parameter: "{i}"')
elif isfloat and k == 'w':
w_unit = int(value)
else:
raise ValueError(f'Unknown parameter: "{k}".')
if isfloat:
for k in kwargs:
if converter[k][0] is float:
kwargs[k] = int(kwargs[k] * w_unit + 0.5)
return kwargs, elements[1]
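# Worked example for parse_sdf_str, using the string from the docstring above:
#
#     kwargs, sdf_type = parse_sdf_str(
#         'SDF:circle:I:radius=100:frame_color=(80,120,10):alpha=180')
#     # sdf_type == 'circle'
#     # kwargs == {'radius': 100, 'frame_color': (80, 120, 10), 'alpha': 180}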
class SpriteLoader:
"""
Provides ``load_*`` methods that return Sprite objects of (cached) images
and a method to compose/flatten multiple images into a single one.
Automatically caches scaled and/or composed images on first load to reduce
subsequent load time.
    Requires a SpriteFactory and a valid path to the asset directory. Optionally
    a cache directory can be specified; otherwise a 'cache' directory relative
    to ``os.getcwd()`` is used. The cache_dir is created on init if absent.
"""
def __init__(
self,
factory, # type: SpriteFactory
asset_dir, # type: str
cache_dir=None, # type: Optional[str]
resize_type=Image.BICUBIC # type: Optional[int]
):
# type: (...) -> None
if not isinstance(factory, SpriteFactory):
raise TypeError('expected sdl2.ext.SpriteFactory for factory')
if not os.path.isdir(asset_dir):
            raise NotADirectoryError(f'Invalid asset_dir: "{asset_dir}"')
self.factory = factory
self.asset_dir = asset_dir
self.cache_dir = cache_dir or os.path.join(os.getcwd(), 'cache')
if not os.path.isdir(self.cache_dir):
os.makedirs(self.cache_dir)
self.resize_type = resize_type
self._assets = {}
self._sprite_cache = {}
self._font_cache = {}
self._refresh_assets()
def _refresh_assets(self):
paths = pathlib.Path(self.asset_dir).glob('**/*.*')
paths = [str(f.as_posix()) for f in paths]
paths = [
s[s.find(self.asset_dir) + len(self.asset_dir):] for s in paths
]
if paths and paths[0].startswith('/'):
paths = [s[1:] for s in paths]
self._assets = {}
for k in paths:
if k[-3:].lower() in ('ttf', 'otf'):
self._assets[k] = os.path.join(self.asset_dir, k)
continue
try:
_ = Image.open(os.path.join(self.asset_dir, k))
except IsADirectoryError:
continue
except IOError:
continue
self._assets[k] = Asset(k, self)
def load_image(self, asset_path: str, scale: SCALE = 1.0,
res: Tuple[int, int] = None, retry: bool = False):
"""
        Loads asset_path at the specified scale, or generates (if necessary) an
        SDF if `asset_path` starts with "SDF:" (SDF strings are case sensitive!).
"""
if res is not None:
impath = self._assets[asset_path][scale]
k = impath + str(res)
if k not in self._sprite_cache:
orig = Image.open(impath)
img = Image.new(orig.mode, res)
for i in range(ceil(res[0] / orig.size[0])):
for j in range(ceil(res[1] / orig.size[1])):
img.paste(orig, (i * orig.size[0], j * orig.size[1]))
self._sprite_cache[k] = _image2sprite(img, self.factory)
return self._sprite_cache[k]
if asset_path.startswith('SDF:'):
return self._load_sdf(asset_path)
if asset_path in self._assets:
k = self._assets[asset_path][scale]
if k not in self._sprite_cache:
self._sprite_cache[k] = _image2sprite(
Image.open(k),
self.factory
)
return self._sprite_cache[k]
elif not retry:
self._refresh_assets()
return self.load_image(asset_path, scale, res, retry=True)
raise ValueError(f'asset_path must be a valid path relative to '
f'"{self.asset_dir}" without leading "/". Got '
f'"{asset_path}".')
def load_text(self, text, font, size, color, align, spacing, multiline):
# type: (str, str, int, COLOR, str, int, bool) -> TextureSprite
"""Load """
# pylint: disable=too-many-arguments
return self._cache_text(text, font, size, color, align, spacing,
multiline)
def textsize(self, text, font, size, spacing, multiline):
"""Returns the canvas size for the provided arguments."""
if not text:
return 0, 0
size, _ = self._compute_text_size_pos(text, font, size, spacing,
multiline)
return size
def imagesize(self, asset_path, scale=1.0):
"""Return the image size for a given asset and scale."""
if asset_path.startswith('SDF:'):
kwargs, _ = parse_sdf_str(asset_path)
return kwargs['width'], kwargs['height']
if asset_path in self._assets:
k = self._assets[asset_path][scale]
if k in self._sprite_cache:
return self._sprite_cache[k].size
return Image.open(k).size
raise ValueError(f'asset_path must be a valid path relative to '
f'"{self.asset_dir}" without leading "/". Got '
f'"{asset_path}".')
def valid_asset(self, asset_path):
"""Verify that a given asset path is valid."""
return asset_path in self._assets
def empty_cache(self):
"""Delete all cached files."""
for asset in self._assets.values():
asset.empty_cache()
    def _compute_text_size_pos(self, text, font, size,
<filename>tpDcc/dccs/maya/dcc.py<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Maya DCC implementation
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import logging
from collections import OrderedDict
import numpy as np
from Qt.QtWidgets import QApplication, QMainWindow
import maya.cmds
import maya.mel
import maya.utils
import maya.api.OpenMaya
from tpDcc.core import dcc, consts
from tpDcc.libs.python import python
from tpDcc.libs.qt.core import qtutils
from tpDcc.libs.math.core import matrix
from tpDcc.dccs.maya.api import mathlib as maya_math
from tpDcc.dccs.maya.core import helpers, gui, node, name, scene, shape, transform, decorators as maya_decorators
from tpDcc.dccs.maya.core import attribute, namespace, playblast, constants as maya_constants, joint as joint_utils
from tpDcc.dccs.maya.core import reference as ref_utils, constraint as constraint_utils, shader as shader_utils
from tpDcc.dccs.maya.core import filtertypes, animation, sequencer, camera as cam_utils, cluster as cluster_utils
from tpDcc.dccs.maya.core import space as space_utils, geometry as geo_utils, rivet as rivet_utils, color as maya_color
from tpDcc.dccs.maya.core import directory, follicle as follicle_utils, curve as curve_utils, ik as ik_utils
from tpDcc.dccs.maya.core import humanik, deformer as deformer_utils, skin as skin_utils
LOGGER = logging.getLogger('tpDcc-dccs-maya')
# =================================================================================================================
# GENERAL
# =================================================================================================================
def get_name():
"""
Returns the name of the DCC
:return: str
"""
return dcc.Dccs.Maya
def get_extensions():
"""
Returns supported extensions of the DCC
:return: list(str)
"""
return ['.ma', '.mb']
def get_version():
"""
Returns version of the DCC
:return: int
"""
return helpers.get_maya_version()
def get_version_name():
"""
    Returns version name of the DCC
:return: str
"""
return str(helpers.get_maya_version())
def is_batch():
"""
Returns whether DCC is being executed in batch mode or not
:return: bool
"""
return maya.cmds.about(batch=True)
def execute_deferred(fn):
"""
Executes given function in deferred mode
"""
maya.utils.executeDeferred(fn)
def deferred_function(fn, *args, **kwargs):
"""
Calls given function with given arguments in a deferred way
:param fn:
:param args: list
:param kwargs: dict
"""
return maya.cmds.evalDeferred(fn, *args, **kwargs)
def is_component_mode():
"""
Returns whether current DCC selection mode is component mode or not
:return: bool
"""
return maya.cmds.selectMode(query=True, component=True)
def enable_component_selection():
"""
Enables DCC component selection mode
"""
return maya.cmds.selectMode(component=True)
def is_plugin_loaded(plugin_name):
"""
Return whether given plugin is loaded or not
:param plugin_name: str
:return: bool
"""
return helpers.is_plugin_loaded(plugin_name)
def load_plugin(plugin_path, quiet=True):
"""
Loads given plugin
:param plugin_path: str
:param quiet: bool
"""
return helpers.load_plugin(plugin_path, quiet=quiet)
def unload_plugin(plugin_path):
"""
Unloads the given plugin
:param plugin_path: str
"""
return helpers.unload_plugin(plugin_path)
def list_old_plugins():
"""
Returns a list of old plugins in the current scene
:return: list(str)
"""
return helpers.list_old_plugins()
def remove_old_plugin(plugin_name):
"""
Removes given old plugin from current scene
:param plugin_name: str
"""
return helpers.remove_old_plugin(plugin_name)
def set_workspace(workspace_path):
"""
Sets current workspace to the given path
:param workspace_path: str
"""
return maya.mel.eval('setProject \"' + workspace_path + '\"')
# return maya.cmds.workspace(workspace_path, openWorkspace=True)
def warning(message):
"""
Prints a warning message
:param message: str
:return:
"""
maya.cmds.warning(message)
def error(message):
"""
    Prints an error message
:param message: str
:return:
"""
maya.cmds.error(message)
def fit_view(animation=True):
"""
Fits current viewport to current selection
    :param animation: bool, whether to animate the view fit
"""
maya.cmds.viewFit(an=animation)
def refresh_viewport():
"""
Refresh current DCC viewport
"""
maya.cmds.refresh(currentView=True)
def refresh_all_viewport():
"""
Refresh all DCC viewports
"""
maya.cmds.refresh(currentView=False)
def focus(object_to_focus):
"""
    Sets focus on the given object
:param object_to_focus: str
"""
maya.cmds.setFocus(object_to_focus)
def enable_undo():
"""
    Enables undo functionality (opens an undo chunk)
"""
maya.cmds.undoInfo(openChunk=True)
def disable_undo():
"""
    Disables undo functionality (closes the current undo chunk)
"""
maya.cmds.undoInfo(closeChunk=True)
# =================================================================================================================
# GUI
# =================================================================================================================
def get_dpi(value=1):
"""
Returns current DPI used by DCC
:param value: float
:return: float
"""
qt_dpi = QApplication.devicePixelRatio() if maya.cmds.about(batch=True) else QMainWindow().devicePixelRatio()
return max(qt_dpi * value, get_dpi_scale(value))
def get_dpi_scale(value):
"""
Returns current DPI scale used by DCC
:return: float
"""
maya_scale = 1.0 if not hasattr(
maya.cmds, "mayaDpiSetting") else maya.cmds.mayaDpiSetting(query=True, realScaleValue=True)
return maya_scale * value
def get_main_window():
"""
Returns Qt object that references to the main DCC window
:return:
"""
return gui.get_maya_window()
def get_main_menubar():
"""
Returns Qt object that references to the main DCC menubar
:return:
"""
win = get_main_window()
menu_bar = win.menuBar()
return menu_bar
def is_window_floating(window_name):
"""
Returns whether or not DCC window is floating
:param window_name: str
:return: bool
"""
return gui.is_window_floating(window_name=window_name)
def focus_ui_panel(panel_name):
"""
Focus UI panel with given name
:param panel_name: str
"""
return maya.cmds.setFocus(panel_name)
def get_dockable_window_class():
return MayaDockedWindow
def get_dialog_result_yes():
"""
Returns output when a DCC dialog result is accepted
:return:
"""
return maya_constants.DialogResult.Yes
def get_dialog_result_no():
"""
Returns output when a DCC dialog result is rejected
:return:
"""
return maya_constants.DialogResult.No
def get_dialog_result_cancel():
"""
Returns output when a DCC dialog result is cancelled
:return:
"""
return maya_constants.DialogResult.Cancel
def get_dialog_result_close():
"""
Returns output when a DCC dialog result is close
:return:
"""
return maya_constants.DialogResult.Close
def show_message_in_viewport(msg, **kwargs):
"""
Shows a message in DCC viewport
:param msg: str, Message to show
:param kwargs: dict, extra arguments
"""
color = kwargs.get('color', '')
pos = kwargs.get('pos', 'topCenter')
if color != '':
msg = "<span style=\"color:{0};\">{1}</span>".format(color, msg)
maya.cmds.inViewMessage(amg=msg, pos=pos, fade=True, fst=1000, dk=True)
def add_shelf_menu_item(parent, label, command='', icon=''):
"""
Adds a new menu item
:param parent:
:param label:
:param command:
:param icon:
:return:
"""
    return maya.cmds.menuItem(parent=parent, label=label, command=command, image=icon or '')
def add_shelf_sub_menu_item(parent, label, icon=''):
"""
Adds a new sub menu item
:param parent:
:param label:
:param icon:
:return:
"""
    return maya.cmds.menuItem(parent=parent, label=label, image=icon or '', subMenu=True)
def add_shelf_separator(shelf_name):
"""
Adds a new separator to the given shelf
:param shelf_name: str
"""
return maya.cmds.separator(
        parent=shelf_name, manage=True, visible=True, horizontal=False,
style='shelf', enableBackground=False, preventOverride=False)
def shelf_exists(shelf_name):
"""
Returns whether given shelf already exists or not
:param shelf_name: str
:return: bool
"""
return gui.shelf_exists(shelf_name=shelf_name)
def create_shelf(shelf_name, shelf_label=None):
"""
Creates a new shelf with the given name
:param shelf_name: str
:param shelf_label: str
"""
return gui.create_shelf(name=shelf_name)
def delete_shelf(shelf_name):
"""
Deletes shelf with given name
:param shelf_name: str
"""
return gui.delete_shelf(shelf_name=shelf_name)
def confirm_dialog(title, message, button=None, cancel_button=None, default_button=None, dismiss_string=None):
"""
Shows DCC confirm dialog
:param title:
:param message:
:param button:
:param cancel_button:
:param default_button:
:param dismiss_string:
:return:
"""
if button and cancel_button and dismiss_string and default_button:
return maya.cmds.confirmDialog(
title=title, message=message, button=button, cancelButton=cancel_button,
defaultButton=default_button, dismissString=dismiss_string)
    if button:
        return maya.cmds.confirmDialog(title=title, message=message, button=button)
    return maya.cmds.confirmDialog(title=title, message=message)
def select_file_dialog(title, start_directory=None, pattern=None):
"""
Shows select file dialog
:param title: str
:param start_directory: str
:param pattern: str
:return: str
"""
if not pattern:
pattern = 'All Files (*.*)'
res = maya.cmds.fileDialog2(fm=1, dir=start_directory, cap=title, ff=pattern)
if res:
res = res[0]
return res
def select_folder_dialog(title, start_directory=None):
"""
Shows select folder dialog
:param title: str
:param start_directory: str
:return: str
"""
return directory.select_folder_dialog(title=title, start_directory=start_directory)
def save_file_dialog(title, start_directory=None, pattern=None):
"""
Shows save file dialog
:param title: str
:param start_directory: str
:param pattern: str
:return: str
"""
return directory.save_file_dialog(title=title, start_directory=start_directory, pattern=pattern)
def get_current_model_panel():
"""
Returns the current model panel name
:return: str | None
"""
    current_panel = maya.cmds.getPanel(withFocus=True)
    current_panel_type = maya.cmds.getPanel(typeOf=current_panel)
if current_panel_type not in ['modelPanel']:
return None
return current_panel
def dock_widget(widget, *args, **kwargs):
"""
Docks given widget into current DCC UI
:param widget: QWidget
:param args:
:param kwargs:
:return:
"""
return qtutils.dock_widget(widget, *args, **kwargs)
def get_all_fonts():
"""
Returns all fonts available in DCC
:return: list(str)
"""
return maya.cmds.fontDialog(FontList=True) or list()
# =================================================================================================================
# OBJECTS / NODES
# =================================================================================================================
def node_types():
"""
Returns dictionary that provides a mapping between tpDcc object types and DCC specific node types
    It can be the case that a tpDcc object type maps to more than one MFn object.
    None values are ignored, either because the type does not exist or because there is no equivalent type in Maya.
:return: dict
"""
return OrderedDict([
(consts.ObjectTypes.Geometry, [maya.api.OpenMaya.MFn.kMesh, 'mesh']),
(consts.ObjectTypes.Light, [maya.api.OpenMaya.MFn.kLight, 'light']),
(consts.ObjectTypes.Camera, [maya.api.OpenMaya.MFn.kCamera, 'camera']),
(consts.ObjectTypes.Model, [maya.api.OpenMaya.MFn.kTransform, 'transform']),
(consts.ObjectTypes.Group, [maya.api.OpenMaya.MFn.kTransform, 'transform']),
(consts.ObjectTypes.Bone, [maya.api.OpenMaya.MFn.kJoint, 'joint']),
(consts.ObjectTypes.Particle, [
(maya.api.OpenMaya.MFn.kParticle, maya.api.OpenMaya.MFn.kNParticle), ('particle', 'particle')]),
(consts.ObjectTypes.Curve, [maya.api.OpenMaya.MFn.kCurve, 'curve']),
(consts.ObjectTypes.PolyMesh, [maya.api.OpenMaya.MFn.kPolyMesh, 'polyMesh']),
(consts.ObjectTypes.NurbsSurface, [maya.api.OpenMaya.MFn.kNurbsSurface, 'nurbsSurface']),
(consts.ObjectTypes.Network, [maya.api.OpenMaya.MFn.kAffect, 'network']),
(consts.ObjectTypes.Null, [maya.api.OpenMaya.MFn.kLocator, 'locator']),
])
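# Illustrative lookup against the mapping above (a sketch; `consts` comes from
# the tpDcc imports at the top of this module):
#
#     mfn_type, type_name = node_types()[consts.ObjectTypes.Bone]
#     # mfn_type == maya.api.OpenMaya.MFn.kJoint, type_name == 'joint'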
def dcc_to_tpdcc_types():
"""
Returns a dictionary that provides a mapping between Dcc object types and tpDcc object types
:return:
"""
dcc_to_abstract_types = OrderedDict()
for abstract_type, dcc_type in node_types().items():
if isinstance(dcc_type[0], (tuple, list)):
for item in dcc_type[0]:
dcc_to_abstract_types[item] = abstract_type
else:
            dcc_to_abstract_types[dcc_type[0]] = abstract_type
    return dcc_to_abstract_types
def dcc_to_tpdcc_str_types():
"""
Returns a dictionary that provides a mapping between Dcc string object types and tpDcc object types
:return:
"""
dcc_to_abstract_str_types = OrderedDict()
for abstract_type, dcc_type in node_types().items():
if isinstance(dcc_type[1], (tuple, list)):
for item in dcc_type[1]:
dcc_to_abstract_str_types[item] = abstract_type
else:
            dcc_to_abstract_str_types[dcc_type[1]] = abstract_type
    return dcc_to_abstract_str_types
def node_tpdcc_type(node_name, as_string=False):
"""
    Returns the tpDcc object type for the given node
:param node_name: str
:param as_string: bool
:return: str
"""
if as_string:
node_type = maya.cmds.objectType(node_name)
if node_type == 'transform':
return 'transform'
        str_types = dcc_to_tpdcc_str_types()
        if node_type in str_types:
            maya_type = str_types[node_type]
return node_types()[maya_type][1]
else:
maya_node = node.get_mobject(node_name)
maya_api_type = maya_node.apiType()
# TODO: We are hardcoding node type returns. Maybe we should return a generic transform and let user
        # TODO: to handle shape types
from flask import request, Blueprint
from flask_cors import cross_origin
from server.apis.api_init import auth, backstageInfo, blockOfBeings, blockOfTimes, blockOfGarbage, mainNodeManager, vote
from server.utils.message import HttpMessage
from server.config import Allow_Url_List
backstage = Blueprint('backstage', __name__)
# Login
@backstage.route("/backstage/login", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def login():
"""后台用户登录
Content-Type: application/json
{
"username": "",
"password": "",
"captcha":{
"uuid":"",
"word":“”
}
}
返回 json
{
"is_success":bool,
"data":{
"token": ""
}
"""
if request.method == 'POST':
info = request.get_json()
try:
captcha = info["captcha"]
if not auth.verifyCaptcha(uuid=captcha["uuid"], word=captcha["word"]):
http_message = HttpMessage(is_success=False, data="验证码错误")
return http_message.getJson()
username = info["username"]
password = info["password"]
token = auth.generateTokenByUsernameAndPassword(username=username, password=password)
if token == "false":
http_message = HttpMessage(is_success=False, data="用户名或密码错误")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=True, data=token)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
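# Hypothetical client call for the login endpoint above (host, credentials and
# the `requests` dependency are illustrative, not part of this module):
#
#     import requests
#     payload = {"username": "admin", "password": "secret",
#                "captcha": {"uuid": "...", "word": "..."}}
#     r = requests.post("http://localhost:5000/backstage/login", json=payload)
#     token = r.json()["data"]  # only a token when r.json()["is_success"] is True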
@backstage.route("/backstage/token/verify", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def verifyToken():
"""验证token
Content-Type: application/json
{
"token": ""
}
返回 json
{
"is_success":bool,
"data":""
"""
try:
info = request.get_json()
token = info["token"]
if not auth.verifyToken(token):
http_message = HttpMessage(is_success=False, data="Token无效")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=True, data="Token有效")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/index_notice/modify", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def modifyIndexNotice():
"""修改首页公告
Content-Type: application/json
{
"token": "",
"content":""
}
返回 json
{
"is_success":bool,
"data": str base64 markdown
}
"""
try:
info = request.get_json()
token = info["token"]
if not auth.verifyToken(token):
http_message = HttpMessage(is_success=False, data="Token无效")
return http_message.getJson()
content = info["content"]
res = backstageInfo.modifyIndexNotice(content)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/record_number/set", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def setRecordNumber():
"""设置备案号
Content-Type: application/json
{
"token": "",
"record_number":""
}
返回 json
{
"is_success":bool,
"data": {}
}
"""
try:
info = request.get_json()
token = info["token"]
if not auth.verifyToken(token):
http_message = HttpMessage(is_success=False, data="Token无效")
return http_message.getJson()
record_number = info["record_number"]
res = backstageInfo.setRecordNumber(record_number)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/beings_list/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getBeingList():
"""获取待审核众生区块列表
Content-Type: application/json
{
"count": int,
"token":""
}
or
{
"offset":int,
"count": int,
"token":""
}
返回 json
{
"is_success":bool,
"data":[
{'db_id': 2, 'crate_time': '1645617280.98137'},....
]
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the list
count = info["count"]
try:
offset = info["offset"]
except Exception:
offset = None
res = blockOfBeings.getBlockList(offset, count)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
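# Sketch of the pagination contract above: "offset" is optional and defaults to
# None, letting blockOfBeings.getBlockList pick the starting point. Hypothetical
# client call (assuming the `requests` package):
#
#     r = requests.post("http://localhost:5000/backstage/beings_list/get",
#                       json={"token": token, "offset": 0, "count": 20})
#     rows = r.json()["data"]  # e.g. [{'db_id': 2, 'crate_time': '...'}, ...]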
@backstage.route("/backstage/waiting_beings_list/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getWaitingBeingList():
"""获取待发布众生区块列表
Content-Type: application/json
{
"offset":int,
"count": int,
"token":""
}
返回 json
{
"is_success":bool,
"data":[
{'db_id': 2, 'crate_time': '1645617280.98137'},....
]
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the list
count = info["count"]
offset = info["offset"]
res = blockOfBeings.getWaitingBlockList(offset, count)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/beings/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getBeings():
"""获取众生区块详细信息
Content-Type: application/json
{
"db_id": int,
"token":""
}
返回 json
{
"is_success":bool,
"data": {'db_id': 2, 'crate_time': '1645617280.98137'....}
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the block
db_id = info["db_id"]
res = blockOfBeings.getBlockByDBId(db_id)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
http_message = HttpMessage(is_success=False, data=err)
return http_message.getJson()
@backstage.route("/backstage/beings/audit", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def auditBeings():
"""审核众生区块
Content-Type: application/json
{
"db_id": int,
"token":"",
"is_review":int
}
返回 json
{
"is_success":bool,
"data": {'db_id': 2, 'crate_time': '1645617280.98137'....}
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Review the block
db_id = info["db_id"]
is_review = info["is_review"]
blockOfBeings.reviewBlock(db_id, is_review)
http_message = HttpMessage(is_success=True, data="修改成功")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/beings/recommend", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def recommendBeings():
"""推荐众生区块
Content-Type: application/json
{
"token":"",
"block_id":""
}
返回 json
{
"is_success":bool,
"data":
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Recommend the block
beings_block_id = info["block_id"]
if blockOfTimes.addTimesBlockQueue(beings_block_id):
http_message = HttpMessage(is_success=True, data="推荐成功")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=False, data="该区块已经被推荐")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/beings/marker", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def markerBeings():
"""标记众生区块
Content-Type: application/json
{
"token":"",
"block_id":""
}
返回 json
{
"is_success":bool,
"data":
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Mark the block
beings_block_id = info["block_id"]
if blockOfGarbage.addGarbageBlockQueue(beings_block_id):
http_message = HttpMessage(is_success=True, data="标记成功")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=False, data="该区块已经被标记")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/beings/unmark", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def revocationBeings():
"""撤销标记众生区块
Content-Type: application/json
{
"token":"",
"block_id":""
}
返回 json
{
"is_success":bool,
"data":
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Revoke the mark
beings_block_id = info["block_id"]
blockOfGarbage.revocationGarbageBlockQueueByBlockId(beings_block_id)
http_message = HttpMessage(is_success=True, data="撤销标记成功")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/new_apply_list/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getNewApplyList():
"""获取通过此主节点申请成为主节点的申请书列表
Content-Type: application/json
{
"token":"",
"offset":int,
"count": int,
}
返回 json
{
"is_success":bool,
"data": [{'db_id': 1, 'crate_time': '16453453280.98137'}]
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the list
count = info["count"]
offset = info["offset"]
res = mainNodeManager.getApplicationListOfMainNode(offset, count)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/active_delete_list/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getActiveDeleteList():
"""获取该主节点申请的已经广播的申请书列表(主动删除节点)
Content-Type: application/json
{
"token":"",
"offset":int,
"count": int,
}
返回 json
{
"is_success":bool,
"data": [{'db_id': 1, 'crate_time': '16453453280.98137'}]
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the list
count = info["count"]
offset = info["offset"]
res = mainNodeManager.getApplicationActiveDeleteListOfBroadcast(offset, count)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/other_apply_list/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getOtherApplyList():
"""获取通过其他主节点申请成为主节点的申请书列表
Content-Type: application/json
{
"token":"",
"offset":int,
"count": int,
}
返回 json
{
"is_success":bool,
"data": [{'db_id': 1, 'crate_time': '16453453280.98137'}]
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the list
count = info["count"]
offset = info["offset"]
res = mainNodeManager.getApplicationOfOtherMainNode(offset, count)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/other_active_delete_list/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getOtherActiveDeleteList():
"""获取其他主节点提交的申请书列表(主动删除)
Content-Type: application/json
{
"token":"",
"offset":int,
"count": int,
}
返回 json
{
"is_success":bool,
"data": [{'db_id': 1, 'crate_time': '16453453280.98137'}]
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
        # Get the list
count = info["count"]
offset = info["offset"]
res = mainNodeManager.getApplicationActiveDeleteOfOtherMainNode(offset, count)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/new_apply/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getNewApply():
"""获取通过此主节点申请成为主节点的申请书
Content-Type: application/json
{
"token":"",
"db_id":int
}
返回 json
{
"is_success":bool,
"data": {'db_id': 1, 'crate_time': '16453453280.98137'....}
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
db_id = info["db_id"]
res = mainNodeManager.getApplicationFormByDBId(db_id)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/active_delete/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getActiveDelete():
"""获取该主节点申请的已经广播的申请书(主动删除节点)
Content-Type: application/json
{
"token":"",
"db_id":int
}
返回 json
{
"is_success":bool,
"data": {'db_id': 1, 'crate_time': '16453453280.98137'....}
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
db_id = info["db_id"]
res = mainNodeManager.getApplicationFormActiveDeleteByDBId(db_id)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/active_delete/add", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def addActiveDelete():
"""主动申请删除谋主节点
Content-Type: application/json
{
"token":"",
"node_id":"",
"application_content":"",
"remarks":""
}
返回 json
{
"is_success":bool,
"data": {'db_id': 1, 'crate_time': '16453453280.98137'....}
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
node_id = info["node_id"]
application_content = info["application_content"]
remarks = info["remarks"]
res = mainNodeManager.addApplicationFormActiveDelete(node_id, application_content, remarks)
if res:
http_message = HttpMessage(is_success=True, data="增加成功")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=False, data="增加失败")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/other_apply/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getOtherApply():
"""获取通过其他主节点申请成为主节点的申请书
Content-Type: application/json
{
"token":"",
"db_id":int
}
返回 json
{
"is_success":bool,
"data": {'db_id': 1, 'crate_time': '16453453280.98137'....}
"""
try:
info = request.get_json()
        # Verify the token
        token = info["token"]
        if not auth.verifyToken(token):
            http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
db_id = info["db_id"]
res = mainNodeManager.getOtherNodeApplicationFormByDBId(db_id)
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
@backstage.route("/backstage/main_node/other_active_delete/get", methods=['POST'])
@cross_origin(origins=Allow_Url_List)
def getOtherActiveDelete():
"""获取其他主节点申请的已经广播的申请书(主动删除节点)
Content-Type: application/json
{
"token":"",
"db_id":int
}
    Returns json
{
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from threading import Timer
from nose.tools import *
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from scapy.all import *
import time, monotonic
import os, sys
import tempfile
import random
import Queue
import threading
from IGMP import *
from McastTraffic import *
from Stats import Stats
from OnosCtrl import OnosCtrl
from OltConfig import OltConfig
from Channels import IgmpChannel
from EapTLS import TLSAuthTest
from scapy_ssl_tls.ssl_tls import *
from scapy_ssl_tls.ssl_tls_crypto import *
from EapolAAA import *
from Enum import *
import noseTlsAuthHolder as tlsAuthHolder
from tls_cert import Key
from socket import *
from CordTestServer import cord_test_radius_restart
import struct
import scapy
from CordTestBase import CordTester
from CordContainer import *
from CordLogger import CordLogger
from CordTestUtils import log_test
import re
from random import randint
from time import sleep
import json
from OnosFlowCtrl import OnosFlowCtrl
from OltConfig import OltConfig
from threading import current_thread
import collections
log_test.setLevel('INFO')
class IGMPTestState:
def __init__(self, groups = [], df = None, state = 0):
self.df = df
self.state = state
self.counter = 0
self.groups = groups
self.group_map = {} ##create a send/recv count map
for g in groups:
self.group_map[g] = (Stats(), Stats())
def update(self, group, tx = 0, rx = 0, t = 0):
self.counter += 1
index = 0 if rx == 0 else 1
v = tx if rx == 0 else rx
if self.group_map.has_key(group):
self.group_map[group][index].update(packets = v, t = t)
def update_state(self):
self.state = self.state ^ 1
class netCondition_exchange(CordLogger):
V_INF1 = 'veth0'
V_INF2 = 'veth1'
MGROUP1 = '172.16.17.32'
MGROUP2 = '172.16.31.10'
MINVALIDGROUP1 = '255.255.255.255'
MINVALIDGROUP2 = '172.16.58.3'
MMACGROUP1 = "01:00:5e:01:02:03"
MMACGROUP2 = "01:00:5e:02:02:03"
IGMP_DST_MAC = "01:00:5e:00:00:16"
IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
IP_SRC = '1.2.3.4'
IP_DST = '172.16.31.10'
NEGATIVE_TRAFFIC_STATUS = 1
igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
igmp_ip = IP(dst = IP_DST)
IGMP_TEST_TIMEOUT = 5
IGMP_QUERY_TIMEOUT = 60
MCAST_TRAFFIC_TIMEOUT = 10
TEST_TIMEOUT_DELAY = 340
PORT_TX_DEFAULT = 2
PORT_RX_DEFAULT = 1
max_packets = 100
app_igmp = 'org.opencord.igmp'
olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
ROVER_TEST_TIMEOUT = 10 #3600*86
ROVER_TIMEOUT = (ROVER_TEST_TIMEOUT - 100)
ROVER_JOIN_TIMEOUT = 60
app_tls = 'org.opencord.aaa'
TLS_TIMEOUT = 20
CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
<KEY>
-----END CERTIFICATE-----'''
def onos_aaa_config(self):
aaa_dict = {'apps' : { 'org.opencord.aaa' : { 'AAA' : { 'radiusSecret': '<PASSWORD>',
'radiusIp': '172.17.0.2' } } } }
radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
aaa_dict['apps']['org.opencord.aaa']['AAA']['radiusIp'] = radius_ip
self.onos_ctrl.activate()
time.sleep(2)
self.onos_load_tls_config(aaa_dict)
def onos_load_tls_config(self, config):
status, code = OnosCtrl.config(config)
if status is False:
log_test.info('Configure request for AAA returned status %d' %code)
assert_equal(status, True)
time.sleep(3)
@classmethod
def setUpClass(cls):
cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
cls.port_map, _ = cls.olt.olt_port_map()
OnosCtrl.cord_olt_config(cls.olt)
cls.device_id = OnosCtrl.get_device_id()
@classmethod
def tearDownClass(cls): pass
def setUp_igmp(self):
''' Activate the igmp app'''
apps = self.app_igmp
self.onos_ctrl = OnosCtrl(apps)
self.onos_aaa_config()
self.onos_ctrl.activate()
self.igmp_channel = IgmpChannel()
def setUp_tls(self):
''' Activate the aaa app'''
apps = self.app_tls
self.onos_ctrl = OnosCtrl(apps)
self.onos_aaa_config()
def tearDown(self):
'''Deactivate the dhcp app'''
apps = [self.app_igmp, self.app_tls]
for app in apps:
onos_ctrl = OnosCtrl(app)
onos_ctrl.deactivate()
def onos_load_igmp_config(self, config):
log_test.info('onos load config is %s'%config)
status, code = OnosCtrl.config(config)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
time.sleep(2)
def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
ssm_dict = {'apps' : { 'org.onosproject.igmp' : { 'ssmTranslate' : [] } } }
ssm_xlate_list = ssm_dict['apps']['org.onosproject.igmp']['ssmTranslate']
        if flag:  # to maintain separate group-source pairs
for i in range(len(groups)):
d = {}
d['source'] = src_list[i] or '0.0.0.0'
d['group'] = groups[i]
ssm_xlate_list.append(d)
else:
for g in groups:
for s in src_list:
d = {}
d['source'] = s or '0.0.0.0'
d['group'] = g
ssm_xlate_list.append(d)
self.onos_load_igmp_config(ssm_dict)
cord_port_map = {}
for g in groups:
cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
self.igmp_channel.cord_port_table_load(cord_port_map)
time.sleep(2)
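    # For groups=['172.16.17.32'] with the default src_list, the method above
    # posts a config shaped like this (a sketch derived from the code above,
    # not from any ONOS documentation):
    #
    #     {"apps": {"org.onosproject.igmp": {"ssmTranslate": [
    #         {"source": "1.2.3.4", "group": "172.16.17.32"}]}}}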
def mcast_ip_range(self,start_ip = '172.16.58.3', end_ip = '172.16.17.32'):
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 255:
temp[i] = 0
temp[i-1] += 1
ip_range.append(".".join(map(str, temp)))
return ip_range
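    # Note: `temp` aliases `start`, so incrementing start[3] advances the same
    # list the loop inspects. The helper enumerates the range inclusively, e.g.
    # mcast_ip_range('10.0.0.1', '10.0.0.3') ->
    # ['10.0.0.1', '10.0.0.2', '10.0.0.3'].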
def random_mcast_ip(self,start_ip = '172.16.58.3', end_ip = '172.16.17.32'):
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 255:
temp[i] = 0
temp[i-1] += 1
ip_range.append(".".join(map(str, temp)))
return random.choice(ip_range)
def source_ip_range(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 255:
temp[i] = 0
temp[i-1] += 1
ip_range.append(".".join(map(str, temp)))
return ip_range
def randomsourceip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 255:
temp[i] = 0
temp[i-1] += 1
ip_range.append(".".join(map(str, temp)))
return random.choice(ip_range)
def get_igmp_intf(self):
inst = os.getenv('TEST_INSTANCE', None)
if not inst:
return 'veth0'
inst = int(inst) + 1
if inst >= self.port_map['uplink']:
inst += 1
if self.port_map.has_key(inst):
return self.port_map[inst]
return 'veth0'
def igmp_verify_join(self, igmpStateList):
sendState, recvState = igmpStateList
## check if the send is received for the groups
for g in sendState.groups:
tx_stats = sendState.group_map[g][0]
tx = tx_stats.count
assert_greater(tx, 0)
rx_stats = recvState.group_map[g][1]
rx = rx_stats.count
assert_greater(rx, 0)
log_test.info('Receive stats %s for group %s' %(rx_stats, g))
log_test.info('IGMP test verification success')
def igmp_verify_leave(self, igmpStateList, leave_groups):
sendState, recvState = igmpStateList[0], igmpStateList[1]
## check if the send is received for the groups
for g in sendState.groups:
tx_stats = sendState.group_map[g][0]
rx_stats = recvState.group_map[g][1]
tx = tx_stats.count
rx = rx_stats.count
assert_greater(tx, 0)
if g not in leave_groups:
log_test.info('Received %d packets for group %s' %(rx, g))
for g in leave_groups:
rx = recvState.group_map[g][1].count
assert_equal(rx, 0)
log_test.info('IGMP test verification success')
def mcast_traffic_timer(self):
self.mcastTraffic.stopReceives()
def send_mcast_cb(self, send_state):
for g in send_state.groups:
send_state.update(g, tx = 1)
return 0
##Runs in the context of twisted reactor thread
def igmp_recv(self, igmpState, iface = 'veth0'):
p = self.recv_socket.recv()
try:
send_time = float(p.payload.load)
recv_time = monotonic.monotonic()
except:
log_test.info('Unexpected Payload received: %s' %p.payload.load)
return 0
#log_test.info( 'Recv in %.6f secs' %(recv_time - send_time))
igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
return 0
def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1, ip_src = None):
if ssm_load is True:
self.onos_ssm_table_load(groups, src_list)
igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
gaddr=self.IP_DST)
for g in groups:
gr = IGMPv3gr(rtype= record_type, mcaddr=g)
gr.sources = src_list
igmp.grps.append(gr)
if ip_pkt is None:
if ip_src is None:
ip_pkt = self.igmp_eth/self.igmp_ip
else:
igmp_ip_src = IP(dst = self.IP_DST, src = ip_src)
ip_pkt = self.igmp_eth/igmp_ip_src
pkt = ip_pkt/igmp
IGMPv3.fixup(pkt)
sendp(pkt, iface=iface)
if delay != 0:
time.sleep(delay)
def send_igmp_join_negative(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1, ip_src = None, invalid_igmp_join = None ):
if ssm_load is True:
self.onos_ssm_table_load(groups, src_list)
if invalid_igmp_join == 'igmp_type':
igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT_NEGATIVE, max_resp_code=30,
gaddr=self.IP_DST)
else:
igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
gaddr=self.IP_DST)
if invalid_igmp_join == 'record_type':
record_type = IGMP_V3_GR_TYPE_INCLUDE_NEGATIVE
for g in groups:
gr = IGMPv3gr(rtype= record_type, mcaddr=g)
gr.sources = src_list
igmp.grps.append(gr)
if ip_pkt is None:
if ip_src is None:
ip_pkt = self.igmp_eth/self.igmp_ip
else:
igmp_ip_src = IP(dst = self.IP_DST, src = ip_src)
ip_pkt = self.igmp_eth/igmp_ip_src
pkt = ip_pkt/igmp
if invalid_igmp_join == 'ttl':
            set_ttl =
another_module.data == another_payload
# check definition histories
new_history = modulestore().get_definition_history_info(new_module.definition_locator)
assert new_history['previous_version'] is None
assert new_history['original_version'] == new_module.definition_locator.definition_id
assert new_history['edited_by'] == 'anotheruser'
another_history = modulestore().get_definition_history_info(another_module.definition_locator)
assert another_history['previous_version'] == original.definition_locator.definition_id
def test_encoded_naming(self):
"""
Check that using odd characters in block id don't break ability to add and retrieve block.
"""
course_key = CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT)
parent_locator = BlockUsageLocator(course_key, 'course', block_id="head345679")
chapter_locator = BlockUsageLocator(course_key, 'chapter', block_id="foo.bar_-~:0")
modulestore().create_child(
'anotheruser', parent_locator, 'chapter',
block_id=chapter_locator.block_id,
fields={'display_name': 'chapter 99'},
)
# check that course version changed and course's previous is the other one
new_module = modulestore().get_item(chapter_locator)
assert new_module.location.block_id == 'foo.bar_-~:0'
# hardcode to ensure BUL init didn't change
# now try making that a parent of something
new_payload = "<problem>empty</problem>"
problem_locator = BlockUsageLocator(course_key, 'problem', block_id="prob.bar_-~:99a")
modulestore().create_child(
'anotheruser', chapter_locator, 'problem',
block_id=problem_locator.block_id,
fields={'display_name': 'chapter 99', 'data': new_payload},
)
# check that course version changed and course's previous is the other one
new_module = modulestore().get_item(problem_locator)
assert new_module.location.block_id == problem_locator.block_id
chapter = modulestore().get_item(chapter_locator)
assert problem_locator in version_agnostic(chapter.children)
def test_create_bulk_operations(self):
"""
Test create_item using bulk_operations
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with modulestore().bulk_operations(course_key):
new_course = modulestore().create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT) # lint-amnesty, pylint: disable=line-too-long
new_course_locator = new_course.id
index_history_info = modulestore().get_course_history_info(new_course.location.course_key)
course_block_prev_version = new_course.previous_version
course_block_update_version = new_course.update_version
assert new_course_locator.version_guid is not None, 'Want to test a definite version'
versionless_course_locator = new_course_locator.version_agnostic()
# positive simple case: no force, add chapter
new_ele = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 1'},
)
# version info shouldn't change
assert new_ele.update_version == course_block_update_version
assert new_ele.update_version == new_ele.location.version_guid
refetch_course = modulestore().get_course(versionless_course_locator)
assert refetch_course.location.version_guid == new_course.location.version_guid
assert refetch_course.previous_version == course_block_prev_version
assert refetch_course.update_version == course_block_update_version
refetch_index_history_info = modulestore().get_course_history_info(refetch_course.location.course_key)
assert refetch_index_history_info == index_history_info
assert new_ele.location.version_agnostic() in version_agnostic(refetch_course.children)
# try to create existing item
with pytest.raises(DuplicateItemError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
block_id=new_ele.location.block_id,
fields={'display_name': 'chapter 2'},
)
# start a new transaction
with modulestore().bulk_operations(course_key):
new_ele = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 2'},
)
transaction_guid = new_ele.location.version_guid
# ensure force w/ continue gives exception
with pytest.raises(VersionConflictError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 2'},
force=True
)
# ensure trying to continue the old one gives exception
with pytest.raises(VersionConflictError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 3'},
)
# add new child to old parent in continued (leave off version_guid)
course_module_locator = new_course.location.version_agnostic()
new_ele = modulestore().create_child(
user, course_module_locator, 'chapter',
fields={'display_name': 'chapter 4'},
)
assert new_ele.update_version != course_block_update_version
assert new_ele.location.version_guid == transaction_guid
# check children, previous_version
refetch_course = modulestore().get_course(versionless_course_locator)
assert new_ele.location.version_agnostic() in version_agnostic(refetch_course.children)
assert refetch_course.previous_version == course_block_update_version
assert refetch_course.update_version == transaction_guid
def test_bulk_ops_org_filtering(self):
"""
Make sure of proper filtering when using bulk operations and
calling get_courses with an 'org' filter
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with modulestore().bulk_operations(course_key):
modulestore().create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='test_org')
assert len(courses) == 1
assert courses[0].id.org == course_key.org
assert courses[0].id.course == course_key.course
assert courses[0].id.run == course_key.run
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='other_org')
assert len(courses) == 0
# re-assert after the end of the with scope
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='test_org')
assert len(courses) == 1
assert courses[0].id.org == course_key.org
assert courses[0].id.course == course_key.course
assert courses[0].id.run == course_key.run
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='other_org')
assert len(courses) == 0
def test_update_metadata(self):
"""
        test updating an item's metadata ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(
CourseLocator(org="testx", course="GreekHero", run="run", branch=BRANCH_NAME_DRAFT),
'problem', block_id="problem3_2"
)
problem = modulestore().get_item(locator)
pre_def_id = problem.definition_locator.definition_id
pre_version_guid = problem.location.version_guid
assert pre_def_id is not None
assert pre_version_guid is not None
assert problem.max_attempts != 4, 'Invalidates rest of test'
problem.max_attempts = 4
problem.save() # decache above setting into the kvs
updated_problem = modulestore().update_item(problem, self.user_id)
# check that course version changed and course's previous is the other one
assert updated_problem.definition_locator.definition_id == pre_def_id
assert updated_problem.location.version_guid != pre_version_guid
assert updated_problem.max_attempts == 4
# refetch to ensure original didn't change
original_location = problem.location.map_into_course(CourseLocator(version_guid=pre_version_guid))
problem = modulestore().get_item(original_location)
assert problem.max_attempts != 4, 'original changed'
current_course = modulestore().get_course(locator.course_key)
assert updated_problem.location.version_guid == current_course.location.version_guid
history_info = modulestore().get_course_history_info(current_course.location.course_key)
assert history_info['previous_version'] == pre_version_guid
assert history_info['edited_by'] == self.user_id
def test_update_children(self):
"""
test updating an item's children ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter3'
)
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
# reorder children
assert len(block.children) > 0, 'meaningless test'
moved_child = block.children.pop()
block.save() # decache model changes
updated_problem = modulestore().update_item(block, self.user_id)
# check that course version changed and course's previous is the other one
assert updated_problem.definition_locator.definition_id == pre_def_id
assert updated_problem.location.version_guid != pre_version_guid
assert version_agnostic(updated_problem.children) == version_agnostic(block.children)
assert moved_child not in version_agnostic(updated_problem.children)
locator = locator.course_key.make_usage_key('chapter', "chapter1")
other_block = modulestore().get_item(locator)
other_block.children.append(moved_child)
other_updated = modulestore().update_item(other_block, self.user_id)
assert moved_child.version_agnostic() in version_agnostic(other_updated.children)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_update_definition(self, _from_json):
"""
test updating an item's definition: ensure it gets versioned as well as the course getting versioned
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head12345'
)
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
block.grading_policy['GRADER'][0]['min_count'] = 13
block.save() # decache model changes
updated_block = modulestore().update_item(block, self.user_id)
assert updated_block.definition_locator.definition_id != pre_def_id
assert updated_block.location.version_guid != pre_version_guid
assert updated_block.grading_policy['GRADER'][0]['min_count'] == 13
def test_update_manifold(self):
"""
Test updating metadata, children, and definition in a single call ensuring all the versioning occurs
"""
locator = BlockUsageLocator(
CourseLocator('testx', 'GreekHero', 'run', branch=BRANCH_NAME_DRAFT),
'problem', block_id='problem1'
)
original = modulestore().get_item(locator)
# first add 2 children to the course for the update to manipulate
locator = BlockUsageLocator(
CourseLocator('guestx', 'contender', 'run', branch=BRANCH_NAME_DRAFT),
'course', block_id="head345679"
)
category = 'problem'
new_payload = "<problem>empty</problem>"
modulestore().create_child(
'test_update_manifold', locator, category,
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
modulestore().create_child(
'test_update_manifold', locator, category,
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=original.definition_locator,
)
# pylint: disable=protected-access
modulestore()._clear_cache()
# now begin the test
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
assert block.grading_policy['GRADER'][0]['min_count'] != 13
block.grading_policy['GRADER'][0]['min_count'] = 13
block.children = block.children[1:] + [block.children[0]]
block.advertised_start = "Soon"
block.save() # decache model changes
updated_block = modulestore().update_item(block, self.user_id)
assert updated_block.definition_locator.definition_id != pre_def_id
assert updated_block.location.version_guid != pre_version_guid
assert updated_block.grading_policy['GRADER'][0]['min_count'] == 13
assert updated_block.children[0].version_agnostic() == block.children[0].version_agnostic()
assert updated_block.advertised_start == 'Soon'
def test_delete_item(self):
course = self.create_course_for_deletion()
with pytest.raises(ValueError):
modulestore().delete_item(course.location, self.user_id)
reusable_location = course.id.version_agnostic().for_branch(BRANCH_NAME_DRAFT)
# delete a leaf
problems = modulestore().get_items(reusable_location, qualifiers={'category': 'problem'})
locn_to_del = problems[0].location
new_course_loc = modulestore().delete_item(locn_to_del, self.user_id)
deleted = locn_to_del.version_agnostic()
assert not modulestore().has_item(deleted)
with pytest.raises(VersionConflictError):
modulestore().has_item(locn_to_del)
with pytest.raises(ValueError):
modulestore().delete_item(deleted, self.user_id)
assert modulestore().has_item(locn_to_del.course_agnostic())
assert new_course_loc.version_guid != course.location.version_guid
# delete a subtree
nodes = modulestore().get_items(reusable_location, qualifiers={'category': 'chapter'})
new_course_loc = modulestore().delete_item(nodes[0].location, self.user_id)
# check subtree
def check_subtree(node):
"""
Check contents of subtree recursively
"""
if node:
node_loc = node.location
assert not modulestore().has_item(node_loc.version_agnostic())
assert modulestore().has_item(node_loc.course_agnostic())
if node.has_children:
for sub in node.get_children():
check_subtree(sub)
check_subtree(nodes[0])
def create_course_for_deletion(self):
"""
Create a course we can delete
"""
course = modulestore().create_course('nihilx', 'deletion', 'run', TEST_USER_ID, BRANCH_NAME_DRAFT)
root = course.location.version_agnostic().for_branch(BRANCH_NAME_DRAFT)
for _ in range(4):
self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])
return modulestore().get_item(root)
def create_subtree_for_deletion(self, parent, category_queue):
"""
Create a subtree in the to-be-deleted course
"""
if not category_queue:
return
node = modulestore().create_child(
TEST_USER_ID, parent.version_agnostic(), category_queue[0]
)
node_loc = node.location.map_into_course(parent.course_key)
for _ in range(4):
self.create_subtree_for_deletion(node_loc, category_queue[1:])
def test_split_modulestore_create_child_with_position(self):
"""
This test is designed to hit a specific set of use cases having to do with
the child positioning logic found in split_mongo/split.py:create_child()
"""
# Set up the split module store
store = modulestore()
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with store.bulk_operations(course_key):
new_course = store.create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
new_course_locator = new_course.id
versionless_course_locator = new_course_locator.version_agnostic()
first_child = store.create_child(
self.user_id,
new_course.location,
"chapter"
)
refetch_course = store.get_course(versionless_course_locator)
second_child = store.create_child(
self.user_id,
refetch_course.location,
"chapter",
position=0
)
# First child should have been moved to second position, and the new child takes the lead
refetch_course = store.get_course(versionless_course_locator)
children = refetch_course.get_children()
assert str(children[1].location) == str(first_child.location)
assert str(children[0].location) == str(second_child.location)
# Clean up the data so we don't break other tests which apparently expect a particular state
store.delete_course(refetch_course.id, user)
class TestCourseCreation(SplitModuleTest):
"""
Test create_course
"""
def test_simple_creation(self):
"""
The simplest case but probing all expected results from it.
"""
# Oddly getting differences of 200nsec
new_course = modulestore().create_course(
'test_org',
= self.robot_position.copy()
robot_new_heading = self.robot_heading
heading_diff = heading_difference(self.robot_heading, robot_waypoint_heading)
if np.abs(heading_diff) > TURN_STEP_SIZE:
# Turn towards next waypoint first
robot_new_heading += np.sign(heading_diff) * TURN_STEP_SIZE
else:
dx = robot_waypoint_position[0] - self.robot_position[0]
dy = robot_waypoint_position[1] - self.robot_position[1]
if distance(self.robot_position, robot_waypoint_position) < MOVE_STEP_SIZE:
robot_new_position = robot_waypoint_position
else:
if robot_waypoint_index == len(robot_waypoint_positions) - 1:
move_sign = robot_move_sign
else:
move_sign = 1
robot_new_heading = np.arctan2(move_sign * dy, move_sign * dx)
robot_new_position[0] += move_sign * MOVE_STEP_SIZE * np.cos(robot_new_heading)
robot_new_position[1] += move_sign * MOVE_STEP_SIZE * np.sin(robot_new_heading)
# Set new constraint to move the robot to new pose
p.changeConstraint(self.robot_cid, jointChildPivot=robot_new_position, jointChildFrameOrientation=p.getQuaternionFromEuler([0, 0, robot_new_heading]), maxForce=MOVEMENT_MAX_FORCE)
p.stepSimulation()
# Get new robot pose
self.robot_position, self.robot_heading = self._get_robot_pose()
self.robot_position[2] = 0
# Stop moving if robot collided with obstacle
if distance(robot_prev_waypoint_position, self.robot_position) > MOVE_STEP_SIZE:
contact_points = p.getContactPoints(self.robot_id)
if len(contact_points) > 0:
for contact_point in contact_points:
if contact_point[2] in self.obstacle_ids + [self.robot_id]:
robot_is_moving = False
robot_hit_obstacle = True
break # Note: self.robot_distance does not get updated
# Robot no longer turning or moving
if (distance(self.robot_position, robot_prev_position) < NOT_MOVING_THRESHOLD
and np.abs(self.robot_heading - robot_prev_heading) < NOT_TURNING_THRESHOLD):
# Update distance moved
robot_distance += distance(robot_prev_waypoint_position, self.robot_position)
if self.show_debug_annotations:
p.addUserDebugLine(robot_prev_waypoint_position[:2] + [0.001], self.robot_position[:2] + [0.001], DEBUG_LINE_COLOR)
# Increment waypoint index, or stop moving if done
if robot_waypoint_index == len(robot_waypoint_positions) - 1:
robot_is_moving = False
else:
robot_waypoint_index += 1
robot_prev_waypoint_position = robot_waypoint_positions[robot_waypoint_index - 1]
robot_waypoint_position = robot_waypoint_positions[robot_waypoint_index]
robot_waypoint_heading = robot_waypoint_headings[robot_waypoint_index]
# Break if robot is stuck
sim_steps += 1
if sim_steps > STEP_LIMIT:
break # Note: self.robot_distance does not get updated
if sim_steps % MAP_UPDATE_STEPS == 0:
self._update_state()
if self.show_occupancy_map:
self._update_occupancy_map_visualization(robot_waypoint_positions, robot_target_end_effector_position)
# Step the simulation until everything is still
self._step_simulation_until_still()
################################################################################
# Process cubes
# Store final cube positions and remove cubes that are above the wall
to_remove = []
final_cube_positions = {}
for cube_id in self.available_cube_ids_set:
cube_position, _ = p.getBasePositionAndOrientation(cube_id)
final_cube_positions[cube_id] = cube_position
if cube_position[2] > WALL_HEIGHT + 0.49 * CUBE_WIDTH or cube_position[2] < CUBE_WIDTH / 4:
self._remove_cube(cube_id, out_of_bounds=True)
to_remove.append(cube_id)
for cube_id in to_remove:
self.available_cube_ids_set.remove(cube_id)
# Give partial rewards
for cube_id in self.available_cube_ids_set:
cube_position = final_cube_positions[cube_id]
if self.use_shortest_path_partial_rewards:
dist = self._shortest_path_distance(cube_position, self.receptacle_position, configuration_space=initial_configuration_space)
else:
dist = distance(cube_position, self.receptacle_position)
dist_moved = initial_cube_distances[cube_id] - dist
robot_reward += self.partial_rewards_scale * dist_moved
# Give rewards for cubes in receptacle (and remove the cubes)
to_remove = []
for cube_id in self.available_cube_ids_set:
cube_position = final_cube_positions[cube_id]
if self._cube_position_in_receptacle(cube_position):
to_remove.append(cube_id)
self._remove_cube(cube_id)
robot_cubes += 1
robot_reward += 1
for cube_id in to_remove:
self.available_cube_ids_set.remove(cube_id)
################################################################################
# Update state representation
self.robot_position, self.robot_heading = self._get_robot_pose()
self._update_state()
if self.show_occupancy_map:
self._update_occupancy_map_visualization(robot_waypoint_positions, robot_target_end_effector_position)
################################################################################
# Compute stats
# Get final pose
self.robot_position, self.robot_heading = self._get_robot_pose()
# Add distance traveled to cumulative distance
self.robot_cumulative_distance += robot_distance
# Calculate amount turned to check if robot turned this step
robot_turn_angle = heading_difference(robot_initial_heading, self.robot_heading)
# Increment inactivity counter, which measures steps elapsed since the previous cube was stashed
if robot_cubes == 0:
self.inactivity_counter += 1
# Determine whether episode is done
done = False
if len(self.removed_cube_ids_set) == self.num_cubes or self.inactivity_counter >= self.inactivity_cutoff:
done = True
# Compute reward for the step
if robot_hit_obstacle:
robot_reward -= self.collision_penalty
if robot_distance < NONMOVEMENT_DIST_THRESHOLD and abs(robot_turn_angle) < NONMOVEMENT_TURN_THRESHOLD:
robot_reward -= self.nonmovement_penalty
self.robot_cumulative_cubes += robot_cubes
self.robot_cumulative_reward += robot_reward
# Compute items to return
state = self.get_state(done=done)
reward = robot_reward
ministeps = robot_distance / self.ministep_size
info = {
'ministeps': ministeps,
'inactivity': self.inactivity_counter,
'cumulative_cubes': self.robot_cumulative_cubes,
'cumulative_distance': self.robot_cumulative_distance,
'cumulative_reward': self.robot_cumulative_reward,
}
return state, reward, done, info
@staticmethod
def close():
p.disconnect()
@staticmethod
def get_state_width():
return LOCAL_MAP_PIXEL_WIDTH
def get_action_space(self):
if self.use_steering_commands:
return self.steering_commands_num_turns
return LOCAL_MAP_PIXEL_WIDTH * LOCAL_MAP_PIXEL_WIDTH
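# For a sense of scale (illustrative numbers, not taken from this file): with a
# local overhead map of LOCAL_MAP_PIXEL_WIDTH = 96, the dense action space above
# has 96 * 96 = 9216 actions, one per pixel of the map, whereas steering-command
# mode exposes only steering_commands_num_turns discrete turn actions.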
def get_camera_image(self, image_width=1024, image_height=768):
renderer = p.ER_BULLET_HARDWARE_OPENGL if self.use_gui else p.ER_TINY_RENDERER
_, _, rgb, _, _ = p.getCameraImage(image_width, image_height, flags=p.ER_NO_SEGMENTATION_MASK, renderer=renderer)
return rgb
@staticmethod
def start_video_logging(video_path):
return p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, video_path)
@staticmethod
def stop_video_logging(log_id):
p.stopStateLogging(log_id)
def _create_environment(self):
# Create floor
with tempfile.TemporaryDirectory() as tmp_dir_name:
# Create custom obj and urdf for current room size
room_length_with_walls = self.room_length + 2 * WALL_THICKNESS
room_width_with_walls = self.room_width + 2 * WALL_THICKNESS
plane_obj_path = str(Path(tmp_dir_name) / 'plane.obj')
with open(self.assets_dir / 'plane.obj.template') as f1:
with open(plane_obj_path, 'w') as f2:
f2.write(f1.read().replace('HALFLENGTH', str(room_length_with_walls / GLOBAL_SCALING / 2)).replace('HALFWIDTH', str(room_width_with_walls / GLOBAL_SCALING / 2)))
plane_urdf_path = str(Path(tmp_dir_name) / 'plane.urdf')
with open(self.assets_dir / 'plane.urdf.template') as f1:
with open(plane_urdf_path, 'w') as f2:
f2.write(f1.read().replace('LENGTH', str(room_length_with_walls / GLOBAL_SCALING)).replace('WIDTH', str(room_width_with_walls / GLOBAL_SCALING)))
p.loadURDF(plane_urdf_path, globalScaling=GLOBAL_SCALING)
# Create obstacles (including walls)
self.obstacle_ids = self._create_obstacles()
self.min_obstacle_id = min(self.obstacle_ids)
self.max_obstacle_id = max(self.obstacle_ids)
# Create trash receptacle
receptacle_collision_shape_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=[0, 0, 0])
receptacle_visual_shape_id = p.createVisualShape(p.GEOM_BOX, halfExtents=[RECEPTACLE_WIDTH / 2, RECEPTACLE_WIDTH / 2, 0], rgbaColor=RECEPTACLE_COLOR, visualFramePosition=[0, 0, 0.0001])
self.receptacle_id = p.createMultiBody(0.01, receptacle_collision_shape_id, receptacle_visual_shape_id, self.receptacle_position)
# Create cubes
cube_collision_shape_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=(3 * [CUBE_WIDTH / 2]))
cube_visual_shape_id = p.createVisualShape(p.GEOM_BOX, halfExtents=(3 * [CUBE_WIDTH / 2]), rgbaColor=CUBE_COLOR)
self.cube_ids = []
for _ in range(self.num_cubes):
self.cube_ids.append(p.createMultiBody(CUBE_MASS, cube_collision_shape_id, cube_visual_shape_id))
self.min_cube_id = min(self.cube_ids)
self.max_cube_id = max(self.cube_ids)
# Create robot and initialize constraint
self.robot_id = p.loadURDF(str(self.assets_dir / 'robot.urdf'))
self.robot_cid = p.createConstraint(self.robot_id, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0], [0, 0, 0])
def _create_obstacles(self):
obstacles = []
# Create walls
for x, y, length, width in [
(-self.room_length / 2 - WALL_THICKNESS / 2, 0, WALL_THICKNESS, self.room_width),
(self.room_length / 2 + WALL_THICKNESS / 2, 0, WALL_THICKNESS, self.room_width),
(0, -self.room_width / 2 - WALL_THICKNESS / 2, self.room_length + 2 * WALL_THICKNESS, WALL_THICKNESS),
(0, self.room_width / 2 + WALL_THICKNESS / 2, self.room_length + 2 * WALL_THICKNESS, WALL_THICKNESS)
]:
obstacles.append({'type': 'wall', 'position': (x, y), 'heading': 0, 'length': length, 'width': width})
def get_obstacle_box(obstacle, buffer_width=0.08):
x, y = obstacle['position']
length, width = obstacle['length'], obstacle['width']
b = box(x - length / 2, y - width / 2, x + length / 2, y + width / 2)
if buffer_width > 0:
b = b.buffer(buffer_width)
return b
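# Worked example (values chosen for illustration): a wall obstacle of length 1.0
# and width 0.5 centred at the origin yields box(-0.5, -0.25, 0.5, 0.25), which
# the default buffer_width then inflates by 0.08 m on every side so that columns
# and dividers are never placed flush against it.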
def get_receptacle_box():
obstacle = {'position': self.receptacle_position[:2], 'heading': 0, 'length': RECEPTACLE_WIDTH, 'width': RECEPTACLE_WIDTH}
return get_obstacle_box(obstacle, buffer_width=0)
def add_random_columns(obstacles, max_num_columns):
num_columns = self.random_state.randint(max_num_columns) + 1
column_length = 0.1
column_width = 0.1
buffer_width = 0.08
polygons = [get_receptacle_box()] + [get_obstacle_box(obstacle) for obstacle in obstacles]
for _ in range(10):
new_obstacles = []
new_polygons = []
polygon_union = unary_union(polygons)
for _ in range(num_columns):
for _ in range(100):
x = self.random_state.uniform(
-self.room_length / 2 + 2 * buffer_width + column_length / 2,
self.room_length / 2 - 2 * buffer_width - column_length / 2
)
y = self.random_state.uniform(
-self.room_width / 2 + 2 * buffer_width + column_width / 2,
self.room_width / 2 - 2 * buffer_width - column_width / 2
)
obstacle = {'type': 'column', 'position': (x, y), 'heading': 0, 'length': column_length, 'width': column_width}
b = get_obstacle_box(obstacle)
if not polygon_union.intersects(b):
new_obstacles.append(obstacle)
new_polygons.append(b)
polygon_union = unary_union(polygons + new_polygons)
break
if len(new_obstacles) == num_columns:
break
return new_obstacles
def add_random_horiz_divider():
divider_length = 0.8
divider_width = 0.05
buffer_width = (2 + np.sqrt(2)) * ROUNDED_CORNER_WIDTH
polygons = unary_union([get_receptacle_box()])
for _ in range(10):
new_obstacles = []
for _ in range(100):
x = self.room_length / 2 - divider_length / 2
y = self.random_state.uniform(
-self.room_width / 2 + buffer_width + divider_width / 2,
self.room_width / 2 - buffer_width - divider_width / 2
)
obstacle = {'type': 'divider', 'position': [x, y], 'heading': 0, 'length': divider_length, 'width': divider_width}
b_no_inflate = get_obstacle_box(obstacle, buffer_width=0)
if not polygons.intersects(b_no_inflate):
new_obstacles.append(obstacle)
break
if len(new_obstacles) == 1:
break
return new_obstacles
# Create obstacles
if self.obstacle_config == 'small_empty':
pass
elif self.obstacle_config == 'small_columns':
obstacles.extend(add_random_columns(obstacles, 3))
elif self.obstacle_config == 'large_columns':
obstacles.extend(add_random_columns(obstacles, 8))
elif self.obstacle_config == 'large_divider':
obstacles.extend(add_random_horiz_divider())
else:
raise Exception('Unknown obstacle config: {}'.format(self.obstacle_config))
# Create room corners
for i, (x, y) in enumerate([
(-self.room_length / 2, self.room_width / 2),
(self.room_length / 2, self.room_width / 2),
(self.room_length / 2, -self.room_width / 2),
(-self.room_length / 2, -self.room_width / 2)
]):
if i == 1: # Skip the receptacle corner
continue
heading = -np.radians(i * 90)
offset = ROUNDED_CORNER_WIDTH / np.sqrt(2)
adjusted_position = (x + offset * np.cos(heading - np.radians(45)), y + offset * np.sin(heading - np.radians(45)))
obstacles.append({'type': 'corner', 'position': adjusted_position, 'heading': heading})
# Create additional corners for the divider
new_obstacles = []
for obstacle in obstacles:
if obstacle['type'] == 'divider':
(x, y), length, width = obstacle['position'], obstacle['length'], obstacle['width']
corner_positions = [(self.room_length / 2, y
"timerclockbase"), TimerClockDivisor = parser.getint(section, "timerclockdivisor"))
# Set Timers / Counters
section = "Timers And Counters"
if parser.has_section(section):
nte = None
c0e = None
c1e = None
cpo = None
if parser.has_option(section, "NumberTimersEnabled"):
nte = parser.getint(section, "NumberTimersEnabled")
if parser.has_option(section, "TimerCounterPinOffset"):
cpo = parser.getint(section, "TimerCounterPinOffset")
if parser.has_option(section, "Counter0Enabled"):
c0e = parser.getboolean(section, "Counter0Enabled")
if parser.has_option(section, "Counter1Enabled"):
c1e = parser.getboolean(section, "Counter1Enabled")
self.configIO(NumberOfTimersEnabled = nte, EnableCounter1 = c1e, EnableCounter0 = c0e, TimerCounterPinOffset = cpo)
mode = None
value = None
if parser.has_option(section, "timer0 mode"):
mode = parser.getint(section, "timer0 mode")
if parser.has_option(section, "timer0 value"):
value = parser.getint(section, "timer0 value")
self.getFeedback( Timer0Config(mode, value) )
if parser.has_option(section, "timer1 mode"):
mode = parser.getint(section, "timer1 mode")
if parser.has_option(section, "timer1 value"):
value = parser.getint(section, "timer1 value")
self.getFeedback( Timer1Config(mode, value) )
loadConfig.section = 3
class FeedbackCommand(object):
"""
The FeedbackCommand class is the base for all the Feedback commands.
"""
readLen = 0
def handle(self, input):
return None
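# Each concrete command below follows the same pattern (see AIN, LED, etc.):
#   - __init__ builds self.cmdBytes, the raw bytes appended to the feedback packet
#   - the class attribute readLen gives how many response bytes the command consumes
#   - handle() converts those response bytes into a Python value (or None)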
class AIN(FeedbackCommand):
'''
Analog Input Feedback command
specify the positive and negative channels to use
(0-16, 30 and 31 are possible)
also specify whether to turn on longSettle or quick Sample
returns 16-bit signed int sample
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.AIN(PositiveChannel = 0, NegativeChannel=31, LongSettling=False, QuickSample=False))
Sent: [0x1b, 0xf8, 0x2, 0x0, 0x20, 0x0, 0x0, 0x1, 0x0, 0x1f]
Response: [0xab, 0xf8, 0x3, 0x0, 0xaf, 0x0, 0x0, 0x0, 0x0, 0x20, 0x8f, 0x0]
[36640]
'''
def __init__(self, PositiveChannel, NegativeChannel=31,
LongSettling=False, QuickSample=False):
self.positiveChannel = PositiveChannel
self.negativeChannel = NegativeChannel
self.longSettling = LongSettling
self.quickSample = QuickSample
validChannels = list(range(16)) + [30, 31]
if PositiveChannel not in validChannels:
raise Exception("Invalid Positive Channel specified")
if NegativeChannel not in validChannels:
raise Exception("Invalid Negative Channel specified")
b = PositiveChannel
b |= (int(bool(LongSettling)) << 6)
b |= (int(bool(QuickSample)) << 7)
self.cmdBytes = [ 0x01, b, NegativeChannel ]
readLen = 2
def __repr__(self):
return "<u3.AIN( PositiveChannel = %s, NegativeChannel = %s, LongSettling = %s, QuickSample = %s )>" % ( self.positiveChannel, self.negativeChannel, self.longSettling, self.quickSample )
def handle(self, input):
result = (input[1] << 8) + input[0]
return result
class WaitShort(FeedbackCommand):
'''
WaitShort Feedback command
specify the number of 128us time increments to wait
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.WaitShort(Time = 9))
Sent: [0x9, 0xf8, 0x2, 0x0, 0xe, 0x0, 0x0, 0x5, 0x9, 0x0]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
'''
def __init__(self, Time):
self.time = Time % 256
self.cmdBytes = [ 5, Time % 256 ]
def __repr__(self):
return "<u3.WaitShort( Time = %s )>" % self.time
class WaitLong(FeedbackCommand):
'''
WaitLong Feedback command
specify the number of 32ms time increments to wait
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.WaitLong(Time = 70))
Sent: [0x47, 0xf8, 0x2, 0x0, 0x4c, 0x0, 0x0, 0x6, 0x46, 0x0]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
'''
def __init__(self, Time):
self.time = Time % 256
self.cmdBytes = [ 6, Time % 256 ]
def __repr__(self):
return "<u3.WaitLong( Time = %s )>" % self.time
class LED(FeedbackCommand):
'''
LED Toggle
specify whether the LED should be on or off by truth value
1 or True = On, 0 or False = Off
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.LED(State = False))
Sent: [0x4, 0xf8, 0x2, 0x0, 0x9, 0x0, 0x0, 0x9, 0x0, 0x0]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
>>> d.getFeedback(u3.LED(State = True))
Sent: [0x5, 0xf8, 0x2, 0x0, 0xa, 0x0, 0x0, 0x9, 0x1, 0x0]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
'''
def __init__(self, State):
self.state = State
self.cmdBytes = [ 9, int(bool(State)) ]
def __repr__(self):
return "<u3.LED( State = %s )>" % self.state
class BitStateRead(FeedbackCommand):
'''
BitStateRead Feedback command
read the state of a single bit of digital I/O. Only digital
lines return valid readings.
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
return 0 or 1
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.BitStateRead(IONumber = 5))
Sent: [0xa, 0xf8, 0x2, 0x0, 0xf, 0x0, 0x0, 0xa, 0x5, 0x0]
Response: [0xfb, 0xf8, 0x2, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1]
[1]
'''
def __init__(self, IONumber):
self.ioNumber = IONumber
self.cmdBytes = [ 10, IONumber % 20 ]
readLen = 1
def __repr__(self):
return "<u3.BitStateRead( IONumber = %s )>" % self.ioNumber
def handle(self, input):
return int(bool(input[0]))
class BitStateWrite(FeedbackCommand):
'''
BitStateWrite Feedback command
write a single bit of digital I/O. The direction of the
specified line is forced to output.
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
State: 0 or 1
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.BitStateWrite(IONumber = 5, State = 0))
Sent: [0xb, 0xf8, 0x2, 0x0, 0x10, 0x0, 0x0, 0xb, 0x5, 0x0]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
'''
def __init__(self, IONumber, State):
self.ioNumber = IONumber
self.state = State
self.cmdBytes = [ 11, (IONumber % 20) + (int(bool(State)) << 7) ]
def __repr__(self):
return "<u3.BitStateWrite( IONumber = %s, State = %s )>" % (self.ioNumber, self.state)
class BitDirRead(FeedbackCommand):
'''
Read the digital direction of one I/O
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
returns 1 = Output, 0 = Input
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.BitDirRead(IONumber = 5))
Sent: [0xc, 0xf8, 0x2, 0x0, 0x11, 0x0, 0x0, 0xc, 0x5, 0x0]
Response: [0xfb, 0xf8, 0x2, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1]
[1]
'''
def __init__(self, IONumber):
self.ioNumber = IONumber
self.cmdBytes = [ 12, IONumber % 20 ]
readLen = 1
def __repr__(self):
return "<u3.BitDirRead( IONumber = %s )>" % self.ioNumber
def handle(self, input):
return int(bool(input[0]))
class BitDirWrite(FeedbackCommand):
'''
BitDirWrite Feedback command
Set the digital direction of one I/O
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
Direction: 1 = Output, 0 = Input
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.BitDirWrite(IONumber = 5, Direction = 0))
Sent: [0xd, 0xf8, 0x2, 0x0, 0x12, 0x0, 0x0, 0xd, 0x5, 0x0]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
'''
def __init__(self, IONumber, Direction):
self.ioNumber = IONumber
self.direction = Direction
self.cmdBytes = [ 13, (IONumber % 20) + (int(bool(Direction)) << 7) ]
def __repr__(self):
return "<u3.BitDirWrite( IONumber = %s, Direction = %s )>" % (self.ioNumber, self.direction)
class PortStateRead(FeedbackCommand):
"""
PortStateRead Feedback command
Reads the state of all digital I/O.
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.PortStateRead())
Sent: [0x14, 0xf8, 0x1, 0x0, 0x1a, 0x0, 0x0, 0x1a]
Response: [0xeb, 0xf8, 0x3, 0x0, 0xee, 0x1, 0x0, 0x0, 0x0, 0xe0, 0xff, 0xf]
[{'CIO': 15, 'FIO': 224, 'EIO': 255}]
"""
def __init__(self):
self.cmdBytes = [ 26 ]
readLen = 3
def handle(self, input):
return {'FIO' : input[0], 'EIO' : input[1], 'CIO' : input[2] }
def __repr__(self):
return "<u3.PortStateRead()>"
class PortStateWrite(FeedbackCommand):
"""
PortStateWrite Feedback command
State: A list of 3 bytes representing FIO, EIO, CIO
WriteMask: A list of 3 bytes, representing which to update.
The Default is all ones.
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.PortStateWrite(State = [0xab, 0xcd, 0xef], WriteMask = [0xff, 0xff, 0xff]))
Sent: [0x81, 0xf8, 0x4, 0x0, 0x7f, 0x5, 0x0, 0x1b, 0xff, 0xff, 0xff, 0xab, 0xcd, 0xef]
Response: [0xfa, 0xf8, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]
[None]
"""
def __init__(self, State, WriteMask = [0xff, 0xff, 0xff]):
self.state = State
self.writeMask = WriteMask
self.cmdBytes = [ 27 ] + WriteMask + State
def __repr__(self):
return "<u3.PortStateWrite( State = %s, WriteMask = %s )>" % (self.state, self.writeMask)
class PortDirRead(FeedbackCommand):
"""
PortDirRead Feedback command
Reads the direction of all digital I/O.
>>> import u3
>>> d = u3.U3()
>>> d.debug = True
>>> d.getFeedback(u3.PortDirRead())
Sent: [0x16, 0xf8, 0x1, 0x0, 0x1c, 0x0, 0x0, 0x1c]
Response: [0xfb, 0xf8, 0x3, 0x0, 0xfe, 0x1, 0x0, 0x0, 0x0,
# repo: jamespltan/sppa
from sppa.Utilities import *
import copy
from collections import OrderedDict, Callable
class Expr:
def __init__(self):
self.singular = False
self.nlinexprs = dict()
self.lin_vars = dict()
self.bounds = None
self.var_type = None
self.expression_type = None
self.const = None
self.name = None
self.df = None
self.f = None
self.violated = None
self.violation = None
self.ftol = None
self.xtol = None
self.con_tol = None
def __add__(self, expr):
return add_expr(self, expr)
def __radd__(self, expr):
return add_expr(self, expr)
def __iadd__(self, expr):
return add_expr(self, expr, return_copy=False)
def __mul__(self, coeff):
return mul_expr(self, coeff)
def __rmul__(self, coeff):
return mul_expr(self, coeff)
def __imul__(self, coeff):
return mul_expr(self, coeff, return_copy=False)
def __truediv__(self, coeff):
return div_expr(self, coeff)
def __rtruediv__(self, expr):
raise Exception('can\'t take the reciprocal of an expression')
def __itruediv__(self, coeff):
return div_expr(self, coeff, return_copy=False)
def __sub__(self, expr):
return sub_expr(self, expr)
def __rsub__(self, expr):
return rsub_expr(self, expr)
def __isub__(self, expr):
return sub_expr(self, expr, return_copy=False)
def __pos__(self):
return self
def __neg__(self):
return mul_expr(self, -1)
def __str__(self):
return self.print_expr(return_str=True)
def compute_expression(self, variables_values):
f, self.violation, self.violated = self.evaluate_expression_lenient(variables_values, return_violation=True)
if self.f is not None:
self.df = f - self.f
self.f = f
def evaluate_expression(self, variables_values, return_violation=False):
f = 0 if self.const is None else self.const
for variable_name, var_info in self.lin_vars.items():
x = retrieve_variable_value(variables_values, var_info)
f = f + var_info.coeff * x
for nlinexpr in self.nlinexprs.values():
args = []
for var_fun in nlinexpr.vars_fun:
args += [retrieve_variable_value(variables_values, var_fun)]
f = f + nlinexpr.coeff * nlinexpr.fun_nlin(*args, **nlinexpr.kwargs)
violation = None
violated = None
if self.expression_type == EQ_CON and self.con_tol is not None:
violation = abs(f)
if abs(f) > self.con_tol:
violated = True
else:
violated = False
if self.expression_type == IEQ_CON and self.con_tol is not None:
violation = abs(min(0, f))
if f < -self.con_tol:
violated = True
else:
violated = False
if return_violation:
return f, violation, violated
else:
return f
def evaluate_expression_lenient(self, variables_values, return_violation=False):
f = 0 if self.const is None else self.const
for variable_name, var_info in self.lin_vars.items():
x = retrieve_variable_value_lenient(variables_values, var_info)
f = f + var_info.coeff * x
for nlinexpr in self.nlinexprs.values():
args = []
for var_fun in nlinexpr.vars_fun:
args += [retrieve_variable_value_lenient(variables_values, var_fun)]
f = f + nlinexpr.coeff * nlinexpr.fun_nlin(*args, **nlinexpr.kwargs)
violation = None
violated = None
if self.expression_type == EQ_CON and self.con_tol is not None:
violation = abs(f)
if abs(f) > self.con_tol:
violated = True
else:
violated = False
if self.expression_type == IEQ_CON and self.con_tol is not None:
violation = abs(min(0, f))
if f < -self.con_tol:
violated = True
else:
violated = False
if return_violation:
return f, violation, violated
else:
return f
def get_variables(self):
variables = []
for variable in self.lin_vars.values():
variables.append(variable)
for nlinexpr in self.nlinexprs.values():
for variable in nlinexpr.vars_fun:
variables.append(variable)
return variables
def print_expr(self, return_str=False):
started = False
s = ''
for var_name, var_info in self.lin_vars.items():
if started and var_info.coeff > 0:
s += ' + '
if var_info.coeff != 1.0:
if var_info.coeff == -1:
if not started:
s += '-'
else:
s += ' - '
else:
s += num_str(var_info.coeff, started) + '*'
if not started:
started = True
s += var_name
for nlinexpr in self.nlinexprs.values():
if started and nlinexpr.coeff > 0:
s += ' + '
if nlinexpr.coeff != 1.0:
if nlinexpr.coeff == -1:
if not started:
s += '-'
else:
s += ' - '
else:
s += num_str(nlinexpr.coeff, started) + '*'
s += nlinexpr.fun_nlin.__name__ + '('
for j, var in enumerate(nlinexpr.vars_fun):
if var.expand:
s += '<'
s += var.name
if var.expand:
s += '>'
if j != len(nlinexpr.vars_fun) - 1:
s += ','
if nlinexpr.kwargs:
s += ','
for j, kwarg in enumerate(nlinexpr.kwargs.items()):
s += kwarg[0] + '=' + str(kwarg[1])
if j != len(nlinexpr.kwargs) - 1:
s += ','
s += ')'
if not started:
started = True
if self.const is not None and self.const != 0:
if started and self.const > 0:
s += ' + '
s += num_str(self.const, started)
if return_str is False:
print(s)
else:
return s
# Variable type can be 'cont' for continuous (default), 'int' for integer, or 'bin' for binary
# full string names are also acceptable i.e. 'continuous', 'integer', and 'binary'
# bounds must be defined for variables involved in nonlinear expressions
class Var(Expr):
def __init__(self, var_name, low_bound=None, up_bound=None, var_type='cont', expand=False,
breakpoint_fun=None, **breakpoint_kwargs):
super().__init__()
if isinstance(var_name, numbers.Number):
self.const = var_name
return
check_string(var_name)
check_boolean(expand, 'expand')
check_breakpoint(breakpoint_fun, breakpoint_kwargs)
var_type = check_vartype(var_type)
coeff = 1.0
bounds, expand = check_bounds(low_bound, up_bound, var_type, expand)
self.singular = True
self.name = var_name
self.bounds = bounds
self.vertices = None
self.var_type = var_type
self.expand = expand
self.breakpoint_fun = None
self.breakpoint_kwargs = breakpoint_kwargs
self.var_info = VarInfo(coeff, bounds, var_type, var_name, expand, breakpoint_fun, breakpoint_kwargs)
self.lin_vars[var_name] = self.var_info
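# Minimal usage sketch (hypothetical variables; not part of the original source):
#   x = Var('x', low_bound=0.0, up_bound=10.0)             # continuous by default
#   y = Var('y', low_bound=0, up_bound=5, var_type='int')
#   expr = 2 * x + y - 3   # the overloaded operators build up a linear Expr
#   expr.print_expr()      # prints something like: 2*x + y - 3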
class VarInfo:
def __init__(self, coeff, bounds, var_type, name, expand, breakpoint_fun, breakpoint_kwargs):
self.coeff = coeff
self.bounds = bounds
self.var_type = var_type
self.name = name
self.expand = expand
self.decompose = False
self.n_pieces = None
self.n_vertices = None
self.vertices = None
self.decomposed_bins = None
self.decomposed_vars = None
self.value = None
self.dvalue = None
self.breakpoint_fun = breakpoint_fun
self.breakpoint_kwargs = breakpoint_kwargs
# n_pieces and n_vertices are not necessarily the ones set by the user if this is an expanded variable
def set_vertices(self, vertices):
check_vertices(vertices, self.bounds, self.var_type)
self.vertices = vertices
self.n_pieces = len(vertices) - 1
self.n_vertices = len(vertices)
self.decompose = True
self.decomposed_vars = [VarInfo(1.0, [vertices[i], vertices[i+1]], self.var_type,
self.name + '_var_' + str(i), self.expand, None, {})
for i in range(self.n_pieces)]
self.decomposed_bins = [VarInfo(1.0, [None, None], BIN_VAR, self.name + '_bin_' + str(i), False, None, {})
for i in range(self.n_pieces)]
def set_value(self, value):
if self.value is not None:
self.dvalue = value - self.value
self.value = value
# vars_fun is an iterable returning Var objects that are involved in the nonlinear expression
# once initialized, vars_fun is a list of varinfo objects
# avoid passing objects (such as splines) as kwargs
class NlinExpr(Expr):
def __init__(self, fun_nlin, *vars_fun, **kwargs):
super().__init__()
coeff = 1.0
check_kwargs(kwargs)
check_fun(fun_nlin)
vars_fun = check_variables(vars_fun, Var)
vars_fun = [var_fun.var_info for var_fun in vars_fun]
vars_fun_names = tuple(var_fun.name for var_fun in vars_fun)
self.nlinexprs[(fun_nlin, vars_fun_names, frozenset(kwargs.items()))] = \
NlinExprInfo(coeff, fun_nlin, vars_fun, kwargs)
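# Usage sketch (assumes numpy's sin passes check_fun; bounds are mandatory for
# variables that appear inside a nonlinear expression, per the note above Var):
#   import numpy as np
#   x = Var('x', low_bound=0.0, up_bound=3.14)
#   nl = NlinExpr(np.sin, x)        # represents sin(x)
#   total = 3 * nl + 2 * x - 1      # composes with linear terms like any Expr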
class NlinExprInfo:
def __init__(self, coeff, fun_nlin, vars_fun, kwargs):
self.coeff = coeff
self.fun_nlin = fun_nlin
self.vars_fun = vars_fun
self.vars_fun_names = [var_fun.name for var_fun in vars_fun]
self.kwargs = kwargs
self.decomposed_vars = []
self.decomposed_bins = []
self.cube_ids = None
self.simplex_ids = None
self.nlinexpr_id = None
self.sharing_nlinexpr_decomposition = None # share nlinexpr decomposition with other nlinexpr or singular var
def decompose_expression(self, decomposed_nlinexprs, variables_to_decompose):
if self.nlinexpr_id is None:
raise Exception('id of nonlinear expression is not yet set')
self.vars_pieces = [var_fun.n_pieces for var_fun in self.vars_fun]
self.cube_ids = list(itertools.product(*[range(vp) for vp in self.vars_pieces]))
self.simplex_ids = list(itertools.permutations(range(len(self.vars_fun))))
n_simplices = np.math.factorial(len(self.vars_fun))
nlinexpr_vars_fun_names = [nlinexpr.vars_fun_names for nlinexpr in decomposed_nlinexprs]
if self.vars_fun_names in nlinexpr_vars_fun_names:
sharing_parent_index = nlinexpr_vars_fun_names.index(self.vars_fun_names)
else:
sharing_parent_index = None
if len(self.vars_fun) == 1 or sharing_parent_index is not None:
self.sharing_nlinexpr_decomposition = True
if len(self.vars_fun) == 1:
variable_to_decompose = variables_to_decompose[self.vars_fun[0].name]
self.decomposed_vars = copy.deepcopy(np.array(variable_to_decompose.decomposed_vars))
self.decomposed_vars = self.decomposed_vars[np.newaxis, :, np.newaxis]
self.decomposed_bins = copy.deepcopy(np.array(variable_to_decompose.decomposed_bins))
self.decomposed_bins = self.decomposed_bins[:, np.newaxis]
else:
self.decomposed_vars = copy.deepcopy(decomposed_nlinexprs[sharing_parent_index].decomposed_vars)
self.decomposed_bins = copy.deepcopy(decomposed_nlinexprs[sharing_parent_index].decomposed_bins)
else:
self.sharing_nlinexpr_decomposition = False
self.decomposed_vars = np.full([len(self.vars_fun)] + self.vars_pieces + [n_simplices], None)
self.decomposed_bins = np.full(self.vars_pieces + [n_simplices], None)
f_vertex = {}
for cube_id in self.cube_ids:
vertex_cube = tuple(vf.vertices[cube_id[j]] for j, vf in enumerate(self.vars_fun))
f_vertex[vertex_cube] = self.fun_nlin(*vertex_cube, **self.kwargs)
for cube_id in self.cube_ids:
cube_id_str = ','.join([str(dimension_id) for dimension_id in cube_id])
vertex_cube = [vf.vertices[cube_id[j]] for j, vf in enumerate(self.vars_fun)]
f_vertex_cube = f_vertex[tuple(vertex_cube)]
for simplex_index_id, simplex_id in enumerate(self.simplex_ids):
simplex_id_str = str(simplex_index_id)
cum_const = 0
vertex_a = copy.copy(vertex_cube)
vertex_b = copy.copy(vertex_a)
for dimension_step in simplex_id:
var_fun = self.vars_fun[dimension_step]
vertex_index = cube_id[dimension_step]
vertex_b[dimension_step] = var_fun.vertices[vertex_index + 1]
if tuple(vertex_b) not in f_vertex:
f_vertex[(tuple(vertex_b))] = self.fun_nlin(*vertex_b, **self.kwargs)
f_vertex_a = f_vertex[tuple(vertex_a)]
f_vertex_b = f_vertex[tuple(vertex_b)]
coeff = f_vertex_b - f_vertex_a
d = var_fun.vertices[vertex_index + 1] - var_fun.vertices[vertex_index]
coeff = coeff / d
cum_const += coeff * var_fun.vertices[vertex_index]
bounds = [var_fun.vertices[vertex_index], var_fun.vertices[vertex_index + 1]]
var_type = var_fun.var_type
expand = var_fun.expand
if self.sharing_nlinexpr_decomposition:
self.decomposed_vars[(dimension_step,) + cube_id + (simplex_index_id,)].coeff = coeff
else:
var_name = 'nlinexpr' + str(self.nlinexpr_id) + '_' + var_fun.name + \
'_' + cube_id_str + '_' + simplex_id_str
var_info = VarInfo(coeff, bounds, var_type, var_name, expand, None, {})
self.decomposed_vars[(dimension_step,) + cube_id + (simplex_index_id,)]
import numpy as np
from .. Error import UnitsError
from .utils import is_zero
from .parser import eval_expr
__all__ = [
'eval_quantity', 'eval_qty', 'Quantity', 'ArrayQuantity',
'FundamentalUnits']
def eval_quantity(expr):
"""
Evaluate string expression of physical quantity or units.
If `expr` is not a string type, then `expr` is returned unchanged.
Parameters
----------
expr : str
Describe physical quantity
Returns
-------
qty : :class:`Quantity`
Representation of physical quantity
"""
if isinstance(expr, str):
return eval_expr(expr)
else:
return expr
eval_qty = eval_quantity # Abbreviation for above (gets used a lot).
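# Example (assumed expression grammar; the exact syntax is defined by the parser
# behind eval_expr):
#   v = eval_qty('3.0 m/s')
#   t = eval_qty('2.0 s')
#   d = v * t                # a Quantity equivalent to 6.0 metres
#   d.in_units('m')          # -> 6.0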
class GenericQuantity(object):
@staticmethod
def _unpack_qty(other):
if isinstance(other, Quantity):
value = other.value
units = other.units
elif isinstance(other, ArrayQuantity):
value = other.view(np.ndarray)
units = other._units
elif isinstance(other, (list, tuple)):
value = np.array(other)
units = FundamentalUnits.null()
else:
value = other
units = FundamentalUnits.null()
return (value, units)
@classmethod
def _build(cls, value, units):
if not units:
return value
if isinstance(value, (float, int)):
return Quantity(value, units)
elif isinstance(value, np.ndarray):
new_value = value.view(ArrayQuantity)
new_value._units = units
return new_value
else:
raise AssertionError(
'Unknown value type to GenericQuantity._build(): %r' % value)
"""
def __new__(cls, value, units):
q = cls._numeric_class.__new__(cls, value)
q._units = units
return q
"""
def in_units(self, units):
"""
Convert to specified `units` and return numerical value.
Parameters
----------
units : str or instance of :class:`Quantity`
Describe the units to convert to before formatting
Returns
-------
formatted : int or float
The numerical value of the converted quantity
Raises
------
UnitsError
If `units` are incompatible with the units of this quantity.
"""
value = self/eval_qty(units)
if isinstance(value, GenericQuantity):
raise UnitsError("Units of '%s' are incompatible with '%s'"
% (self, units))
return value
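# e.g. (assuming metre and kilometre units are known to the parser):
#   eval_qty('1500 m').in_units('km')   # -> 1.5
#   eval_qty('1500 m').in_units('s')    # -> raises UnitsError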
def has_units(self, units):
"""
Test for compatibility with given `units`.
Parameters
----------
units : str or instance of :class:`Quantity`
Describe the units this quantity must be convertible to
Returns
-------
is_compatible : bool
`True` if and only if conversion to `units` is possible.
"""
(self_value, self_units) = self._unpack_qty(self)
if isinstance(units, FundamentalUnits):
return self_units == units
else:
return self_units == self._unpack_qty(eval_qty(units))[1]
def __eq__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if not is_zero(other) and (not other_units or
not self.has_units(other_units)):
return False
return self_value == other_value
def __ne__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if not is_zero(other) and (not other_units or
not self.has_units(other_units)):
return True
return self_value != other_value
def __lt__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in comparison'
% (self_units, other_units))
return self_value < other_value
def __ge__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in comparison'
% (self_units, other_units))
return self_value >= other_value
def __le__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in comparison'
% (self_units, other_units))
return self_value <= other_value
def __add__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in addition'
% (self_units, other_units))
return self._build(self_value + other_value, self_units)
def __radd__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in addition'
% (self_units, other_units))
return self._build(other_value + self_value, self_units)
def __sub__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in subtraction'
% (self_units, other_units))
return self._build(self_value - other_value, self_units)
def __rsub__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if (not is_zero(other_value) and
(not other_units or not self.has_units(other_units))):
raise UnitsError(
'Incompatible units %s vs %s in subtraction'
% (self_units, other_units))
return self._build(other_value - self_value, self_units)
def __mul__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
return self._build(self_value*other_value, self_units*other_units)
def __rmul__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
return self._build(other_value*self_value, other_units*self_units)
def __truediv__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
return self._build(self_value/other_value, self_units/other_units)
__floordiv__ = __truediv__
__div__ = __truediv__ # Python 2 compatibility alias
def __rdiv__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
return self._build(other_value/self_value, other_units/self_units)
__rtruediv__ = __rdiv__
def __pow__(self, other):
(self_value, self_units) = self._unpack_qty(self)
(other_value, other_units) = self._unpack_qty(other)
if other_units:
raise TypeError('Invalid operation: exponentiation by quantity.')
return self._build(
self_value**other_value, self_units**other_value)
def __rpow__(self, other):
raise TypeError('Invalid operation: exponentiation by quantity.')
def __neg__(self):
(self_value, self_units) = self._unpack_qty(self)
return self._build(-self_value, self_units)
def __abs__(self):
(self_value, self_units) = self._unpack_qty(self)
return self._build(abs(self_value), self_units)
def __str__(self):
(value, units) = self._unpack_qty(self)
return str(value) + ' ' + str(units)
def __repr__(self):
(value, units) = self._unpack_qty(self)
return '%s(%r, %r)' % (type(self).__name__, value, units)
class Quantity(GenericQuantity):
"""
Represent a physical quantity.
.. note::
This class should not be instantiated directly by users. Rather, users
should construct quantities and units using :func:`eval_qty()` or
another helper function.
Physical quantities usually possess units of some kind, and these are
stored internally as an instance of :class:`FundamentalUnits`.
Effectively, physical quantities are always stored in SI units to simplify
arithmetic. Various methods are available to format or numerically convert
them to any available compatible units.
To facilitate meaningful computation with unitful quantities, this
representation overloads several arithmetic and comparison operators:
* Comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>=``, and
``>``) are supported between quantities with compatible units.
* Addition (``+``) and subtraction (``-``) are supported between
quantities with compatible units.
* Multiplication (``*``) and division (``/``) are supported between
two quantities and between a scalar and a quantity.
* Exponentiation (``**``) is supported for a quantity as the base and a
scalar as the exponent.
* Conversion to type `bool`: The converted value is `True`, if and only
if the quantity is non-zero.
* Conversion to type `int` or type `float`: The converted value is
the value of the quantity in equivalent SI units.
If two quantities involved have incompatible units and such compatibility
is required, the operation raises :exc:`chemtk.error.UnitsError`.
If an arithmetic operation leads to a unitless result, then a number will
be returned instead of a :class:`Quantity`.
"""
__array_priority__ = 1.0
def __init__(self, value, units):
self.value = value
if isinstance(units, str):
units = eval_qty(units).units
self.units = units
def __hash__(self):
return hash(self.value)
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def fmt_in_units(self, units):
"""
Format in specified `units`.
Convert this quantity to `units` and return a string representation of
the converted quantity.
Parameters
----------
units : str or instance of :class:`Quantity`
Describe the units to convert to before formatting
Returns
-------
formatted : str
String representation of quantity after converting to `units`
Raises
------
UnitsError
If `units` are incompatible with the units of this quantity.
"""
return '%g %s' % (self.in_units(units), units)
class ArrayQuantity(GenericQuantity, np.ndarray):
def __new__(cls, data, *args, **kwargs):
units = kwargs.pop('units', None)
if units:
units = eval_qty(units).units
if(hasattr(data, '__iter__')
and all((isinstance(datum, GenericQuantity) or datum == 0)
for datum in data)):
# Bundled together quantities.
values = []
use_units = None
for datum in data:
(value, datum_units) = cls._unpack_qty(datum)
if not use_units and datum_units:
use_units = datum_units
if(use_units and datum_units and use_units != datum_units
and value != 0 and not units):
raise UnitsError(
'Inconsistent units in contents provided to '
'ArrayQuantity initializer')
values.append(value)
data = values
if units:
if use_units != units:
raise UnitsError(
'Units in contents do not match specified units in '
'ArrayQuantity initializer')
else:
units = use_units
arrqty = np.array(data, *args, **kwargs)
if not issubclass(arrqty.dtype.type, np.number):
raise TypeError('Units may only be assigned to numerical values.')
arrqty = arrqty.view(cls)
if units:
arrqty._units = units
elif hasattr(arrqty, '_units'):
# file: boardfarm/devices/axiros_acs.py
import ast
import ipaddress
import os
import re
import time
import warnings
import xml.dom.minidom
from datetime import datetime
from xml.etree import ElementTree
import pexpect
import xmltodict
from boardfarm.exceptions import (ACSFaultCode, CodeError, TR069FaultCode,
TR069ResponseError)
from boardfarm.lib.bft_pexpect_helper import bft_pexpect_helper
from boardfarm.lib.network_testing import (kill_process, tcpdump_capture,
tshark_read)
from debtcollector import moves
from nested_lookup import nested_lookup
from requests import HTTPError, Session
from requests.auth import HTTPBasicAuth
from zeep import Client
from zeep.cache import InMemoryCache
from zeep.transports import Transport
from zeep.wsse.username import UsernameToken
from . import base_acs
warnings.simplefilter("always")
class AxirosACS(base_acs.BaseACS):
"""ACS connection class used to perform TR069 operations on stations/board."""
model = "axiros_acs_soap"
name = "acs_server"
# should the following be dynamic?
namespaces = {"http://www.w3.org/2001/XMLSchema-instance": None}
CPE_wait_time = 60 * 1 # too long?
Count_retry_on_error = 3 # to be audited
def __init__(self, *args, **kwargs):
"""Intialize the varible that are used in establishing connection to the ACS and\
Intialize an HTTP SOAP client which will authenticate with the ACS server.
:param ``*args``: the arguments to be used if any
:type ``*args``: tuple
:param ``**kwargs``: extra args to be used if any (mainly contains username, password, ipaddr and port)
:type ``**kwargs``: dict
"""
self.args = args
self.kwargs = kwargs
self.username = self.kwargs["username"]
self.password = self.kwargs["password"]
self.ipaddr = self.kwargs["ipaddr"]
self.port = self.kwargs.get("port", None)
self.cli_port = self.kwargs.pop("cli_port", "22")
self.cli_username = self.kwargs.pop("cli_username", None)
self.cli_password = self.kwargs.pop("cli_password", None)
self.color = self.kwargs.pop("color", None)
self.options = self.kwargs.pop("options", None)
AxirosACS.CPE_wait_time = self.kwargs.pop("wait_time",
AxirosACS.CPE_wait_time)
if self.options:
options = [x.strip() for x in self.options.split(",")]
for opt in options:
if opt.startswith("wan-static-ipv6:"):
ipv6_address = opt.replace("wan-static-ipv6:", "").strip()
if "/" not in opt:
ipv6_address += "/64"
self.ipv6_interface = ipaddress.IPv6Interface(ipv6_address)
self.gwv6 = self.ipv6_interface.ip
if self.port is not None:
target = self.ipaddr + ":" + self.port
else:
target = self.ipaddr
self.wsdl = "http://" + target + "/live/CPEManager/DMInterfaces/soap/getWSDL"
session = Session()
session.auth = HTTPBasicAuth(self.username, self.password)
self.client = Client(
wsdl=self.wsdl,
transport=Transport(session=session,
cache=InMemoryCache(timeout=3600 * 3)),
wsse=UsernameToken(self.username, self.password),
)
# to spawn pexpect on cli
self.session_connected = False
if all([self.ipaddr, self.cli_username, self.cli_password]):
bft_pexpect_helper.spawn.__init__(
self,
command="ssh",
args=[
"%s@%s" % (self.cli_username, self.ipaddr),
"-p",
self.cli_port,
"-o",
"StrictHostKeyChecking=no",
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
"ServerAliveInterval=60",
"-o",
"ServerAliveCountMax=5",
],
)
self.check_connection(self.cli_username, self.name,
self.cli_password)
self.print_connected_console_msg(self.ipaddr, self.cli_port,
self.color, self.name)
self.session_connected = True
# this should be populated ONLY when using __main__
self.cpeid = self.kwargs.pop("cpeid", None)
def sudo_sendline(self, cmd):
# overwriting linux behaviour
# this is under assumption that acs is having root credentials.
self.sendline(cmd)
def tcp_dump(func):
""" Decorator to capture tcpdump in error cases
"""
def wrapper(self, *args, **kwargs):
pid = None
try:
if not self.session_connected:
warnings.warn(
"Tcp dump cannot be captured as no ssh session exists")
else:
capture_file = "acs_debug" + time.strftime(
"%Y%m%d-%H%M%S") + ".pcap"
tcpdump_output = tcpdump_capture(self,
"any",
capture_file=capture_file)
pid = re.search(r"(\[\d{1,10}\]\s(\d{1,6}))",
tcpdump_output).group(2)
out = func(self, *args, **kwargs)
kill_process(self, process="tcpdump", pid=pid)
self.sendline("rm %s" % capture_file)
return out
except Exception as e:
kill_process(self, process="tcpdump", pid=pid)
tshark_read(self, capture_file, filter_str="-Y http")
raise e
return wrapper
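# Usage sketch: any RPC method on this class can be wrapped so that a pcap is
# captured and dumped (via tshark) when the call fails, e.g.
#   @tcp_dump
#   def GPV(self, ...): ...
# This assumes the ACS host has tcpdump installed and the ssh session has the
# privileges implied by sudo_sendline above.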
def __str__(self):
"""Format the string representation of self object (instance).
:returns: :class:`Response <Response>` string representation of self object.
:rtype: string
"""
return "AxirosACS"
# TO DO: maybe this could be moved to a lib
@staticmethod
def _data_conversion(d):
"""Conversion type/data helper."""
def to_int(v):
return int(v)
def to_bool(v):
if v == "1":
return "true"
elif v == "0":
return "false"
return v
def to_dateTime(v):
if re.search(r"^1\s", v):
v = v.zfill(len(v) + 3)
v = datetime.strptime(
v, "%Y %m %d %H %M %S.0").strftime("%Y-%m-%dT%H:%M:%S")
return v
conv_table = {
"xsd3:string": {
"string": None
},
"xsd3:integer": {
"integer": to_int
},
"xsd3:boolean": {
"boolean": to_bool
},
"xsd3:ur-type[6]": {
"dateTime": to_dateTime
},
}
convdict = conv_table.get(d["type"])
if convdict:
d["type"] = next(iter(convdict))
if d["value"] != "" and convdict[d["type"]]:
v = convdict[d["type"]](d["value"])
d["value"] = v
return d
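# e.g. _data_conversion({'key': 'X', 'type': 'xsd3:boolean', 'value': '1'})
#      -> {'key': 'X', 'type': 'boolean', 'value': 'true'}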
@staticmethod
def _parse_xml_response(data_values):
data_list = []
if type(data_values) is list:
pass
else:
data_values = [data_values]
for data in data_values:
if "AccessList" in data["value"]:
data_list.append({
"Name":
data["key"]["text"],
"AccessList":
data["value"]["AccessList"]["item"]["text"],
"Notification":
data["value"]["Notification"]["text"],
})
else:
v = data["value"].get("text", "")
if v == "":
if "item" in data["value"]:
v = " ".join(
[val.get("text") for val in data["value"]["item"]])
val_type = data["value"]["type"]
if val_type == "SOAP-ENC:Array":
val_type = data["value"][
"http://schemas.xmlsoap.org/soap/encoding/:arrayType"]
data_list.append(
AxirosACS._data_conversion({
"key": data["key"]["text"],
"type": val_type,
"value": v
}))
return data_list
@staticmethod
def _get_xml_key(resp, k="text"):
result = nested_lookup(
"Result",
xmltodict.parse(
resp.content,
attr_prefix="",
cdata_key=k,
process_namespaces=True,
namespaces=AxirosACS.namespaces,
),
)
return result
@staticmethod
def _parse_soap_response(response):
"""Parse the ACS response and return a\
list of dictionary with {key,type,value} pair."""
if "BFT_DEBUG" in os.environ:
msg = xml.dom.minidom.parseString(response.text)
print(msg.toprettyxml(indent=" ", newl=""))
result = AxirosACS._get_xml_key(response)
if len(result) > 1:
raise KeyError("More than 1 Result in reply not implemented yet")
result = result[0]
httpcode = result["code"]["text"]
msg = result["message"]["text"]
http_error_message = "HTTP Error code:" + httpcode + " " + msg
if httpcode != "200":
# with 507 (timeout/expired) there seems to be NO faultcode message
if httpcode == "500":
if "faultcode" not in result["message"]["text"]:
raise HTTPError(http_error_message)
else:
raise HTTPError(http_error_message)
# is this needed (might be overkill)?
if not all([
result.get("details"),
result.get("message"),
result.get("ticketid")
]):
e = TR069ResponseError(
"ACS malformed response (issues with either "
"details/message/ticketid).")
e.result = result # for inspection later
raise e
fault = "faultcode" in msg
if fault:
# could there be more than 1 fault in a response?
e = TR069FaultCode(msg)
e.faultdict = ast.literal_eval(msg[msg.index("{"):])
raise e
# 'item' is not present in FactoryReset RPC response
if "item" in result["details"]:
return AxirosACS._parse_xml_response(result["details"]["item"])
elif ("ns1:KeyValueStruct[0]" in result["details"]
["http://schemas.xmlsoap.org/soap/encoding/:arrayType"]):
return []
def _get_cmd_data(self, *args, **kwargs):
"""Return CmdOptTypeStruct_data. It is a helper method."""
c_opt_type = "ns0:CommandOptionsTypeStruct"
CmdOptTypeStruct_type = self.client.get_type(c_opt_type)
CmdOptTypeStruct_data = CmdOptTypeStruct_type(*args, **kwargs)
return CmdOptTypeStruct_data
def _get_class_data(self, *args, **kwargs):
"""Return CPEIdClassStruct_data. It is a helper method."""
cpe__id_type = "ns0:CPEIdentifierClassStruct"
CPEIdClassStruct_type = self.client.get_type(cpe__id_type)
CPEIdClassStruct_data = CPEIdClassStruct_type(*args, **kwargs)
return CPEIdClassStruct_data
def _get_pars_val_data(self, p_arr_type, *args, **kwargs):
"""Return ParValsParsClassArray_data.It is a helper method."""
ParValsClassArray_type = self.client.get_type(p_arr_type)
ParValsParsClassArray_data = ParValsClassArray_type(*args, **kwargs)
return ParValsParsClassArray_data
def _build_input_structs(self,
cpeid,
param,
action,
next_level=None,
**kwargs):
"""Helper function to create the get structs used in the get/set param values
NOTE: The command option is set as Syncronous
:param cpeid: the serial number of the modem through which ACS communication
happens.
:type cpeid: string
:param param: parameter to used
:type param: string or list of strings for get, dict or list of dict for set
:param action: one of GPV/SPV/GPN/AO/DO/SI/REBOOT/DOWNLOAD
:type action: string
:param next_level: defaults to null takes True/False
:type next_level: boolean
:raises: NA
:returns: param_data, cmd_data, cpeid_data
"""
if action == "SPV":
if type(param) is not list:
param = [param]
list_kv = []
# this is a list of single k,v pairs
for d in param:
k = next(iter(d))
list_kv.append({"key": k, "value": d[k]})
p_arr_type = "ns0:SetParameterValuesParametersClassArray"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, list_kv)
elif action == "GPV":
if type(param) is not list:
param = [param]
p_arr_type = "ns0:GetParameterValuesParametersClassArray"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, param)
elif action == "SPA":
if type(param) is not list:
param = [param]
list_kv = []
for d in param:
k = next(iter(d))
list_kv.append({
"Name":
k,
"Notification":
d[k],
"AccessListChange":
kwargs.get("access_param", "0"),
"AccessList": {
"item": "Subscriber"
},
"NotificationChange":
kwargs.get("notification_param", "1"),
})
p_arr_type = "ns0:SetParameterAttributesParametersClassArray"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, list_kv)
elif action == "GPN":
p_arr_type = "ns0:GetParameterNamesArgumentsStruct"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, NextLevel=next_level, ParameterPath=param)
elif action == "GPA":
if type(param) is not list:
param = [param]
p_arr_type = "ns0:GetParameterAttributesParametersClassArray"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, param)
elif action == "SI":
if type(param) is not list:
param = [param]
p_arr_type = "ns0:ScheduleInformArgumentsStruct"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, *param)
elif action in ["AO", "DO"]:
p_arr_type = "ns0:AddDelObjectArgumentsStruct"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, param, "")
elif action == "REBOOT":
p_arr_type = "xsd:string"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, param)
elif action == "DOWNLOAD":
p_arr_type = "ns0:DownloadArgumentsStruct"
ParValsParsClassArray_data = self._get_pars_val_data(
p_arr_type, *param)
else:
raise CodeError("Invalid action: " + action)
CmdOptTypeStruct_data = self._get_cmd_data(
Sync=True, Lifetime=AxirosACS.CPE_wait_time)
CPEIdClassStruct_data = self._get_class_data(cpeid=cpeid)
return ParValsParsClassArray_data, CmdOptTypeStruct_data, CPEIdClassStruct_data
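# A hedged usage sketch of _build_input_structs (the cpeid and parameter are
# hypothetical): for a GPV on a single parameter,
#   p, cmd, cpe = self._build_input_structs(
#       "SERIAL123", "Device.DeviceInfo.SoftwareVersion", action="GPV")
# the three returned zeep structs are what the subsequent GetParameterValues
# SOAP call on the Axiros client is expected to take.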
def close(self):
"""Implement to close ACS connection. TODO."""
pass
def get_ticketId(self, cpeid, param):
"""ACS server maintain a ticket ID for all TR069 RPC calls.
This method will contruct a TR069 GPV query, execute it and
return the ticket id associated with it.
:param cpeid: the serial number of the modem through which ACS communication happens.
:type cpeid: string
:param param: parameter to used
:type param: string
:raises: NA
:returns: ticketid
:rtype: string
"""
GetParameterValuesParametersClassArray_type = self.client.get_type(
a Library resource containing any formal logic used by the
activity definition.
kind: A description of the kind of resource the activity definition is representing.
For example, a MedicationRequest, a ServiceRequest, or a CommunicationRequest.
Typically, but not always, this is a Request resource.
profile: A profile to which the target of the activity definition is expected to
conform.
code: Detailed description of the type of activity; e.g. What lab test, what
procedure, what kind of encounter.
intent: Indicates the level of authority/intentionality associated with the activity
and where the request should fit into the workflow chain.
priority: Indicates how quickly the activity should be addressed with respect to other
requests.
doNotPerform: Set this to true if the definition is to indicate that a particular activity
should NOT be performed. If true, this element should be interpreted to
reinforce a negative coding. For example NPO as a code with a doNotPerform of
true would still indicate to NOT perform the action.
timingTiming: The period, timing or frequency upon which the described activity is to occur.
timingDateTime: The period, timing or frequency upon which the described activity is to occur.
timingAge: The period, timing or frequency upon which the described activity is to occur.
timingPeriod: The period, timing or frequency upon which the described activity is to occur.
timingRange: The period, timing or frequency upon which the described activity is to occur.
timingDuration: The period, timing or frequency upon which the described activity is to occur.
location: Identifies the facility where the activity will occur; e.g. home, hospital,
specific clinic, etc.
participant: Indicates who should participate in performing the action described.
productReference: Identifies the food, drug or other product being consumed or supplied in the
activity.
productCodeableConcept: Identifies the food, drug or other product being consumed or supplied in the
activity.
quantity: Identifies the quantity expected to be consumed at once (per dose, per meal,
etc.).
dosage: Provides detailed dosage instructions in the same way that they are described
for MedicationRequest resources.
bodySite: Indicates the sites on the subject's body where the procedure should be
performed (I.e. the target sites).
specimenRequirement: Defines specimen requirements for the action to be performed, such as required
specimens for a lab test.
observationRequirement: Defines observation requirements for the action to be performed, such as body
weight or surface area.
observationResultRequirement: Defines the observations that are expected to be produced by the action.
transform: A reference to a StructureMap resource that defines a transform that can be
executed to produce the intent resource using the ActivityDefinition instance
as the input.
dynamicValue: Dynamic values that will be evaluated to produce values for elements of the
resulting resource. For example, if the dosage of a medication must be
computed based on the patient's weight, a dynamic value would be used to
specify an expression that calculated the weight, and the path on the request
resource that would contain the result.
"""
from spark_fhir_schemas.r4.simple_types.id import idSchema
from spark_fhir_schemas.r4.complex_types.meta import MetaSchema
from spark_fhir_schemas.r4.simple_types.uri import uriSchema
from spark_fhir_schemas.r4.simple_types.code import codeSchema
from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema
from spark_fhir_schemas.r4.complex_types.contactdetail import (
ContactDetailSchema,
)
from spark_fhir_schemas.r4.simple_types.markdown import markdownSchema
from spark_fhir_schemas.r4.complex_types.usagecontext import UsageContextSchema
from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
from spark_fhir_schemas.r4.complex_types.relatedartifact import (
RelatedArtifactSchema,
)
from spark_fhir_schemas.r4.simple_types.canonical import canonicalSchema
from spark_fhir_schemas.r4.complex_types.timing import TimingSchema
from spark_fhir_schemas.r4.complex_types.age import AgeSchema
from spark_fhir_schemas.r4.complex_types.range import RangeSchema
from spark_fhir_schemas.r4.complex_types.duration import DurationSchema
from spark_fhir_schemas.r4.complex_types.activitydefinition_participant import (
ActivityDefinition_ParticipantSchema,
)
from spark_fhir_schemas.r4.complex_types.quantity import QuantitySchema
from spark_fhir_schemas.r4.complex_types.dosage import DosageSchema
from spark_fhir_schemas.r4.complex_types.activitydefinition_dynamicvalue import (
ActivityDefinition_DynamicValueSchema,
)
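# Recursion guard: FHIR types are mutually recursive, so once this resource
# name appears max_recursion_limit times in nesting_list (or nesting_depth
# reaches max_nesting_depth) the schema below collapses to a stub containing
# only the string id field.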
if (
max_recursion_limit
and nesting_list.count("ActivityDefinition") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["ActivityDefinition"]
schema = StructType(
[
# This is a ActivityDefinition resource
StructField("resourceType", StringType(), True),
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField(
"id",
idSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content might not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content. Often,
# this is a reference to an implementation guide that defines the special rules
# along with other profiles etc.
StructField(
"implicitRules",
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The base language in which the resource is written.
StructField(
"language",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# A human-readable narrative that contains a summary of the resource and can be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource and that modifies the understanding of the element
# that contains it and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer is allowed to define an extension, there is a set of requirements
# that SHALL be met as part of the definition of the extension. Applications
# processing a resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# An absolute URI that is used to identify this activity definition when it is
# referenced in a specification, model, design or an instance; also called its
# canonical identifier. This SHOULD be globally unique and SHOULD be a literal
# address at which at which an authoritative instance of this activity
# definition is (or
# httpx_caching/_policy.py
import calendar
import dataclasses
import logging
import time
import typing
from copy import copy
from dataclasses import dataclass
from email.utils import parsedate_tz
from enum import Enum
from typing import Awaitable, Callable, Generator, Iterable, Optional, Tuple, Union
import httpcore
from httpx import Headers, Request, codes
from ._heuristics import BaseHeuristic
from ._models import Response
from ._utils import async_callback_generator, sync_callback_generator
logger = logging.getLogger(__name__)
PERMANENT_REDIRECT_STATUSES = (
301,
308,
)
INVALIDATING_METHODS = ("PUT", "PATCH", "DELETE")
Source = Enum("Source", ["CACHE", "SERVER"])
Evaluation = Enum("Evaluation", ["GOOD", "INCONCLUSIVE"])
CacheVerb = Enum("CacheVerb", ["GET", "SET", "DELETE"])
VaryData = dict
# Cache actions
@dataclass
class CacheGet:
key: str
@dataclass
class CacheSet:
key: str
response: Response
vary_header_values: dict
deferred: bool = False
@dataclass
class CacheDelete:
key: str
CacheAction = Union[CacheGet, CacheSet, CacheDelete]
# HTTP request related IO actions
@dataclass
class MakeRequest:
request: Request
@dataclass
class CloseResponseStream:
response: Response
IOAction = Union[CacheAction, MakeRequest, CloseResponseStream]
AsyncIOCallback = Callable[[IOAction], Awaitable[Optional[Response]]]
SyncIOCallback = Callable[[IOAction], Optional[Response]]
@dataclass
class CachingPolicy:
request: Request
cache_etags: bool
heuristic: Optional[BaseHeuristic]
cacheable_methods: Iterable[str]
cacheable_status_codes: Iterable[int]
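# dataclasses.asdict assigned as a class attribute becomes a bound method,
# so self.kwargs() returns this policy's fields as a dict, ready to be fed
# into the caching_policy generator below.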
kwargs = dataclasses.asdict
@typing.no_type_check
def run(
self,
io_callback: SyncIOCallback,
) -> Tuple[Response, Source]:
# TODO: Shouldn't need to make mypy ignore this should I?
return sync_callback_generator(caching_policy, io_callback, self.kwargs())
@typing.no_type_check
async def arun(
self,
io_callback: AsyncIOCallback,
) -> Tuple[Response, Source]:
return await async_callback_generator(
caching_policy, io_callback, self.kwargs()
)
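# A minimal sketch (an assumption about _utils, not its actual code) of how
# sync_callback_generator is expected to drive caching_policy: every yielded
# IOAction is passed to io_callback and the result is sent back into the
# generator until it returns its final (response, source) value.
#
#   def drive(gen_fn, io_callback, kwargs):
#       gen = gen_fn(**kwargs)
#       result = None
#       while True:
#           try:
#               action = gen.send(result)
#           except StopIteration as stop:
#               return stop.value
#           result = io_callback(action)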
def caching_policy(
request: Request,
cache_etags: bool,
heuristic: BaseHeuristic,
cacheable_methods: Tuple[str],
cacheable_status_codes: Tuple[int],
) -> Generator[IOAction, Response, Tuple[Response, Source]]:
cached_response, evaluation = yield from try_from_cache_policy(
request, cacheable_methods
)
print(f"evaluation: {evaluation}")
if cached_response and evaluation == Evaluation.GOOD:
return cached_response, Source.CACHE
response, source = yield from try_from_server_policy(
request,
cached_response,
heuristic,
cache_etags,
cacheable_status_codes,
cacheable_methods,
)
return response, source
def try_from_cache_policy(
request: Request,
cacheable_methods: Iterable[str],
) -> Generator[
CacheAction,
Tuple[Response, VaryData],
Union[Tuple[Response, Evaluation], Tuple[None, None]],
]:
"""
yield cache actions
expects responses in return
may finally return valid response as StopIteration value
"""
# Will only yield GET or DELETE CacheActions. Does not write to cache.
cache_key = get_cache_key(request)
if request.method not in cacheable_methods:
return None, None
cc = parse_cache_control_directives(request.headers)
# Bail out if the request insists on fresh data
if "no-cache" in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return None, None
if cc.get("max-age") == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return None, None
logger.debug(f'Looking up "{cache_key}" in the cache')
cached_response: Optional[Response]
cached_vary_data: dict
cached_response, cached_vary_data = yield CacheGet(cache_key)
if cached_response is None:
logger.debug("No cache entry available")
return None, None
if not check_vary_headers(request.headers, cached_vary_data):
logger.debug("Ignoring cache entry due to vary header mismatch")
return None, None
# If we have a cached permanent redirect, return it immediately. We
# don't need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
#
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if cached_response.status_code in PERMANENT_REDIRECT_STATUSES:
msg = (
"Returning cached permanent redirect response "
"(ignoring date and etag information)"
)
logger.debug(msg)
return cached_response, Evaluation.GOOD
if "date" not in cached_response.headers:
if "etag" not in cached_response.headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug("Purging cached response: no date or etag")
yield CacheDelete(cache_key)
return None, None
logger.debug("Ignoring cached response: no date")
# TODO: Should this return None? Is the cached response now no longer relevant to this request?
return cached_response, Evaluation.INCONCLUSIVE
now = time.time()
# TODO: parsedate_tz might return None (no date value or malformed)
date = calendar.timegm(parsedate_tz(cached_response.headers["date"])) # type: ignore
current_age = max(0, now - date)
logger.debug("Current age based on date: %i", current_age)
resp_cc = parse_cache_control_directives(cached_response.headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if "max-age" in resp_cc:
freshness_lifetime = resp_cc["max-age"]
logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif "expires" in cached_response.headers:
expires = parsedate_tz(cached_response.headers["expires"])
if expires is not None:
expire_time = calendar.timegm(expires) - date # type: ignore
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
if "max-age" in cc:
freshness_lifetime = cc["max-age"]
logger.debug("Freshness lifetime from request max-age: %i", freshness_lifetime)
if "min-fresh" in cc:
min_fresh = cc["min-fresh"]
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug("Adjusted current age from min-fresh: %i", current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug("%i > %i", freshness_lifetime, current_age)
return cached_response, Evaluation.GOOD
# we're not fresh. If we don't have an Etag, clear it out
if "etag" not in cached_response.headers:
logger.debug('The cached response is "stale" with no etag, purging')
yield CacheDelete(cache_key)
return None, None
# No conclusive response yet.
return cached_response, Evaluation.INCONCLUSIVE
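# A hedged worked example of the freshness arithmetic above (values are
# hypothetical): a cached response dated 100s ago gives current_age=100;
# "Cache-Control: max-age=300" on that response gives freshness_lifetime=300;
# a request carrying "min-fresh=250" bumps current_age to 350, so the
# freshness_lifetime > current_age test fails and the entry is treated as
# stale even though it is still inside max-age.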
def try_from_server_policy(
request: Request,
cached_response: Optional[Response],
heuristic: BaseHeuristic,
cache_etags: bool,
cacheable_status_codes: Iterable[int],
cacheable_methods: Iterable[str],
) -> Generator[IOAction, Response, Tuple[Response, Source]]:
cache_key = get_cache_key(request)
print("we have this from the cache:", cached_response)
updated_headers = request.headers.copy()
if cached_response:
# Add conditional headers based on cached response
for source, target in [
("etag", "If-None-Match"),
("last-modified", "If-Modified-Since"),
]:
if source in cached_response.headers:
updated_headers[target] = cached_response.headers[source]
request = Request(
method=request.method,
url=request.url,
headers=updated_headers,
stream=request.stream,
)
server_response = yield MakeRequest(request)
# See if we should invalidate the cache.
if is_invalidating_method(request.method) and not codes.is_error(
server_response.status_code
):
yield CacheDelete(cache_key)
if request.method not in cacheable_methods:
return server_response, Source.SERVER
# Check for any heuristics that might update headers
# before trying to cache.
if heuristic:
# TODO: don't modify things, return things.
heuristic.apply(server_response.headers, server_response.status_code)
# apply any expiration heuristics
if server_response.status_code == 304:
# Make sure to clean up the ETag response stream just in case.
# Compliant servers will not return a body with ETag responses
yield CloseResponseStream(server_response)
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an ETag. In either case, we want to try and
# update the cache if that is the case.
if cached_response:
updated_cached_response = update_with_304_response(
cached_response, new_response_headers=server_response.headers
)
vary_header_values = get_vary_headers(
request.headers, updated_cached_response
)
yield CacheSet(cache_key, updated_cached_response, vary_header_values)
return updated_cached_response, Source.CACHE
return server_response, Source.SERVER
# We have a new response, let's make any changes necessary to the cache (store/delete)
cache_exists = bool(cached_response)
cache_action = cache_response_action(
request,
server_response,
cache_exists,
cache_etags,
cacheable_status_codes,
)
if cache_action:
wrapped_stream_response = yield cache_action
if wrapped_stream_response:
server_response = wrapped_stream_response
return server_response, Source.SERVER
def cache_response_action(
request: Request,
server_response: Response,
cache_exists: bool,
cache_etags: bool,
cacheable_status_codes: Iterable[int],
) -> Optional[Union[CacheSet, CacheDelete]]:
"""
Algorithm for caching responses.
Does some checks on request and response and deletes cache if appropriate
Then either:
No cache
Cache immediately with no body for redirects
Cache with body, this must be deferred.
Returns:
May return a request that has had its stream wrapped to trigger caching once read.
"""
cache_key = get_cache_key(request)
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
if server_response.status_code not in cacheable_status_codes:
logger.debug(
"Status code %s not in %s",
server_response.status_code,
cacheable_status_codes,
)
return None
logger.debug('Updating cache with response from "%s"', cache_key)
# TODO: Do this once on the request/response?
cc_req = parse_cache_control_directives(request.headers)
cc = parse_cache_control_directives(server_response.headers)
# Delete it from the cache if we happen to have it stored there
no_store = False
if "no-store" in cc:
no_store = True
logger.debug('Response header has "no-store"')
if "no-store" in cc_req:
no_store = True
logger.debug('Request header has "no-store"')
if no_store and cache_exists:
logger.debug('Purging existing cache entry to honor "no-store"')
return CacheDelete(cache_key)
if no_store:
return None
# https://tools.ietf.org/html/rfc7234#section-4.1:
# A Vary header field-value of "*" always fails to match.
# Storing such a response leads to a deserialization warning
# during cache lookup and is not allowed to ever be served,
# so storing it can be avoided.
if "*" in server_response.headers.get("vary", ""):
logger.debug('Response header has "Vary: *"')
return None
# If we've been given an etag, then keep the response
if cache_etags and "etag" in server_response.headers:
logger.debug("Caching due to etag")
# Add to the cache any permanent redirects. We do this before looking
# that the Date headers.
elif int(server_response.status_code) in PERMANENT_REDIRECT_STATUSES:
logger.debug("Caching permanent redirect")
response_body =
# C6SUMMER/allinclusive-kodi-pi
#############################################################################
#
# Copyright (C) 2013 Navi-X
#
# This file is part of Navi-X.
#
# Navi-X is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Navi-X is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from string import *
import sys,os.path,re,os,time,datetime,traceback,urllib,re,random,string,xbmc,xbmcgui,xbmcaddon,shutil
import zipfile
import copy
addon=xbmcaddon.Addon(id='script.navi-x')
root_path=addon.getAddonInfo('path')
sys.path.append(os.path.join(root_path.replace(";",""),'src'))
from libs2 import *
from settings import *
from CPlayList import *
from CFileLoader import *
from CURLLoader import *
from CDownLoader import *
from CPlayer import *
from CDialogBrowse import *
from CTextView import *
from CInstaller import *
from skin import *
from CBackgroundLoader import *
from CServer import *
try: Emulating=xbmcgui.Emulating
except: Emulating=False
######################################################################
# Description: Main Window class
######################################################################
class MainWindow(xbmcgui.WindowXML):
def __init__(self,strXMLname,strFallbackPath):#,strDefaultName,forceFallback):
self.CurItemURL=''
#self.delFiles(tempCacheDir) #clear the temp cache first
self.delFiles(imageViewCacheDir) #clear the image view cache first
if os.path.exists(RootDir+'CFileLoader.py'): os.remove(RootDir+'CFileLoader.py')
if os.path.exists(RootDir+'CFileLoader.pyc'): os.remove(RootDir+'CFileLoader.pyc')
#Create default DIRs if not existing
for DIRs in 'favorites','My Playlists','My Downloads', 'cache' : #/images', 'cache/temp':
if DIRs=='My Downloads' and os.path.exists(datapaths+DIRs) and not os.path.exists(datapaths+DIRs+'/readme.txt'):
shutil.copyfile((initDir+DIRs+'/readme.txt'),(datapaths+DIRs+'/readme.txt'))
elif not os.path.exists(RootDir+DIRs) and not os.path.exists(datapaths+DIRs):
if not os.path.exists(initDir+DIRs): os.makedirs(datapaths+DIRs)
else:
try: shutil.copytree(initDir+DIRs,datapaths+DIRs)
except:
if not os.path.exists(datapaths+DIRs):
shutil.copy(initDir+DIRs,datapaths+DIRs)
elif os.path.exists(RootDir+DIRs) and not os.path.exists(datapaths+DIRs):
shutil.move((RootDir+DIRs),(datapaths+DIRs))
elif os.path.exists(RootDir+DIRs) and os.path.exists(datapaths+DIRs) and DIRs !='My Downloads' :
root_src_dir=os.path.join(RootDir,DIRs)
root_dst_dir=os.path.join(datapaths+DIRs)
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir=src_dir.replace(root_src_dir,root_dst_dir)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir,file_)
dst_file = os.path.join(dst_dir,file_)
if os.path.exists(dst_file):
#os.rename(dst_file,dst_file+"old")
os.remove(dst_file)
shutil.move(src_file,dst_dir)
if 'init' not in RootDir:
try: shutil.rmtree(root_src_dir)
except: pass
#Create cache DIRs if not existing
for DIRs in cacheDir,imageCacheDir,tempCacheDir,nookieCacheDir,procCacheDir:
if not os.path.exists(DIRs):
os.mkdir(DIRs)
#dialog=xbmcgui.Dialog(); dialog.ok("Notice","First part done")
#check if default files exist, otherwise copy them from the root or init dirs, appending new files
for loopfiles in favorite_file,downloads_queue,parent_list,history_list,\
downloads_complete,incomplete_downloads,MyPlaylists_list,'My Playlists/My Playlists.plx':
if not os.path.exists(xbmc.translatePath(os.path.join(datapaths,loopfiles)))and loopfiles != 'My Playlists/My Playlists.plx':
shutil.copyfile(initDir+loopfiles,xbmc.translatePath(os.path.join(datapaths,loopfiles)))
#if root file exists, append datapath file to include entries then delete
if loopfiles=='My Playlists/My Playlists.plx':
if os.path.exists(os.path.join(datapaths,loopfiles)): rootdir=datapaths
else: rootdir=RootDir
if os.path.exists(rootdir+loopfiles):
EntryCounter=0; LineCounter=0; PrintLine=0
with open(rootdir+loopfiles,'r') as RootFile:
if loopfiles=='My Playlists/My Playlists.plx':
files='My Playlists.plx'
else:files=loopfiles
with open(datapaths+files,'a+') as DatapathFile:
for line in RootFile:
if line.startswith('#'): EntryCounter +=1
if line.startswith('#p'): EntryCounter=0
if EntryCounter > 0 and LineCounter > 2:
if PrintLine > 0 :
DatapathFile.write(line)
PrintLine+=1
LineCounter+=1
RootFile.close();DatapathFile.close()
if 'init' not in rootdir:
os.remove(rootdir+loopfiles)
#Create playlist object contains the parsed playlist data. The self.lists control displays
#the content of this list
self.playlist=CPlayList(window=self,whatlist='init - playlist')
self.downloadslist=CPlayList(whatlist='init - downloadslist')
#fill the playlist with downloads data
result=self.downloadslist.load_plx(downloads_complete)
if result != 0:
shutil.copyfile(initDir+downloads_complete,datapaths+downloads_complete)
self.downloadslist.load_plx(downloads_complete)
self.incompletelist = CPlayList(whatlist='init - incompletelist')
#fill the playlist with downloads data
result=self.incompletelist.load_plx(incomplete_downloads)
if result != 0:
shutil.copyfile(initDir+incomplete_downloads,datapaths+incomplete_downloads)
self.incompletelist.load_plx(incomplete_downloads)
self.downloadqueue=CPlayList(whatlist='init - downloadqueue')
#fill the playlist with downloads data
result=self.downloadqueue.load_plx(downloads_queue)
if result != 0:
shutil.copyfile(initDir+downloads_queue,datapaths+downloads_queue)
self.downloadqueue.load_plx(downloads_queue)
#self.parentlist=CPlayList()
self.parentlist=CPlayList(whatlist='init - parentlist')
#fill the playlist with downloads data
result=self.parentlist.load_plx(parent_list)
if result != 0:
shutil.copyfile(initDir+parent_list,datapaths+parent_list)
self.parentlist.load_plx(parent_list)
self.history=CPlayList(whatlist='init - history')
#fill the playlist with history data
result=self.history.load_plx(history_list)
if result != 0:
shutil.copyfile(initDir+history_list,datapaths+history_list)
self.history.load_plx(history_list)
#Set the socket timeout for all urllib2 open functions.
socket_setdefaulttimeout(url_open_timeout)
#Next a number of class private variables
self.home=home_URL
self.home_dat=home_URL
self.dwnlddir=myDownloadsDir
self.History=[] #contains the browse history
self.history_count=0 #number of entries in history array
self.userthumb='' #user thumb image
self.state_busy=0 # key handling busy state
self.state2_busy=0 # logo update busy state
self.URL='http://'
self.type=''
#default player will be DVD player
self.player_core=xbmc.PLAYER_CORE_DVDPLAYER
self.pl_focus=self.playlist
self.downlshutdown=False # shutdown after download flag
self.mediaitem=0
self.thumb_visible=False # true if thumb shall be displayed
self.vieworder='ascending' #ascending
self.SearchHistory=[] #contains the search history
self.background='' #current background image
self.password="" #parental control password.
self.hideblocked="" #parental control hide blocked content
self.access=False #parental control access.
self.mediaitem_cutpaste=0 # selected item for cut/paste
self.page=0 #selected page
self.descr_view=False
self.default_background='default'
self.disable_background='false'
self.listview='default'
self.smartcache='true'
self.page_size=page_size
#read the non volatile settings from the settings.dat file
self.onReadSettings()
#read the search history from the search.dat file
self.onReadSearchHistory()
#check if the home playlist points to the old website. If true then update the home URL.
if self.home==home_URL_old:
self.home=home_URL
for hURLa in home_URL_oldD:
if self.home==hURLa:
self.home=home_URL
self.firsttime=False
#xbmc.executebuiltin("xbmc.ActivateWindow(VideoOverlay)")
#end of function
######################################################################
# Description: class xbmcgui default member function.
# Parameters : -
# Return : -
######################################################################
def onInit( self ):
if self.firsttime==True:
return
self.firsttime=True
load_skin(self)
if nxserver.is_user_logged_in()==True:
if platform=='xbox':
pos=4
else:
pos=5
self.list3.getListItem(pos).setLabel("Sign out")
self.version.setLabel('version: '+Version+'.'+SubVersion+" (signed in)")
#thumb update task
self.bkgndloadertask=CBackgroundLoader(window=self)
self.bkgndloadertask.start()
#background download task
self.downloader=CDownLoader(window=self, playlist_src=self.downloadqueue, \
playlist_dst=self.downloadslist, \
playlist_inc=self.incompletelist)
self.downloader.start()
#Configure the info text control
SetInfoText(window=self.infotekst)
#check if there is a startup playlist
result=-1
if os.path.exists(RootDir+startup_list):
#yes there is a startup script, load it and use the first entry in the list.
startuplist=CPlayList(whatlist='onInit - startuplist')
result=startuplist.load_plx(RootDir+startup_list)
os.remove(RootDir+startup_list)
if result==0:
result=self.ParsePlaylist(mediaitem=startuplist.list[0],proxy="CACHING") #always use the first playlist item
if result != 0:
#there is no startup playlist, load the Navi-X home page
result=self.ParsePlaylist(URL=self.home,proxy="CACHING")
if result != 0: #failed
result=self.ParsePlaylist(URL=home_URL_mirror,proxy="CACHING") #mirror site
if result != 0: #failed
result=self.ParsePlaylist(URL=MyXBMC_list,proxy="CACHING")
if result != 0:
#failed to load page startup page from both main and backup server
dialog=xbmcgui.Dialog()
dialog.ok("Error","Please check your internet connection!")
return
#check the download queue
if self.downloadqueue.size() > 0:
dialog=xbmcgui.Dialog()
if dialog.yesno("Message","Download queue not empty. Start download now?")==True:
self.downloader.download_start()
#end of function
######################################################################
# Description: class xbmcgui default member function..
# Parameters : action=user action
# Return : -
######################################################################
def onAction(self,action):
try:
#select item is handled via other onClick().
if not action.getId()==ACTION_SELECT_ITEM:
self.onAction1(action)
#end of function
except: print '* Error during onAction.'
######################################################################
#
#
#
######################################################################
def doPageBack(self):
try:
if self.descr_view==True:
self.list3tb.setVisible(0)
self.list.setVisible(1)
self.setFocus(self.list)
self.descr_view=False
elif (self.URL==downloads_queue) or (self.URL==downloads_complete) or \
(self.URL==parent_list) or (self.URL==incomplete_downloads):
self.onCloseDownloads()
else:
#main list
if self.history_count > 0:
previous=self.History[len(self.History)-1]
result=self.ParsePlaylist(mediaitem=previous.mediaitem,start_index=previous.index,proxy="ENABLED")
if result==0: #success
flush=self.History.pop()
self.history_count=self.history_count-1
else:
self.setFocus(self.list3)
except:
print '* Error during backing up'
######################################################################
# Description: class xbmcgui default member function.
# Parameters : action=user action
# Return : -
######################################################################
def onAction1(self,action):
try:
self.state_action=1
#always allow Exit even if busy
if ((action==ACTION_SELECT_ITEM) and (self.getFocus()==self.list3)): #or ((action==ACTION_SELECT_ITEM) and (self.getFocus()==self.exitbutton2)):
pos=self.list3.getSelectedPosition()
if (platform=='xbox') and (pos==5) or (pos==6):
self.state_busy=1
#self.setInfoText("Shutting Down Navi-X...")
SetInfoText("Shutting Down Navi-X...",setlock=True)
self.onSaveSettings()
self.bkgndloadertask.kill()
self.bkgndloadertask.join(10) #timeout after 10 seconds.
self.downloader.kill()
self.downloader.join(10) #timeout after 10 seconds.
self.close() #exit
if self.state_busy==0:
if action==ACTION_SELECT_ITEM:
#main list
if self.getFocus()==self.list:
if (self.URL==downloads_file) or (self.URL==downloads_queue) or \
(self.URL==downloads_complete) or (self.URL==parent_list) or \
(self.URL==incomplete_downloads):
self.onSelectDownloads()
else:
pos=self.list.getSelectedPosition()
if pos >= 0:
self.SelectItem(self.playlist,pos)
#button option
try:
if self.getFocus() == self.list3:
#Left side option menu
pos=self.list3.getSelectedPosition()
if (platform=='xbox') and (pos > 2):
pos=pos+1
if pos==0:
self.pl_focus=self.playlist
self.ParsePlaylist(URL=self.home)
elif pos==1:
self.onOpenFavorites()
elif pos==2:
self.onOpenDownloads()
elif pos==3:
self.onChangeView()
elif pos==4:
self.setFocus(self.list)
self.onSelectURL()
elif pos==5: #sign in
if platform=='xbox':
pos=pos-1
self.setFocus(self.list)
if nxserver.is_user_logged_in()==False:
result=nxserver.login()
if result==0:
dialog=xbmcgui.Dialog()
dialog.ok(" Sign in","Sign in Successful.")
self.list3.getListItem(pos).setLabel("Sign out")
self.version.setLabel('version: '+Version+'.'+SubVersion+" (signed in)")
elif result == -1:
dialog=xbmcgui.Dialog()
dialog.ok(" Sign in","Sign in failed.")
else: #sign out
#user already logged in
dialog=xbmcgui.Dialog()
if dialog.yesno("Message","Sign out?")==True:
nxserver.logout()
self.list3.getListItem(pos).setLabel("Sign in")
dialog=xbmcgui.Dialog()
dialog.ok(" Sign out","Sign out successful.")
self.version.setLabel('version: '+Version+'.'+SubVersion)
except: pass
try:
if self.getFocus()==self.list4:
#Right side option menu
pos=self.list4.getSelectedPosition()
if self.descr_view==True:
#self.setFocus(self.list3tb)
self.setFocus(self.getControl(128))
else:
self.setFocus(self.list)
if pos==0: # Play
self.onPlayUsing()
elif pos==1: # Add to Favs
self.selectBoxMainList(choice=9)
elif pos==2: # Download
self.onDownload()
elif pos==3: # Rate it
pos=self.list.getSelectedPosition()
if self.pl_focus.list[pos].rating=='disabled':
dialog=xbmcgui.Dialog()
dialog.ok(" Error","Not supported.")
elif self.pl_focus.URL.find(nxserver_URL) != -1:
nxserver.rate_item(self.pl_focus.list[pos])
self.UpdateRateingImage()
else:
dialog=xbmcgui.Dialog()
dialog.ok(" Error","Only Navi-Xtreme playlists can be rated.")
elif pos==4: # Reload Playlist
if self.descr_view == True:
self.list3tb.setVisible(0)
self.list.setVisible(1)
self.setFocus(self.list)
self.descr_view = False
self.ParsePlaylist(mediaitem=self.mediaitem, proxy="CACHING")
elif pos==5: #More Options
if self.IsFavoriteListFocus()==True:
self.selectBoxFavoriteList()
elif (self.URL==downloads_file) or (self.URL==downloads_queue) or \
(self.URL==downloads_complete) or (self.URL==parent_list) or \
(self.URL==incomplete_downloads):
self.selectBoxDownloadsList()
else:
self.selectBoxMainList()
except: pass
elif (action==ACTION_PARENT_DIR) or (action==ACTION_PREVIOUS_MENU) or (action==ACTION_PREVIOUS_MENU2):
try:
if self.descr_view==True:
self.list3tb.setVisible(0)
self.list.setVisible(1)
self.setFocus(self.list)
self.descr_view=False
elif (self.URL==downloads_queue) or (self.URL==downloads_complete) or \
(self.URL==parent_list) or (self.URL==incomplete_downloads):
self.onCloseDownloads()
else:
#main list
if self.history_count > 0:
previous=self.History[len(self.History)-1]
result=self.ParsePlaylist(mediaitem=previous.mediaitem,start_index=previous.index,proxy="ENABLED")
if result==0: #success
flush=self.History.pop()
self.history_count=self.history_count-1
else:
self.setFocus(self.list3)
except:
print '* Error during backing up'
elif action==ACTION_YBUTTON:
self.onPlayUsing()
elif action==ACTION_MOVE_RIGHT:
if (self.getFocus()==self.list) and (self.list != self.list5):
result=self.onShowDescription()
if result != 0:
as .user ==)
# and functional filters that aren't currently possible using the orm (such as instance calculated values
# or annotations/tags). List splits those two filters and applies limits/offsets
# only after functional filters (if any) using python.
orm_filters, fn_filters = self._split_filters(filters)
if not fn_filters:
# if no fn_filtering required, we can use the 'all orm' version with limit offset
return self._orm_list(filters=orm_filters, order_by=order_by, limit=limit, offset=offset, **kwargs)
# fn filters will change the number of items returnable by limit/offset - remove them here from the orm query
query = self.query(filters=orm_filters, order_by=order_by, limit=None, offset=None, **kwargs)
items = query.all()
# apply limit, offset after SQL filtering
items = self._apply_fn_filters_gen(items, fn_filters)
return list(self._apply_fn_limit_offset_gen(items, limit, offset))
def _split_filters(self, filters):
"""
Splits `filters` into a tuple of two lists:
a list of filters to be added to the SQL query
and a list of functional filters to be applied after the SQL query.
"""
orm_filters: list = []
fn_filters: list = []
if filters is None:
return (orm_filters, fn_filters)
if not isinstance(filters, list):
filters = [filters]
for filter_ in filters:
if not hasattr(filter_, "filter_type"):
orm_filters.append(filter_)
elif filter_.filter_type == "function":
fn_filters.append(filter_.filter)
elif filter_.filter_type == "orm_function":
orm_filters.append(filter_.filter(self.model_class))
else:
orm_filters.append(filter_.filter)
return (orm_filters, fn_filters)
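# A hedged illustration (filter values are hypothetical) of the split above:
#   parsed_filter("orm", Model.table.c.deleted == false())   -> orm_filters
#   parsed_filter("function", lambda item: "x" in item.tags) -> fn_filters
# The first is applied inside the SQL query, the second in python afterwards.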
def _orm_list(self, query=None, **kwargs):
"""
Sends kwargs to build the query return all models found.
"""
query = query or self.query(**kwargs)
return query.all()
def _apply_fn_filters_gen(self, items, filters):
"""
If all the filter functions in `filters` return True for an item in `items`,
yield that item.
"""
# cpu-expensive
for item in items:
filter_results = [f(item) for f in filters]
if all(filter_results):
yield item
def _apply_fn_limit_offset_gen(self, items, limit, offset):
"""
Iterate over `items` and begin yielding items after
`offset` number of items and stop when we've yielded
`limit` number of items.
"""
# change negative limit, offset to None
if limit is not None and limit < 0:
limit = None
if offset is not None and offset < 0:
offset = None
yielded = 0
for i, item in enumerate(items):
if offset is not None and i < offset:
continue
if limit is not None and yielded >= limit:
break
yield item
yielded += 1
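# A minimal sketch of the generator above (not from the original source):
#   list(self._apply_fn_limit_offset_gen(range(10), limit=3, offset=2))
#   # -> [2, 3, 4]: the first two items are skipped, then three are yielded.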
def by_ids(self, ids, filters=None, **kwargs):
"""
Returns an in-order list of models with the matching ids in `ids`.
"""
if not ids:
return []
ids_filter = parsed_filter("orm", self.model_class.table.c.id.in_(ids))
found = self.list(filters=self._munge_filters(ids_filter, filters), **kwargs)
# TODO: this does not order by the original 'ids' array
# ...could use get (supposedly since found are in the session, the db won't be hit twice)
# return map( self.session().query( self.model_class ).get, ids )
# ...could implement own version here - slow?
return self._order_items_by_id(ids, found)
def _order_items_by_id(self, ids, items):
"""
Given a list of (unique) ids and a list of items having an 'id' attribute,
return items that have the given ids in that order.
If an id in ids is not found or if an item in items doesn't have a given
id, they will not be in the returned list.
"""
ID_ATTR_NAME = "id"
# TODO:?? aside from sqlalx.get mentioned above, I haven't seen an in-SQL way
# to make this happen. This may not be the most efficient way either.
# NOTE: that this isn't sorting by id - this is matching the order in items to the order in ids
# move items list into dict by id
item_dict = {}
for item in items:
item_id = getattr(item, ID_ATTR_NAME, None)
if item_id:
item_dict[item_id] = item
# pull from map in order of ids
in_order = []
for id in ids:
if id in item_dict:
in_order.append(item_dict[id])
return in_order
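# A hedged example (items are hypothetical) of _order_items_by_id: given
# ids=[3, 1, 2] and items whose .id values are [1, 2, 3], the result is the
# same items reordered as [3, 1, 2]; ids with no matching item, and items
# with a falsy id, are silently dropped.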
def create(self, flush=True, *args, **kwargs):
"""
Generically create a new model.
"""
# override in subclasses
item = self.model_class(*args, **kwargs)
self.session().add(item)
if flush:
self.session().flush()
return item
def copy(self, item, **kwargs):
"""
Clone or copy an item.
"""
raise exceptions.NotImplemented("Abstract method")
def update(self, item, new_values, flush=True, **kwargs):
"""
Given a dictionary of new values, update `item` and return it.
..note: NO validation or deserialization occurs here.
"""
self.session().add(item)
for key, value in new_values.items():
if hasattr(item, key):
setattr(item, key, value)
if flush:
self.session().flush()
return item
def associate(self, associate_with, item, foreign_key_name=None):
"""
Generically associate `item` with `associate_with` based on `foreign_key_name`.
"""
foreign_key_name = foreign_key_name or self.foreign_key_name
setattr(associate_with, foreign_key_name, item)
return item
def _foreign_key(self, associated_model_class, foreign_key_name=None):
foreign_key_name = foreign_key_name or self.foreign_key_name
return getattr(associated_model_class, foreign_key_name)
def query_associated(self, associated_model_class, item, foreign_key_name=None):
"""
Generically query other items that have been associated with this `item`.
"""
foreign_key = self._foreign_key(associated_model_class, foreign_key_name=foreign_key_name)
return self.session().query(associated_model_class).filter(foreign_key == item)
# a rename of sql DELETE to differentiate from the Galaxy notion of mark_as_deleted
# def destroy( self, item, **kwargs ):
# return item
T = TypeVar("T")
# ---- code for classes that use one *main* model manager
# TODO: this may become unnecessary if we can access managers some other way (class var, app, etc.)
class HasAModelManager(Generic[T]):
"""
Mixin used where serializers, deserializers, filter parsers, etc.
need some functionality around the model they're mainly concerned with
and would perform that functionality with a manager.
"""
#: the class used to create this serializer's generically accessible model_manager
model_manager_class: Type[
T
] # ideally this would be Type[ModelManager] but HistoryContentsManager cannot be a ModelManager
# examples where this doesn't really work are ConfigurationSerializer (no manager)
# and contents (2 managers)
app: MinimalManagerApp
def __init__(self, app: MinimalManagerApp, manager=None, **kwargs):
self._manager = manager
self.app = app
@property
def manager(self) -> T:
"""Return an appropriate manager if it exists, instantiate if not."""
# PRECONDITION: assumes self.app is assigned elsewhere
if not self._manager:
# TODO: pass this serializer to it
self._manager = self.app[self.model_manager_class]
# this will error for unset model_manager_class'es
return self._manager
# ==== SERIALIZERS/to_dict,from_dict
class ModelSerializingError(exceptions.InternalServerError):
"""Thrown when request model values can't be serialized"""
class ModelDeserializingError(exceptions.ObjectAttributeInvalidException):
"""Thrown when an incoming value isn't usable by the model
(bad type, out of range, etc.)
"""
class SkipAttribute(Exception):
"""
Raise this inside a serializer to prevent the returned dictionary from having
the associated key or value for this attribute.
"""
class Serializer(Protocol):
def __call__(self, item: Any, key: str, **context) -> Any:
...
class ModelSerializer(HasAModelManager[T]):
"""
Turns models into JSONable dicts.
Maintains a map of requestable keys and the Callable() serializer functions
that should be called for those keys.
E.g. { 'x' : lambda item, key: item.x, ... }
Note: if a key to serialize is not listed in the Serializer.serializable_keyset
or serializers, it will not be returned.
To serialize call:
my_serializer = MySerializer( app )
...
keys_to_serialize = [ 'id', 'name', 'attr1', 'attr2', ... ]
item_dict = MySerializer.serialize( my_item, keys_to_serialize )
"""
default_view: Optional[str]
views: Dict[str, List[str]]
def __init__(self, app: MinimalManagerApp, **kwargs):
"""
Set up serializer map, any additional serializable keys, and views here.
"""
super().__init__(app, **kwargs)
# a list of valid serializable keys that can use the default (string) serializer
# this allows us to: 'mention' the key without adding the default serializer
# TODO: we may want to eventually error if a key is requested
# that is in neither serializable_keyset or serializers
self.serializable_keyset: Set[str] = set()
# a map of dictionary keys to the functions (often lambdas) that create the values for those keys
self.serializers: Dict[str, Serializer] = {}
# add subclass serializers defined there
self.add_serializers()
# update the keyset by the serializers (removing the responsibility from subclasses)
self.serializable_keyset.update(self.serializers.keys())
# views are collections of serializable attributes (a named array of keys)
# inspired by model.dict_{view}_visible_keys
self.views = {}
self.default_view = None
@staticmethod
def url_for(*args, context=None, **kwargs):
trans = context and context.get("trans")
url_for = trans and trans.url_builder or gx_url_for
return url_for(*args, **kwargs)
def add_serializers(self):
"""
Register a map of attribute keys -> serializing functions that will serialize
the attribute.
"""
self.serializers.update(
{
"id": self.serialize_id,
"create_time": self.serialize_date,
"update_time": self.serialize_date,
}
)
def add_view(self, view_name, key_list, include_keys_from=None):
"""
Add the list of serializable attributes `key_list` to the serializer's
view dictionary under the key `view_name`.
If `include_keys_from` is a proper view name, extend `key_list` by
Pin(num='23',name='P4.6/TBOUTH/A15/OA1I3',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='TA1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='P3.3/UCB0CLK/UCA0STE',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='P4.7/TBCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='TA2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='P3.4/UCA0TXD/UCA0SIMO',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='SMCLK/TCK/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='P3.5/UCA0RXD/UCA0SOMI',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='TA0/TMS/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='P4.0/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='P3.6/A6/OA0I2',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='TA1/TDI/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='P4.1/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='P3.7/A7/OA1I2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='TA2/TDI/TDO/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='P4.2/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TA1/A3/VREF-/VeREF-/OA1I1/OA1O/P2.3',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F2234IRHA',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='40pin QFN, 32KB + 256B Flash Memory, 1KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F2254IRHA', 'MSP430F2274IRHA'],pins=[
Pin(num='1',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='XOUT/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='XIN/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='~RST~/NMI/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='ACLK/A0/OA0I0/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='TAINCLK/SMCLK/A1/OA0O/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='TA0/A2/OA0I1/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P3.0/UCB0STE/UCA0CLK/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='P3.1/UCB0SIMO/UCB0SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='P4.5/TB2/A14/OA0I3',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='TA0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='Rosc/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='P3.2/UCB0SOMI/UCB0SCL',func=Pin.BIDIR,do_erc=True),
Pin(num='21',name='P4.6/TBOUTH/A15/OA1I3',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='TA1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='P3.3/UCB0CLK/UCA0STE',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='P4.7/TBCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='TA2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='23',name='P3.4/UCA0TXD/UCA0SIMO',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='SMCLK/TCK/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='P3.5/UCA0RXD/UCA0SOMI',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='TA0/TMS/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='P4.0/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='P3.6/A6/OA0I2',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='TA1/TDI/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='P4.1/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='P3.7/A7/OA1I2',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='TA2/TDI/TDO/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='P4.2/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='TA1/A3/VREF-/VeREF-/OA1I1/OA1O/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='SBWTCK/TEST',do_erc=True),
Pin(num='18',name='P4.3/TB0/A12/OA0O',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='TA2/A4/VREF+/VeREF+/OA1I0/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='19',name='P4.4/TB1/A13/OA1O',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TACLK/ADC10CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='DVCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='MSP430F2234IYFF',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='49ball BGA, 32KB + 256B Flash Memory, 1KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F2254IYFF', 'MSP430F2274IYFF'],pins=[
Pin(num='A1',name='XOUT/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='B1',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='C1',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='D1',name='SBWTCK/TEST',do_erc=True),
Pin(num='E1',name='TA0/TMS/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='F1',name='SMCLK/TCK/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='G1',name='TA2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='A2',name='XIN/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='B2',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='C2',name='ROSC/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='D2',name='TA2/TDO/TDI/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='E2',name='TA1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='F2',name='TACLK/ADC10CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='G2',name='TA0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='B3',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='C3',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='D3',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='E3',name='TA1/TDI/TCLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='F3',name='TA1/VREF-/VeREF-/OA1I1/OA1O/A3/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='G3',name='TA2/VREF+/VeREF+/OA1I0/A4/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='A4',name='ACLK/OA0I0/A0/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='B4',name='TAINCLK/SMCLK/OA0O/A1/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='D4',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='E4',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='F4',name='P3.6/A6/OA0I2',func=Pin.BIDIR,do_erc=True),
Pin(num='G4',name='P3.7/A7/OA1I2',func=Pin.BIDIR,do_erc=True),
Pin(num='A5',name='TA0/OA0I1/A2/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='B5',name='P3.0/UCB0STE/UCA0CLK/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='C5',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='D5',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='E5',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='F5',name='P4.7/TBCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='G5',name='P3.5/UCA0RXD/UCA0SOMI',func=Pin.BIDIR,do_erc=True),
Pin(num='A6',name='P3.1/UCB0SIMO/UCB0SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='B6',name='P3.3/UCB0CLK/UCA0STE',func=Pin.BIDIR,do_erc=True),
Pin(num='C6',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='D6',name='P4.0/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='E6',name='P4.2/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='F6',name='P4.5/TB2/A14/OA0I3',func=Pin.BIDIR,do_erc=True),
Pin(num='G6',name='P3.4/UCA0TXD/UCA0SIMO',func=Pin.BIDIR,do_erc=True),
Pin(num='A7',name='P3.2/UCB0SOMI/UCB0SCL',func=Pin.BIDIR,do_erc=True),
Pin(num='B7',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='C7',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='D7',name='P4.1/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='E7',name='P4.3/TB0/A12/OA0O',func=Pin.BIDIR,do_erc=True),
Pin(num='F7',name='P4.4/TB1/A13/OA1O',func=Pin.BIDIR,do_erc=True),
Pin(num='G7',name='P4.6/TBOUTH/A15/OA1I3',func=Pin.BIDIR,do_erc=True)]),
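# Usage sketch (illustrative only, not part of the generated data; the
# library nickname 'msp430' below is an assumption). Once this file is
# loaded as a SKiDL library, any template above can be copied and wired
# by pin name or pin number:
#
#   from skidl import Part, Net
#   uc = Part('msp430', 'MSP430F2234IYFF')  # instantiates the TEMPLATE above
#   vcc, gnd = Net('VCC'), Net('GND')
#   vcc += uc['DVCC']                       # connects every ball named DVCC
#   gnd += uc['DVSS'], uc['AVSS']           # PWRIN pins feed ERC power checks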
Part(name='MSP430F2330IRHA',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='40pin QFN, 32KB + 256B Flash Memory, 2KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F2350IRHA', 'MSP430F2370IRHA'],pins=[
Pin(num='1',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='XIN/CA6/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='XOUT/CA7/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='TACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='TA0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='TA1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='TA2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='SMCLK/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='TA0/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='TA1/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='P3.2/UCB0SOMI/UCB0SCL',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='P4.4/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='TA2/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='21',name='P3.3/UCB0CLK/UCA0STE',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='P4.5/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='ACLK/CA2/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='P3.4/UCA0TXD/UCA0SIMO',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='P4.6/TBOUTH/ACLK',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='TAINCLK/CA3/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='P3.5/UCA0RXD/UCA0SOMI',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='P4.7/TBCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='TA0/CAOUT/CA4/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='P3.6',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='TDO/TDI',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='TA1/CA0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='P3.7',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='TDI/TCLK',do_erc=True),
Pin(num='16',name='TA2/CA1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='P4.0/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='TMS',do_erc=True),
Pin(num='17',name='ROSC/CA5/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='P4.1/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='TCK',do_erc=True),
Pin(num='18',name='P3.0/UCB0STE/UCA0CLK',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='P4.2/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='~RST~/NMI',do_erc=True),
Pin(num='19',name='P3.1/UCB0SIMO/UCB0SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='P4.3/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='VSS',func=Pin.PWRIN,do_erc=True)]),
Part(name='MSP430F2330IYFF',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='49ball BGA, 32KB + 256B Flash Memory, 2KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F2350IYFF', 'MSP430F2370IYFF'],pins=[
Pin(num='A1',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='B1',name='VSS',func=Pin.PWRIN,do_erc=True),
Pin(num='C1',name='TCK',do_erc=True),
Pin(num='D1',name='TDI/TCLK',do_erc=True),
Pin(num='E1',name='TDO/TDI',func=Pin.BIDIR,do_erc=True),
Pin(num='F1',name='P4.5/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='G1',name='P4.4/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='A2',name='XIN/CA6/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='C2',name='~RST~/NMI',do_erc=True),
Pin(num='D2',name='TMS',do_erc=True),
Pin(num='E2',name='P4.7/TBCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='F2',name='P4.6/TBOUTH/ACLK',func=Pin.BIDIR,do_erc=True),
Pin(num='G2',name='P4.2/TB2',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='XOUT/CA7/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='B3',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='F3',name='P4.3/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='G3',name='P4.1/TB1',func=Pin.BIDIR,do_erc=True),
Pin(num='B4',name='TACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='TA0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='F4',name='P4.0/TB0',func=Pin.BIDIR,do_erc=True),
Pin(num='G4',name='P3.7',func=Pin.BIDIR,do_erc=True),
Pin(num='A5',name='TA1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='B5',name='TA2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='C5',name='ACLK/CA2/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='F5',name='P3.6',func=Pin.BIDIR,do_erc=True),
Pin(num='G5',name='P3.5/UCA0RXD/UCA0SOMI',func=Pin.BIDIR,do_erc=True),
Pin(num='A6',name='SMCLK/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='B6',name='TA0/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='C6',name='TA0/CAOUT/CA4/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='D6',name='TA2/CA1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='E6',name='P3.0/UCB0STE/UCA0CLK',func=Pin.BIDIR,do_erc=True),
Pin(num='F6',name='P3.2/UCB0SOMI/UCB0SCL',func=Pin.BIDIR,do_erc=True),
Pin(num='G6',name='P3.4/UCA0TXD/UCA0SIMO',func=Pin.BIDIR,do_erc=True),
Pin(num='A7',name='TA1/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='B7',name='TA2/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='C7',name='TAINCLK/CA3/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='D7',name='TA1/CA0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='E7',name='ROSC/CA5/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='F7',name='P3.1/UCB0SIMO/UCB0SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='G7',name='P3.3/UCB0CLK/UCA0STE',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5217IRGC',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64pin QFN, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5219IRGC'],pins=[
Pin(num='1',name='P6.0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P6.4/CB4',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P6.5/CB5',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='P6.6/CB6',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P6.7/CB7',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.0',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='P5.1',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='DVIO',func=Pin.PWRIN,do_erc=True),
Pin(num='50',name='P7.1/TB0.1',func=Pin.BIDIR,do_erc=True),
Pin(num='60',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='51',name='P7.2/TB0.2',func=Pin.BIDIR,do_erc=True),
Pin(num='61',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='52',name='P7.3/TB0.3',func=Pin.BIDIR,do_erc=True),
Pin(num='62',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='53',name='P7.4/TB0.4',func=Pin.BIDIR,do_erc=True),
Pin(num='63',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='54',name='P7.5/TB0.5',func=Pin.BIDIR,do_erc=True),
Pin(num='64',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='55',name='BSLEN',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='56',name='~RST~/NMI',do_erc=True),
Pin(num='17',name='VCORE',func=Pin.PWRIN,do_erc=True),
Pin(num='27',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='47',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='57',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='TA2CLK/SMCLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='58',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='49',name='P7.0/TB0.0',func=Pin.BIDIR,do_erc=True),
Pin(num='59',name='SBWTCK/TEST',do_erc=True)]),
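# ERC sketch (illustrative; 'msp430' is again an assumed library nickname):
# do_erc=True marks a pin for SKiDL's electrical rule check, so unconnected
# pins get reported. A deliberately unused pin can be opted out per instance:
#
#   uc = Part('msp430', 'MSP430F5217IRGC')
#   uc['BSLEN'].do_erc = False   # silence ERC for this one pin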
Part(name='MSP430F5217IYFF',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64ball BGA, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5219IYFF'],pins=[
Pin(num='A1',name='P6.1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='B1',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='C1',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='D1',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='E1',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='F1',name='P7.5/TB0.5',func=Pin.BIDIR,do_erc=True),
Pin(num='G1',name='P7.3/TB0.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H1',name='P7.0/TB0.0',func=Pin.BIDIR,do_erc=True),
Pin(num='B2',name='P6.2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='C2',name='P6.0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='D2',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='E2',name='BSLEN',func=Pin.BIDIR,do_erc=True),
Pin(num='F2',name='P7.4/TB0.4',func=Pin.BIDIR,do_erc=True),
Pin(num='G2',name='P7.1/TB0.1',func=Pin.BIDIR,do_erc=True),
Pin(num='H2',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='P6.7/CB7',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='B3',name='P6.5/CB5',func=Pin.BIDIR,do_erc=True),
Pin(num='C3',name='P6.3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='E3',name='~RST~/NMI',do_erc=True),
Pin(num='F3',name='P7.2/TB0.2',func=Pin.BIDIR,do_erc=True),
Pin(num='G3',name='P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='H3',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='A4',name='P5.1',func=Pin.BIDIR,do_erc=True),
Pin(num='A4',name='P6.4/CB4',func=Pin.BIDIR,do_erc=True),
Pin(num='B4',name='P5.0',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='P6.6/CB6',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='D4',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='E4',name='SBWTCK/TEST',do_erc=True),
Pin(num='F4',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='G4',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H4',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='A5',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='B5',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='C5',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='D5',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='E5',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='F5',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='G5',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='H5',name='DVIO',func=Pin.PWRIN,do_erc=True),
Pin(num='A6',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='C6',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='D6',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='G6',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='H6',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='A7',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='B7',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='C7',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='D7',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='E7',name='TA2CLK/SMCLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='F7',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='G7',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='H7',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H7',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='J7',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='A8',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='B8',name='VCORE',func=Pin.PWRIN,do_erc=True),
Pin(num='C8',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='D8',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='E8',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='F8',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='G8',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='H8',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5227IRGC',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64pin QFN, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5229IRGC'],pins=[
Pin(num='1',name='P6.0/CB0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/CB1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/CB2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/CB3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P6.4/CB4/A4',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P6.5/CB5/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='P6.6/CB6/A6',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P6.7/CB7/A7',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='DVIO',func=Pin.PWRIN,do_erc=True),
Pin(num='50',name='P7.1/TB0.1',func=Pin.BIDIR,do_erc=True),
Pin(num='60',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='51',name='P7.2/TB0.2',func=Pin.BIDIR,do_erc=True),
Pin(num='61',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='52',name='P7.3/TB0.3',func=Pin.BIDIR,do_erc=True),
Pin(num='62',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='53',name='P7.4/TB0.4',func=Pin.BIDIR,do_erc=True),
Pin(num='63',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='54',name='P7.5/TB0.5',func=Pin.BIDIR,do_erc=True),
Pin(num='64',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='55',name='BSLEN',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='56',name='~RST~/NMI',do_erc=True),
Pin(num='17',name='VCORE',func=Pin.PWRIN,do_erc=True),
Pin(num='27',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='47',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='57',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='TA2CLK/SMCLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='58',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='49',name='P7.0/TB0.0',func=Pin.BIDIR,do_erc=True),
Pin(num='59',name='SBWTCK/TEST',do_erc=True)]),
Part(name='MSP430F5227IYFF',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64ball BGA, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5229IYFF'],pins=[
Pin(num='A1',name='P6.1/CB1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='B1',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='C1',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='D1',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='E1',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='F1',name='P7.5/TB0.5',func=Pin.BIDIR,do_erc=True),
Pin(num='G1',name='P7.3/TB0.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H1',name='P7.0/TB0.0',func=Pin.BIDIR,do_erc=True),
Pin(num='B2',name='P6.2/CB2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='C2',name='P6.0/CB0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='D2',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='E2',name='BSLEN',func=Pin.BIDIR,do_erc=True),
Pin(num='F2',name='P7.4/TB0.4',func=Pin.BIDIR,do_erc=True),
Pin(num='G2',name='P7.1/TB0.1',func=Pin.BIDIR,do_erc=True),
Pin(num='H2',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='P6.7/CB7/A7',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='B3',name='P6.5/CB5/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='C3',name='P6.3/CB3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='E3',name='~RST~/NMI',do_erc=True),
Pin(num='F3',name='P7.2/TB0.2',func=Pin.BIDIR,do_erc=True),
Pin(num='G3',name='P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='H3',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='A4',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='A4',name='P6.4/CB4/A4',func=Pin.BIDIR,do_erc=True),
Pin(num='B4',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='P6.6/CB6/A6',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='D4',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='E4',name='SBWTCK/TEST',do_erc=True),
Pin(num='F4',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='G4',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H4',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='A5',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='B5',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='C5',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='D5',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='E5',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='F5',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='G5',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='H5',name='DVIO',func=Pin.PWRIN,do_erc=True),
Pin(num='A6',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='C6',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='D6',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='G6',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='H6',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='A7',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='B7',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='C7',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='D7',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='E7',name='TA2CLK/SMCLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='F7',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='G7',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='H7',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H7',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='J7',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='A8',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='B8',name='VCORE',func=Pin.PWRIN,do_erc=True),
Pin(num='C8',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='D8',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='E8',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='F8',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='G8',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='H8',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5232IRGZ',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='MSP430F5234, 48pin QFN, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5234IRGZ'],pins=[
Pin(num='1',name='P6.3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.4/CB4',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.5/CB5',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P5.0',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P5.1',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='SBWTCK/TEST',do_erc=True),
Pin(num='11',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='VCORE',func=Pin.PASSIVE,do_erc=True),
Pin(num='22',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='P6.0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='37',name='~RST~/NMI',do_erc=True),
Pin(num='47',name='P6.1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='38',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='P6.2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5237IRGC',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64pin QFN, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5239IRGC'],pins=[
Pin(num='1',name='P6.0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P6.4/CB4',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P6.5/CB5',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='P6.6/CB6',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P6.7/CB7',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.0',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='P5.1',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='50',name='P7.1/TB0.1',func=Pin.BIDIR,do_erc=True),
Pin(num='60',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='51',name='P7.2/TB0.2',func=Pin.BIDIR,do_erc=True),
Pin(num='61',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='52',name='P7.3/TB0.3',func=Pin.BIDIR,do_erc=True),
Pin(num='62',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='53',name='P7.4/TB0.4',func=Pin.BIDIR,do_erc=True),
Pin(num='63',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='54',name='P7.5/TB0.5',func=Pin.BIDIR,do_erc=True),
Pin(num='64',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='56',name='~RST~/NMI',do_erc=True),
Pin(num='17',name='VCORE',func=Pin.PWRIN,do_erc=True),
Pin(num='27',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='47',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='57',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='SMCLK/TA2CLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='58',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='49',name='P7.0/TB0.0',func=Pin.BIDIR,do_erc=True),
Pin(num='59',name='SBWTCK/TEST',do_erc=True)]),
Part(name='MSP430F5242IRGZ',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='MSP430F5244, 48pin QFN, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5244IRGZ'],pins=[
Pin(num='1',name='P6.3/A3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.4/A4/CB4',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.5/A5/CB5',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='SBWTCK/TEST',do_erc=True),
Pin(num='11',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='VCORE',func=Pin.PASSIVE,do_erc=True),
Pin(num='22',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='P6.0/A0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='37',name='~RST~/NMI',do_erc=True),
Pin(num='47',name='P6.1/A1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='38',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='P6.2/A2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TA1CLK/CBOUT/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5247IRGC',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64pin QFN, 128KB Flash Memory, 8KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5249IRGC'],pins=[
Pin(num='1',name='P6.0/CB0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/CB1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/CB2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/CB3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P6.4/CB4/A4',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P6.5/CB5/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='P6.6/CB6/A6',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P6.7/CB7/A7',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='50',name='P7.1/TB0.1',func=Pin.BIDIR,do_erc=True),
Pin(num='60',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='AVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='51',name='P7.2/TB0.2',func=Pin.BIDIR,do_erc=True),
Pin(num='61',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='52',name='P7.3/TB0.3',func=Pin.BIDIR,do_erc=True),
Pin(num='62',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='53',name='P7.4/TB0.4',func=Pin.BIDIR,do_erc=True),
Pin(num='63',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='AVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='54',name='P7.5/TB0.5',func=Pin.BIDIR,do_erc=True),
Pin(num='64',name='~RSTDVCC~/SBWTDIO',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='DVCC',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='56',name='~RST~/NMI',do_erc=True),
Pin(num='17',name='VCORE',func=Pin.PWRIN,do_erc=True),
Pin(num='27',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='47',name='P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='57',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='SMCLK/TA2CLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='58',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='DVSS',func=Pin.PWRIN,do_erc=True),
Pin(num='49',name='P7.0/TB0.0',func=Pin.BIDIR,do_erc=True),
Pin(num='59',name='SBWTCK/TEST',do_erc=True)]),
Part(name='MSP430F5304IPT',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='48pin LQFP, 8KB Flash Memory, 6KB RAM',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='P6.0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='PU.1',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='LDOI',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='LDOO',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='PM_NONE/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='PM_NONE/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='37',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='47',name='SBWTCK/TEST',do_erc=True),
Pin(num='18',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='38',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='19',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5304IRGZ',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='48pin QFN, 8KB Flash Memory, 6KB RAM',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='P6.0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='PU.1',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='LDOI',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='LDOO',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='PM_NONE/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='PM_NONE/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='37',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='47',name='SBWTCK/TEST',do_erc=True),
Pin(num='18',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='38',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='19',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5308IPT',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='48pin LQFP, 32KB Flash Memory, 6KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5309IPT', 'MSP430F5310IPT'],pins=[
Pin(num='1',name='P6.0/A0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/A1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/A2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/A3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='PU.1',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='LDOI',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='LDOO',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='PM_NONE/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='PM_NONE/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='37',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='47',name='SBWTCK/TEST',do_erc=True),
Pin(num='18',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='38',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='19',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5308IRGC',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='64pin QFN, 32KB Flash Memory, 6KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5309IRGC', 'MSP430F5310IRGC'],pins=[
Pin(num='1',name='P6.0/CB0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/CB1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/CB2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/CB3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P6.4/CB4/A4',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P6.5/CB5/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='P6.6/CB6/A6',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P6.7/CB7/A7',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='50',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='60',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='61',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='52',name='PU.1',func=Pin.BIDIR,do_erc=True),
Pin(num='62',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='53',name='LDOI',func=Pin.PWRIN,do_erc=True),
Pin(num='63',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='54',name='LDOO',func=Pin.PWROUT,do_erc=True),
Pin(num='64',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='15',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='56',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='17',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='47',name='PM_NONE/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='57',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='SMCLK/TA2CLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='PM_NONE/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='58',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='39',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='49',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='59',name='SBWTCK/TEST',do_erc=True)]),
Part(name='MSP430F5308IRGZ',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='48pin QFN, 32KB Flash Memory, 6KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5309IRGZ', 'MSP430F5310IRGZ'],pins=[
Pin(num='1',name='P6.0/A0/CB0',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.1/A1/CB1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.2/A2/CB2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.3/A3/CB3',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='PU.1',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='LDOI',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='LDOO',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='PM_NONE/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='PM_NONE/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='37',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='47',name='SBWTCK/TEST',do_erc=True),
Pin(num='18',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='38',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='19',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True)]),
Part(name='MSP430F5308IZQE',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='80ball BGA, 32KB Flash Memory, 6KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5309IZQE', 'MSP430F5310IZQE'],pins=[
Pin(num='A1',name='P6.0/CB0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='B1',name='P6.2/CB2/A2',func=Pin.BIDIR,do_erc=True),
Pin(num='C1',name='P6.4/CB4/A4',func=Pin.BIDIR,do_erc=True),
Pin(num='D1',name='P6.6/CB6/A6',func=Pin.BIDIR,do_erc=True),
Pin(num='E1',name='P5.0/A8/VeREF+',func=Pin.BIDIR,do_erc=True),
Pin(num='F1',name='P5.4/XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='G1',name='P5.5/XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='H1',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='J1',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='A2',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='B2',name='P6.1/CB1/A1',func=Pin.BIDIR,do_erc=True),
Pin(num='C2',name='P6.3/CB3/A3',func=Pin.BIDIR,do_erc=True),
Pin(num='D2',name='P6.5/CB5/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='E2',name='P5.1/A9/VeREF-',func=Pin.BIDIR,do_erc=True),
Pin(num='F2',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='G2',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='H2',name='ACLK/TA0CLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='J2',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='A3',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='B3',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='D3',name='P6.7/CB7/A7',func=Pin.BIDIR,do_erc=True),
Pin(num='E3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='F3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='G3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='H3',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='J3',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='A4',name='SBWTCK/TEST',do_erc=True),
Pin(num='B4',name='P5.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='C4',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='D4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='E4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='F4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='G4',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H4',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='J4',name='TA0.4/P1.5',func=Pin.BIDIR,do_erc=True),
Pin(num='A5',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='B5',name='P5.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='C5',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='D5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='E5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='F5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='G5',name='CBOUT/TA1CLK/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='H5',name='TA1.0/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='J5',name='TA1.1/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='A6',name='LDOO',func=Pin.PWROUT,do_erc=True),
Pin(num='C6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='D6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='E6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='F6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='G6',name='TA1.2/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='H6',name='TA2.0/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='J6',name='SMCLK/TA2CLK/P2.2',func=Pin.BIDIR,do_erc=True),
Pin(num='A7',name='LDOI',func=Pin.PWRIN,do_erc=True),
Pin(num='C7',name='PM_NONE/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='D7',name='PM_UCA1TXD/PM_UCA1SIMO/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='E7',name='PM_UCB1SIMO/PM_UCB1SDA/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='F7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='G7',name='UCA0RXD/UCA0SOMI/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='H7',name='UCB0STE/UCA0CLK/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='J7',name='TA2.1/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='A8',name='PU.1',func=Pin.BIDIR,do_erc=True),
Pin(num='B8',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='C8',name='PM_NONE/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='D8',name='PM_UCB1CLK/PM_UCA1STE/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='E8',name='PM_UCB1STE/PM_UCA1CLK/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='F8',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='G8',name='UCB0CLK/UCA0STE/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='H8',name='UCB0SIMO/UCB0SDA/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='J8',name='TA2.2/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='A9',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='B9',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='C9',name='PM_UCA1RXD/PM_UCA1SOMI/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='D9',name='PM_UCB1SOMI/PM_UCB1SCL/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='E9',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='F9',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='G9',name='UCA0TXD/UCA0SIMO/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='H9',name='UCB0SOMI/UCB0SCL/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='J9',name='RTCCLK/DMAE0/P2.6',func=Pin.BIDIR,do_erc=True)]),
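        # Usage sketch (an illustration, not part of the library): a Part template
        # like the one above is copied into a circuit by name, and its pins are
        # connected to nets by number or name. The library name 'msp430' here is
        # an assumption about how this file is loaded.
        #   from skidl import Part, Net
        #   mcu = Part('msp430', 'MSP430F5308IZQE')          # copy of the TEMPLATE
        #   gnd = Net('GND')
        #   gnd += mcu['DVSS1'], mcu['DVSS2'], mcu['AVSS1']  # connect by pin name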
Part(name='MSP430F5333IPZ',dest=TEMPLATE,tool=SKIDL,keywords='MSP430 MIXED SIGNAL MICROCONTROLLER',description='100pin VQFP, 256KB Flash Memory, 18KB RAM',ref_prefix='U',num_units=1,do_erc=True,aliases=['MSP430F5335IPZ'],pins=[
Pin(num='1',name='P6.4/CB4/A4',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='P6.5/CB5/A5',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='P6.6/CB6/A6',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='P6.7/CB7/A7',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='P7.4/CB8/A12',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='P7.5/CB9/A13',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='P7.6/CB10/A14',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='P7.7/CB11/A15',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='VREF+/VeREF+/P5.0',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='VREF-/VeREF-/P5.1',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='P2MAP3/P2.3',func=Pin.BIDIR,do_erc=True),
Pin(num='40',name='TA0.1/P1.6',func=Pin.BIDIR,do_erc=True),
Pin(num='50',name='TB0.0/P4.0',func=Pin.BIDIR,do_erc=True),
Pin(num='60',name='P8.2/UCA1TXD/UCA1SIMO',func=Pin.BIDIR,do_erc=True),
Pin(num='70',name='P9.2',func=Pin.BIDIR,do_erc=True),
Pin(num='80',name='LDOI',func=Pin.BIDIR,do_erc=True),
Pin(num='90',name='DVSS3',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='AVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='P2MAP4/P2.4',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='P5.3',func=Pin.BIDIR,do_erc=True),
Pin(num='41',name='TA0.2/P1.7',func=Pin.BIDIR,do_erc=True),
Pin(num='51',name='TB0.1/P4.1',func=Pin.BIDIR,do_erc=True),
Pin(num='61',name='P8.3/UCA1RXD/UCA1SOMI',func=Pin.BIDIR,do_erc=True),
Pin(num='71',name='P9.3',func=Pin.BIDIR,do_erc=True),
Pin(num='81',name='LDOO',func=Pin.BIDIR,do_erc=True),
Pin(num='91',name='TEST/SBWTCK',do_erc=True),
Pin(num='12',name='AVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='P2MAP5/P2.5',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='P5.4',func=Pin.BIDIR,do_erc=True),
Pin(num='42',name='TA1CLK/CBOUT/P3.0',func=Pin.BIDIR,do_erc=True),
Pin(num='52',name='TB0.2/P4.2',func=Pin.BIDIR,do_erc=True),
Pin(num='62',name='P8.4/UCB1CLK/UCA1STE',func=Pin.BIDIR,do_erc=True),
Pin(num='72',name='P9.4',func=Pin.BIDIR,do_erc=True),
Pin(num='92',name='PJ.0/TDO',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='XIN',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='P2MAP6/P2.6',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='P5.5',func=Pin.BIDIR,do_erc=True),
Pin(num='43',name='TA1.0/P3.1',func=Pin.BIDIR,do_erc=True),
Pin(num='53',name='TB0.3/P4.3',func=Pin.BIDIR,do_erc=True),
Pin(num='63',name='DVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='73',name='P9.5',func=Pin.BIDIR,do_erc=True),
Pin(num='83',name='AVSS3',func=Pin.PWRIN,do_erc=True),
Pin(num='93',name='PJ.1/TDI/TCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='XOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='P2MAP7/P2.7',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='TA0CLK/ACLK/P1.0',func=Pin.BIDIR,do_erc=True),
Pin(num='44',name='TA1.1/P3.2',func=Pin.BIDIR,do_erc=True),
Pin(num='54',name='TB0.4/P4.4',func=Pin.BIDIR,do_erc=True),
Pin(num='64',name='DVCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='74',name='P9.6',func=Pin.BIDIR,do_erc=True),
Pin(num='84',name='P7.2/XT2IN',func=Pin.BIDIR,do_erc=True),
Pin(num='94',name='PJ.2/TMS',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='AVSS2',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='DVCC1',func=Pin.PWRIN,do_erc=True),
Pin(num='35',name='TA0.0/P1.1',func=Pin.BIDIR,do_erc=True),
Pin(num='45',name='TA1.2/P3.3',func=Pin.BIDIR,do_erc=True),
Pin(num='55',name='TB0.5/P4.5',func=Pin.BIDIR,do_erc=True),
Pin(num='65',name='P8.5/UCB1SIMO/UCB1SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='75',name='P9.7',func=Pin.BIDIR,do_erc=True),
Pin(num='85',name='P7.3/XT2OUT',func=Pin.BIDIR,do_erc=True),
Pin(num='95',name='PJ.3/TCK',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='ADC12CLK/DMAE0/P5.6',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='DVSS1',func=Pin.PWRIN,do_erc=True),
Pin(num='36',name='TA0.1/P1.2',func=Pin.BIDIR,do_erc=True),
Pin(num='46',name='TA2CLK/SMCLK/P3.4',func=Pin.BIDIR,do_erc=True),
Pin(num='56',name='TB0.6/P4.6',func=Pin.BIDIR,do_erc=True),
Pin(num='66',name='P8.6/UCB1SOMI/UCB1SCL',func=Pin.BIDIR,do_erc=True),
Pin(num='76',name='VSSU',func=Pin.PWRIN,do_erc=True),
Pin(num='86',name='VBAK',func=Pin.BIDIR,do_erc=True),
Pin(num='96',name='~RST~/NMI/SBWTDIO',do_erc=True),
Pin(num='17',name='P2MAP0/P2.0',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='VCORE',func=Pin.BIDIR,do_erc=True),
Pin(num='37',name='TA0.2/P1.3',func=Pin.BIDIR,do_erc=True),
Pin(num='47',name='TA2.0/P3.5',func=Pin.BIDIR,do_erc=True),
Pin(num='57',name='TB0OUTH/SVMOUT/P4.7',func=Pin.BIDIR,do_erc=True),
Pin(num='67',name='P8.7',func=Pin.BIDIR,do_erc=True),
Pin(num='77',name='PU.0',func=Pin.BIDIR,do_erc=True),
Pin(num='87',name='VBAT',func=Pin.BIDIR,do_erc=True),
Pin(num='97',name='P6.0/CB0/A0',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='P2MAP1/P2.1',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='P5.2',func=Pin.BIDIR,do_erc=True),
Pin(num='38',name='TA0.3/P1.4',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='TA2.1/P3.6',func=Pin.BIDIR,do_erc=True),
Pin(num='58',name='P8.0/TB0CLK',func=Pin.BIDIR,do_erc=True),
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""Reductors based on H2-norm."""
from numbers import Integral, Real
import numpy as np
import scipy.linalg as spla
from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth
from pymor.algorithms.krylov import tangential_rational_krylov
from pymor.algorithms.sylvester import solve_sylv_schur
from pymor.algorithms.to_matrix import to_matrix
from pymor.core.base import BasicObject
from pymor.models.iosys import InputOutputModel, LTIModel
from pymor.operators.constructions import IdentityOperator
from pymor.parameters.base import Mu
from pymor.reductors.basic import LTIPGReductor
from pymor.reductors.interpolation import LTIBHIReductor, TFBHIReductor
from pymor.vectorarrays.numpy import NumpyVectorSpace
class GenericIRKAReductor(BasicObject):
"""Generic IRKA related reductor.
Parameters
----------
fom
The full-order |Model| to reduce.
mu
|Parameter values|.
"""
def _clear_lists(self):
self.sigma_list = []
self.b_list = []
self.c_list = []
self.conv_crit = []
self._conv_data = []
self.errors = []
def __init__(self, fom, mu=None):
if not isinstance(mu, Mu):
mu = fom.parameters.parse(mu)
assert fom.parameters.assert_compatible(mu)
self.fom = fom
self.mu = mu
self.V = None
self.W = None
self._pg_reductor = None
self._clear_lists()
def reconstruct(self, u):
"""Reconstruct high-dimensional vector from reduced vector `u`."""
return self._pg_reductor.reconstruct(u)
def _check_rom0_params(self, rom0_params):
if isinstance(rom0_params, Integral):
assert rom0_params > 0
if hasattr(self.fom, 'order'): # self.fom can be a TransferFunction
assert rom0_params < self.fom.order
elif isinstance(rom0_params, np.ndarray):
assert rom0_params.ndim == 1
elif isinstance(rom0_params, dict):
            assert {'sigma', 'b', 'c'} <= rom0_params.keys()
assert isinstance(rom0_params['sigma'], np.ndarray)
assert isinstance(rom0_params['b'], np.ndarray)
assert isinstance(rom0_params['c'], np.ndarray)
assert rom0_params['sigma'].ndim == 1
assert rom0_params['b'].shape[1] == self.fom.dim_input
assert rom0_params['c'].shape[1] == self.fom.dim_output
assert len(rom0_params['sigma']) == rom0_params['b'].shape[0]
assert len(rom0_params['sigma']) == rom0_params['c'].shape[0]
elif isinstance(rom0_params, LTIModel):
assert rom0_params.order > 0
if hasattr(self.fom, 'order'): # self.fom can be a TransferFunction
                assert rom0_params.order < self.fom.order
assert rom0_params.dim_input == self.fom.dim_input
assert rom0_params.dim_output == self.fom.dim_output
else:
raise ValueError(f'rom0_params is of wrong type ({type(rom0_params)}).')
@staticmethod
def _check_common_args(tol, maxit, num_prev, conv_crit):
assert isinstance(tol, Real) and tol > 0
assert isinstance(maxit, Integral) and maxit >= 1
assert isinstance(num_prev, Integral) and num_prev >= 1
assert conv_crit in ('sigma', 'h2')
def _order_to_sigma_b_c(self, r):
sigma = np.logspace(-1, 1, r)
b = (np.ones((r, 1))
if self.fom.dim_input == 1
else np.random.RandomState(0).normal(size=(r, self.fom.dim_input)))
c = (np.ones((r, 1))
if self.fom.dim_output == 1
else np.random.RandomState(0).normal(size=(r, self.fom.dim_output)))
return sigma, b, c
@staticmethod
def _rom_to_sigma_b_c(rom, force_sigma_in_rhp):
poles, b, c = _lti_to_poles_b_c(rom)
sigma = (np.abs(poles.real) + poles.imag * 1j
if force_sigma_in_rhp
else -poles)
return sigma, b, c
def _rom0_params_to_sigma_b_c(self, rom0_params, force_sigma_in_rhp):
self.logger.info('Generating initial interpolation data')
self._check_rom0_params(rom0_params)
if isinstance(rom0_params, Integral):
sigma, b, c = self._order_to_sigma_b_c(rom0_params)
elif isinstance(rom0_params, np.ndarray):
sigma = rom0_params
_, b, c = self._order_to_sigma_b_c(len(rom0_params))
elif isinstance(rom0_params, dict):
sigma = rom0_params['sigma']
b = rom0_params['b']
c = rom0_params['c']
else:
sigma, b, c = self._rom_to_sigma_b_c(rom0_params, force_sigma_in_rhp)
return sigma, b, c
def _rom0_params_to_rom(self, rom0_params):
self.logger.info('Generating initial reduced-order model')
self._check_rom0_params(rom0_params)
if isinstance(rom0_params, Integral):
sigma, b, c = self._order_to_sigma_b_c(rom0_params)
rom0 = _poles_b_c_to_lti(-sigma, b, c)
elif isinstance(rom0_params, np.ndarray):
sigma = rom0_params
_, b, c = self._order_to_sigma_b_c(len(rom0_params))
rom0 = _poles_b_c_to_lti(-sigma, b, c)
elif isinstance(rom0_params, dict):
sigma = rom0_params['sigma']
b = rom0_params['b']
c = rom0_params['c']
rom0 = _poles_b_c_to_lti(-sigma, b, c)
else:
rom0 = rom0_params
return rom0
def _store_sigma_b_c(self, sigma, b, c):
if sigma is not None:
self.sigma_list.append(sigma)
if b is not None:
self.b_list.append(b)
if c is not None:
self.c_list.append(c)
def _update_conv_data(self, sigma, rom, conv_crit):
del self._conv_data[-1]
self._conv_data.insert(0, sigma if conv_crit == 'sigma' else rom)
def _compute_conv_crit(self, rom, conv_crit, it):
if conv_crit == 'sigma':
sigma = self._conv_data[0]
dist = min(spla.norm((sigma_old - sigma) / sigma_old, ord=np.inf)
for sigma_old in self._conv_data[1:]
if sigma_old is not None)
else:
if rom.poles().real.max() >= 0:
dist = np.inf
else:
dist = min((rom_old - rom).h2_norm() / rom_old.h2_norm()
if rom_old is not None and rom_old.poles().real.max() < 0
else np.inf
for rom_old in self._conv_data[1:])
self.conv_crit.append(dist)
self.logger.info(f'Convergence criterion in iteration {it + 1}: {dist:e}')
def _compute_error(self, rom, it, compute_errors):
if not compute_errors:
return
rel_h2_err = ((self.fom - rom).h2_norm() / self.fom.h2_norm()
if rom.poles().real.max() < 0
else np.inf)
self.errors.append(rel_h2_err)
self.logger.info(f'Relative H2-error in iteration {it + 1}: {rel_h2_err:e}')
class IRKAReductor(GenericIRKAReductor):
"""Iterative Rational Krylov Algorithm reductor.
Parameters
----------
fom
The full-order |LTIModel| to reduce.
mu
|Parameter values|.
"""
def __init__(self, fom, mu=None):
assert isinstance(fom, LTIModel)
super().__init__(fom, mu=mu)
def reduce(self, rom0_params, tol=1e-4, maxit=100, num_prev=1,
force_sigma_in_rhp=False, projection='orth', conv_crit='sigma',
compute_errors=False):
r"""Reduce using IRKA.
See [GAB08]_ (Algorithm 4.1) and [ABG10]_ (Algorithm 1).
Parameters
----------
rom0_params
Can be:
- order of the reduced model (a positive integer),
- initial interpolation points (a 1D |NumPy array|),
- dict with `'sigma'`, `'b'`, `'c'` as keys mapping to
initial interpolation points (a 1D |NumPy array|), right
tangential directions (|NumPy array| of shape
`(len(sigma), fom.dim_input)`), and left tangential directions
              (|NumPy array| of shape `(len(sigma), fom.dim_output)`),
- initial reduced-order model (|LTIModel|).
            If the order of the reduced model is given, initial
interpolation data is generated randomly.
tol
Tolerance for the convergence criterion.
maxit
Maximum number of iterations.
num_prev
Number of previous iterations to compare the current
            iteration to. A larger number can avoid occasional cyclic
behavior of IRKA.
force_sigma_in_rhp
            If `False`, new interpolation points are reflections of the current
reduced-order model's poles. Otherwise, only poles in the
left half-plane are reflected.
projection
Projection method:
- `'orth'`: projection matrices are orthogonalized with
respect to the Euclidean inner product
            - `'biorth'`: projection matrices are biorthogonalized with
respect to the E product
- `'arnoldi'`: projection matrices are orthogonalized using
the Arnoldi process (available only for SISO systems).
conv_crit
Convergence criterion:
- `'sigma'`: relative change in interpolation points
- `'h2'`: relative :math:`\mathcal{H}_2` distance of
reduced-order models
compute_errors
Should the relative :math:`\mathcal{H}_2`-errors of
intermediate reduced-order models be computed.
.. warning::
Computing :math:`\mathcal{H}_2`-errors is expensive. Use
this option only if necessary.
Returns
-------
rom
            Reduced-order |LTIModel|.
"""
if not self.fom.cont_time:
raise NotImplementedError
self._clear_lists()
sigma, b, c = self._rom0_params_to_sigma_b_c(rom0_params, force_sigma_in_rhp)
self._store_sigma_b_c(sigma, b, c)
self._check_common_args(tol, maxit, num_prev, conv_crit)
assert projection in ('orth', 'biorth', 'arnoldi')
if projection == 'arnoldi':
assert self.fom.dim_input == self.fom.dim_output == 1
self.logger.info('Starting IRKA')
self._conv_data = (num_prev + 1) * [None]
if conv_crit == 'sigma':
self._conv_data[0] = sigma
self._pg_reductor = LTIBHIReductor(self.fom, mu=self.mu)
for it in range(maxit):
rom = self._pg_reductor.reduce(sigma, b, c, projection=projection)
sigma, b, c = self._rom_to_sigma_b_c(rom, force_sigma_in_rhp)
self._store_sigma_b_c(sigma, b, c)
self._update_conv_data(sigma, rom, conv_crit)
self._compute_conv_crit(rom, conv_crit, it)
self._compute_error(rom, it, compute_errors)
if self.conv_crit[-1] < tol:
break
self.V = self._pg_reductor.V
self.W = self._pg_reductor.W
return rom
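# Usage sketch (an illustration; assumes `fom` is a continuous-time |LTIModel|,
# e.g. built with LTIModel.from_matrices(A, B, C)):
#
#     irka = IRKAReductor(fom)
#     rom = irka.reduce(10, tol=1e-4, conv_crit='h2')
#     rel_err = (fom - rom).h2_norm() / fom.h2_norm()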
class OneSidedIRKAReductor(GenericIRKAReductor):
"""One-Sided Iterative Rational Krylov Algorithm reductor.
Parameters
----------
fom
The full-order |LTIModel| to reduce.
version
Version of the one-sided IRKA:
- `'V'`: Galerkin projection using the input Krylov subspace,
- `'W'`: Galerkin projection using the output Krylov subspace.
mu
|Parameter values|.
"""
def __init__(self, fom, version, mu=None):
assert isinstance(fom, LTIModel)
assert version in ('V', 'W')
super().__init__(fom, mu=mu)
self.version = version
def reduce(self, rom0_params, tol=1e-4, maxit=100, num_prev=1,
force_sigma_in_rhp=False, projection='orth', conv_crit='sigma',
compute_errors=False):
r"""Reduce using one-sided IRKA.
Parameters
----------
rom0_params
Can be:
- order of the reduced model (a positive integer),
- initial interpolation points (a 1D |NumPy array|),
- dict with `'sigma'`, `'b'`, `'c'` as keys mapping to
initial interpolation points (a 1D |NumPy array|), right
tangential directions (|NumPy array| of shape
`(len(sigma), fom.dim_input)`), and left tangential directions
              (|NumPy array| of shape `(len(sigma), fom.dim_output)`),
- initial reduced-order model (|LTIModel|).
            If the order of the reduced model is given, initial
interpolation data is generated randomly.
tol
Tolerance for the largest change in interpolation points.
maxit
Maximum number of iterations.
num_prev
Number of previous iterations to compare the current
iteration to. A larger number can avoid occasional cyclic
behavior.
force_sigma_in_rhp
            If `False`, new interpolation points are reflections of the current
reduced-order model's poles. Otherwise, only poles in the
left half-plane are reflected.
projection
Projection method:
- `'orth'`: projection matrix is orthogonalized with respect
to the Euclidean inner product,
- `'Eorth'`: projection matrix is orthogonalized with
respect to the E product.
conv_crit
Convergence criterion:
- `'sigma'`: relative change in interpolation points,
- `'h2'`: relative :math:`\mathcal{H}_2` distance of
reduced-order models.
compute_errors
Should the relative :math:`\mathcal{H}_2`-errors of
intermediate reduced-order models be computed.
.. warning::
Computing :math:`\mathcal{H}_2`-errors is expensive. Use
this option only if necessary.
Returns
-------
rom
            Reduced-order |LTIModel|.
"""
if not self.fom.cont_time:
raise NotImplementedError
self._clear_lists()
sigma, b, c = self._rom0_params_to_sigma_b_c(rom0_params, force_sigma_in_rhp)
self._store_sigma_b_c(sigma, b, c)
self._check_common_args(tol, maxit, num_prev, conv_crit)
assert projection in ('orth', 'Eorth')
self.logger.info('Starting one-sided IRKA')
self._conv_data = (num_prev + 1) * [None]
if conv_crit == 'sigma':
# Lib/_collections_abc.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) dla collections, according to PEP 3119.
Unit tests are w test_collections.
"""
z abc zaimportuj ABCMeta, abstractmethod
zaimportuj sys
__all__ = ["Awaitable", "Coroutine", "AsyncIterable", "AsyncIterator",
"Hashable", "Iterable", "Iterator", "Generator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
#       it = iter(somebytearray)
#       assert isinstance(it, Iterable)
# Note: in other implementations, these types might not be distinct
# and they may have their own implementation specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
generator = type((lambda: (yield))())
## coroutine ##
async def _coro(): pass
_coro = _coro()
coroutine = type(_coro)
_coro.close()  # Prevent ResourceWarning
del _coro
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    def __hash__(self):
        return 0
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Hashable:
            for B in C.__mro__:
                if "__hash__" in B.__dict__:
                    if B.__dict__["__hash__"]:
                        return True
                    break
        return NotImplemented
class Awaitable(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    def __await__(self):
        yield
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Awaitable:
            for B in C.__mro__:
                if "__await__" in B.__dict__:
                    if B.__dict__["__await__"]:
                        return True
                    break
        return NotImplemented
class Coroutine(Awaitable):
    __slots__ = ()
    @abstractmethod
    def send(self, value):
        """Send a value into the coroutine.
        Return next yielded value or raise StopIteration.
        """
        raise StopIteration
    @abstractmethod
    def throw(self, typ, val=None, tb=None):
        """Raise an exception in the coroutine.
        Return next yielded value or raise StopIteration.
        """
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val
    def close(self):
        """Raise GeneratorExit inside coroutine.
        """
        try:
            self.throw(GeneratorExit)
        except (GeneratorExit, StopIteration):
            pass
        else:
            raise RuntimeError("coroutine ignored GeneratorExit")
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Coroutine:
            mro = C.__mro__
            for method in ('__await__', 'send', 'throw', 'close'):
                for base in mro:
                    if method in base.__dict__:
                        break
                else:
                    return NotImplemented
            return True
        return NotImplemented
Coroutine.register(coroutine)
class AsyncIterable(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    async def __aiter__(self):
        return AsyncIterator()
    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncIterable:
            if any("__aiter__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
class AsyncIterator(AsyncIterable):
    __slots__ = ()
    @abstractmethod
    async def __anext__(self):
        """Return the next item or raise StopAsyncIteration when exhausted."""
        raise StopAsyncIteration
    async def __aiter__(self):
        return self
    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncIterator:
            if (any("__anext__" in B.__dict__ for B in C.__mro__) and
                any("__aiter__" in B.__dict__ for B in C.__mro__)):
                return True
        return NotImplemented
class Iterable(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    def __iter__(self):
        while False:
            yield None
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if any("__iter__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
class Iterator(Iterable):
    __slots__ = ()
    @abstractmethod
    def __next__(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration
    def __iter__(self):
        return self
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            if (any("__next__" in B.__dict__ for B in C.__mro__) and
                any("__iter__" in B.__dict__ for B in C.__mro__)):
                return True
        return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
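# Illustrative check (a sketch): thanks to the registrations above, the
# concrete C-level iterator types pass ABC isinstance tests, e.g.
#   assert isinstance(iter([]), Iterator)
#   assert isinstance(iter({}.items()), Iterator)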
class Generator(Iterator):
    __slots__ = ()
    def __next__(self):
        """Return the next item from the generator.
        When exhausted, raise StopIteration.
        """
        return self.send(None)
    @abstractmethod
    def send(self, value):
        """Send a value into the generator.
        Return next yielded value or raise StopIteration.
        """
        raise StopIteration
    @abstractmethod
    def throw(self, typ, val=None, tb=None):
        """Raise an exception in the generator.
        Return next yielded value or raise StopIteration.
        """
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val
    def close(self):
        """Raise GeneratorExit inside generator.
        """
        try:
            self.throw(GeneratorExit)
        except (GeneratorExit, StopIteration):
            pass
        else:
            raise RuntimeError("generator ignored GeneratorExit")
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Generator:
            mro = C.__mro__
            for method in ('__iter__', '__next__', 'send', 'throw', 'close'):
                for base in mro:
                    if method in base.__dict__:
                        break
                else:
                    return NotImplemented
            return True
        return NotImplemented
Generator.register(generator)
class Sized(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    def __len__(self):
        return 0
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if any("__len__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
class Container(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    def __contains__(self, x):
        return False
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if any("__contains__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
class Callable(metaclass=ABCMeta):
    __slots__ = ()
    @abstractmethod
    def __call__(self, *args, **kwds):
        return False
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if any("__call__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.
    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.
    To override the comparisons (presumably for speed, as the
    semantics are fixed), redefine __le__ and __ge__,
    then the other operations will automatically follow suit.
    """
    __slots__ = ()
    def __le__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True
    def __lt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)
    def __gt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) > len(other) and self.__ge__(other)
    def __ge__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for elem in other:
            if elem not in self:
                return False
        return True
    def __eq__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)
    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.
        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)
    def __and__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)
    __rand__ = __and__
    def isdisjoint(self, other):
        'Return True if two sets have a null intersection.'
        for value in other:
            if value in self:
                return False
        return True
    def __or__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)
    __ror__ = __or__
    def __sub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)
    def __rsub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in other
                                   if value not in self)
    def __xor__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)
    __rxor__ = __xor__
    def _hash(self):
        """Compute the hash value of a set.
        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.
        This must be compatible with __eq__.
        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__. We match the algorithm used
        by the built-in frozenset type.
        """
        MAX = sys.maxsize
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            h = 590923713
        return h
Set.register(frozenset)
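# Usage sketch (an illustration): a hashable Set subclass opts in by assigning
# the helper above, mirroring what frozenset does natively, e.g.
#   class MyFrozenSet(Set):
#       ...  # define __contains__, __iter__, __len__
#       __hash__ = Set._hash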
class MutableSet(Set):
    """A mutable
#!/usr/bin/python2.5
# from repo superarts/superarts.github.io
# modified from sexp.py, which can be found at http://code.google.com/p/pynarcissus/
import sys
import jsparser

# check(...) below raises ProgrammerError, which this excerpt never defines;
# a minimal definition is assumed here so the script can run.
class ProgrammerError(Exception):
    pass
################################################################################
# class Scope
################################################################################
# keeps track of generated local variables
class Scope:
def __init__(self):
self.vars = [[]]
self.locals = [set()]
# enter a new scope, forget about any local variables created before
def push(self):
self.vars.append([])
self.locals.append(set())
# returns the local variables allocated
def pop(self):
return sorted(list(self.locals.pop()), key=lambda x: int(x[1:]))
# allocate a local variable
def alloc(self):
t = "_%d"
i = 0
while (t % i) in self.vars[-1]:
i += 1
var = t % i
self.locals[-1].add(var)
self.vars[-1].append(var)
return var
# free a local variable (to minimize the number used)
def free(self, var):
if var in self.vars[-1]:
self.vars[-1].remove(var)
self.last_freed_var = var
# global scope object, each SCRIPT node pushes one more level on this
scope = Scope()
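# Example of the allocator's reuse behavior (an illustrative sketch):
#   s = Scope()
#   a = s.alloc()   # "_0"
#   b = s.alloc()   # "_1"
#   s.free(a)
#   c = s.alloc()   # "_0" again, since it was freed above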
# used to implement reuse of locals
result_var = None
# interesting to know how many new vector allocations were avoided
allocations_avoided = 0
################################################################################
# code generation functions
################################################################################
# if a node is just an identifier, don't bother making a local variable for it
def isid(n):
return n.type == "IDENTIFIER" or n.type == "THIS" or n.type == "NUMBER"
# we can reuse temporaries created by inner computations
def isinline(n):
if n.type != "CALL":
return False
if n[0].type == "DOT" and n[0][1].type == "IDENTIFIER":
func = o(n[0][1])
if len(n[1]) == 0 and func in unary_funcs:
return True
elif len(n[1]) == 1 and func in binary_funcs:
return True
elif n[0].type == "IDENTIFIER":
func = o(n[0])
if func in global_funcs:
return True
return False
NEED_TEMP = 1
NEED_RESULT = 2
def wrap_unary(flags, func, inplace=None):
def convert(a):
global result_var, allocations_avoided
if inplace and isinline(a):
allocations_avoided += 1
r = unary_funcs[inplace](a)
r.append(result_var)
return r
r = []
va = o(a) if isid(a) else scope.alloc()
vr = scope.alloc() if flags & NEED_RESULT else None
vtemp = scope.alloc() if flags & NEED_TEMP else None
if not isid(a):
r.append("%s = %s" % (va, o(a)))
if vr:
r.append("%s = new Vector(0, 0)" % vr)
r += (func(vr, va, vtemp) if vtemp else func(vr, va)) + [vr]
else:
r += func(va, vtemp) if vtemp else func(va)
scope.free(va), scope.free(vr), scope.free(vtemp)
result_var = vr if vr else va
return r
return convert
def wrap_binary(flags, func, inplace = None):
def convert(a, b):
global result_var, allocations_avoided
if inplace and isinline(a):
allocations_avoided += 1
r = binary_funcs[inplace](a, b)
r.append(result_var)
return r
r = []
va = o(a) if isid(a) else scope.alloc()
vb = o(b) if isid(b) else scope.alloc()
vr = scope.alloc() if flags & NEED_RESULT else None
vtemp = scope.alloc() if flags & NEED_TEMP else None
if not isid(a):
r.append("%s = %s" % (va, o(a)))
if not isid(b):
r.append("%s = %s" % (vb, o(b)))
if vr:
r.append("%s = new Vector(0, 0)" % vr)
r += (func(vr, va, vb, vtemp) if vtemp else func(vr, va, vb)) + [vr]
else:
r += func(va, vb, vtemp) if vtemp else func(va, vb)
scope.free(va), scope.free(vr), scope.free(vb), scope.free(vtemp)
result_var = vr if vr else va
return r
return convert
def lerp(a, b, c):
va = o(a) if isid(a) else scope.alloc()
vb = o(b) if isid(b) else scope.alloc()
vc = o(c) if isid(c) else scope.alloc()
r = []
if not isid(a): r.append("%s = %s" % (va, o(a)))
if not isid(b): r.append("%s = %s" % (vb, o(b)))
if not isid(c): r.append("%s = %s" % (vc, o(c)))
r.append("%s + (%s - %s) * %s" % (va, vb, va, vc))
scope.free(va), scope.free(vb), scope.free(vc)
return r
def randInRange(a, b):
va = o(a) if isid(a) else scope.alloc()
vb = o(b) if isid(b) else scope.alloc()
r = []
if not isid(a): r.append("%s = %s" % (va, o(a)))
if not isid(b): r.append("%s = %s" % (vb, o(b)))
r.append("%s + (%s - %s) * Math.random()" % (va, vb, va))
scope.free(va), scope.free(vb)
return r
global_funcs = {
"lerp": lerp,
"randInRange": randInRange
}
unary_funcs = {
"unit": wrap_unary(NEED_RESULT | NEED_TEMP, lambda r, a, length: [
"%s = Math.sqrt(%s.x*%s.x + %s.y*%s.y)" % (length, a, a, a, a),
"%s.x = %s.x / %s" % (r, a, length),
"%s.y = %s.y / %s" % (r, a, length)
], "normalize"),
"normalize": wrap_unary(NEED_TEMP, lambda a, length: [
"%s = Math.sqrt(%s.x*%s.x + %s.y*%s.y)" % (length, a, a, a, a),
"%s.x /= %s" % (a, length),
"%s.y /= %s" % (a, length)
]),
"neg": wrap_unary(NEED_RESULT, lambda r, a: [
"%s.x = -%s.x" % (r, a),
"%s.y = -%s.y" % (r, a)
], "inplaceNeg"),
"flip": wrap_unary(NEED_RESULT, lambda r, a: [
"%s.x = %s.y" % (r, a),
"%s.y = -%s.x" % (r, a)
], "inplaceFlip"),
"length": wrap_unary(0, lambda a: [
"Math.sqrt(%s.x*%s.x + %s.y*%s.y)" % (a, a, a, a)
]),
"lengthSquared": wrap_unary(0, lambda a: [
"%s.x*%s.x + %s.y*%s.y" % (a, a, a, a)
]),
"inplaceNeg": wrap_unary(0, lambda a: [
"%s.x = -%s.x" % (a, a),
"%s.y = -%s.y" % (a, a)
]),
"inplaceFlip": wrap_unary(NEED_TEMP, lambda a, temp: [
"%s = %s.x" % (temp, a),
"%s.x = %s.y" % (a, a),
"%s.y = -%s" % (a, temp)
]),
}
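# Illustration (a sketch of the generated code): for an identifier `a`, a call
# like `a.length()` is inlined by the "length" entry above into
#   (Math.sqrt(a.x*a.x + a.y*a.y))
# with no temporaries, since `a` is already an identifier.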
binary_funcs = {
"add": wrap_binary(NEED_RESULT, lambda r, a, b: [
"%s.x = %s.x + %s.x" % (r, a, b),
"%s.y = %s.y + %s.y" % (r, a, b)
], "inplaceAdd"),
"sub": wrap_binary(NEED_RESULT, lambda r, a, b: [
"%s.x = %s.x - %s.x" % (r, a, b),
"%s.y = %s.y - %s.y" % (r, a, b)
], "inplaceSub"),
"mul": wrap_binary(NEED_RESULT, lambda r, a, b: [
"%s.x = %s.x * %s" % (r, a, b),
"%s.y = %s.y * %s" % (r, a, b)
], "inplaceMul"),
"div": wrap_binary(NEED_RESULT, lambda r, a, b: [
"%s.x = %s.x / %s" % (r, a, b),
"%s.y = %s.y / %s" % (r, a, b)
], "inplaceDiv"),
"minComponents": wrap_binary(NEED_RESULT, lambda r, a, b: [
"%s.x = Math.min(%s.x, %s.x)" % (r, a, b),
"%s.y = Math.min(%s.y, %s.y)" % (r, a, b)
], "inplaceMinComponents"),
"maxComponents": wrap_binary(NEED_RESULT, lambda r, a, b: [
"%s.x = Math.max(%s.x, %s.x)" % (r, a, b),
"%s.y = Math.max(%s.y, %s.y)" % (r, a, b)
], "inplaceMaxComponents"),
"dot": wrap_binary(0, lambda a, b: [
"%s.x * %s.x + %s.y * %s.y" % (a, b, a, b)
]),
"inplaceAdd": wrap_binary(0, lambda a, b: [
"%s.x += %s.x" % (a, b),
"%s.y += %s.y" % (a, b)
]),
"inplaceSub": wrap_binary(0, lambda a, b: [
"%s.x -= %s.x" % (a, b),
"%s.y -= %s.y" % (a, b)
]),
"inplaceMul": wrap_binary(0, lambda a, b: [
"%s.x *= %s" % (a, b),
"%s.y *= %s" % (a, b)
]),
"inplaceDiv": wrap_binary(0, lambda a, b: [
"%s.x /= %s" % (a, b),
"%s.y /= %s" % (a, b)
]),
"inplaceMinComponents": wrap_binary(0, lambda a, b: [
"%s.x = Math.min(%s.x, %s.x)" % (a, a, b),
"%s.y = Math.min(%s.y, %s.y)" % (a, a, b)
]),
"inplaceMaxComponents": wrap_binary(0, lambda a, b: [
"%s.x = Math.max(%s.x, %s.x)" % (a, a, b),
"%s.y = Math.max(%s.y, %s.y)" % (a, a, b)
]),
}
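# Illustration (a sketch of the generated code): with identifiers `a` and `b`,
# a call like `a.add(b)` is inlined by the "add" entry above into
#   (_0 = new Vector(0, 0), _0.x = a.x + b.x, _0.y = a.y + b.y, _0)
# where `_0` is a temporary allocated from the enclosing Scope.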
################################################################################
# parse tree visitor
################################################################################
opmap = {
# unary operands
"NOT": "!",
"VOID": "void",
"UNARY_PLUS": "+",
"UNARY_MINUS": "-",
"BITWISE_NOT": "~",
# binary operands
"PLUS": "+",
"LT": "<",
"EQ": "==",
"AND": "&&",
"OR": "||",
"MINUS": "-",
"MUL": "*",
"LE": "<=",
"NE": "!=",
"STRICT_EQ": "===",
"DIV": "/",
"GE": ">=",
"INSTANCEOF": "instanceof",
"IN": "in",
"GT": ">",
"BITWISE_OR": "|",
"BITWISE_AND": "&",
"BITWISE_XOR": "^",
"STRICT_NE": "!==",
"LSH": "<<",
"RSH": ">>",
"URSH": ">>>",
"MOD": "%"
}
# interesting to know how many function calls got inlined
inline_count = 0
# modified from s-expr output example, just turns the parse tree back
# into javascript except specific function calls, which it inlines
# (sexp.py can be found at http://code.google.com/p/pynarcissus/)
def o(n, handledattrs=[]):
global inline_count
attrs_ = {}
for attr in handledattrs:
attrs_[attr] = True
subnodes_ = []
had_error = False
def check(attrs=[], optattrs=[], subnodes=0):
if not (type(attrs) == list and type(optattrs) == list and
type(subnodes) == int):
            raise ProgrammerError("Wrong arguments to check(...)!")
for attr in attrs: attrs_[attr] = True
for attr in optattrs:
if hasattr(n, attr): attrs_[attr] = True
for i in xrange(subnodes):
subnodes_.append(True)
try:
check(attrs=["append", "count", "extend", "filename", "getSource",
"indentLevel", "index", "insert", "lineno", "pop",
"remove", "reverse", "sort", "tokenizer", "type", "type_"],
optattrs=["end", "start", "value"])
if n.type == "ARRAY_INIT":
check(subnodes=len(n))
return "[" + ", ".join(o(x) if x else '' for x in n) + "]"
elif n.type == "ASSIGN":
check(subnodes=2)
if getattr(n[0],"assignOp", None) is not None:
return "%s %s= %s" % (o(n[0], handledattrs=["assignOp"]), jsparser.tokens[n[0].assignOp], o(n[1]))
else:
return "%s = %s" % (o(n[0], handledattrs=["assignOp"]), o(n[1]))
elif n.type == "BLOCK":
check(subnodes=len(n))
return "{%s\n}" % "".join("\n" + o(x) + ";" for x in n)
elif n.type in ("BREAK", "CONTINUE"):
check(attrs=["target"], optattrs=["label"])
if hasattr(n,"label"):
return "%s %s" % (n.value, n.label)
return n.value
elif n.type == "CALL":
check(subnodes=2)
if n[0].type == "DOT" and n[0][1].type == "IDENTIFIER":
# must pass n[0][0] and n[1][0] directly to unary_funcs[func] or binary_funcs[func]
# because of required order of scope.alloc() and scope.free()
func = o(n[0][1])
if len(n[1]) == 0 and func in unary_funcs:
inline_count += 1
return "(%s)" % ", ".join(unary_funcs[func](n[0][0]))
elif len(n[1]) == 1 and func in binary_funcs:
inline_count += 1
return "(%s)" % ", ".join(binary_funcs[func](n[0][0], n[1][0]))
elif n[0].type == "IDENTIFIER":
func = o(n[0])
if func in global_funcs:
inline_count += 1
return "(%s)" % ", ".join(global_funcs[func](*n[1]))
return "%s(%s)" % (o(n[0]), o(n[1]))
elif n.type == "CASE":
check(attrs=["caseLabel","statements"])
return "case %s:%s" % (o(n.caseLabel), o(n.statements))
elif n.type == "CATCH":
check(attrs=["block","guard","varName"])
return "catch (%s) %s" % (n.varName, o(n.block))
elif n.type == "COMMA":
check(subnodes=2)
return "%s" % ", ".join("%s" % o(x) for x in n)
elif n.type == "DEFAULT":
check(attrs=["statements"])
return "default: %s" % o(n.statements)
elif n.type == "NEW":
check(subnodes=1)
return "new %s()" % o(n[0])
elif n.type == "TYPEOF":
check(subnodes=1)
return "typeof %s " % o(n[0])
elif n.type == "DELETE":
check(subnodes=1)
return "delete %s" % o(n[0])
elif n.type in ("UNARY_MINUS", "NOT", "VOID", "BITWISE_NOT", "UNARY_PLUS"):
check(subnodes=1)
return "%s%s%s" % (opmap[n.type], " " if n.type == "VOID" else "", o(n[0]))
elif n.type == "DO":
check(attrs=["body", "condition", "isLoop"])
assert n.isLoop
return "do %s while (%s)" % (o(n.body), o(n.condition))
elif n.type == "DOT":
check(subnodes=2)
return "%s.%s" % (o(n[0]), o(n[1]))
elif n.type == "FUNCTION":
check(attrs=["functionForm","params","body"],
optattrs=["name"])
if n.functionForm == 0:
return "function %s(%s) {\n%s\n}" % | |
#!/usr/bin/env python
# journalism/columns.py
from collections import Mapping, Sequence, defaultdict
import datetime
from functools import wraps
try:
from cdecimal import Decimal, InvalidOperation
except ImportError: #pragma: no cover
from decimal import Decimal, InvalidOperation
try:
from collections import OrderedDict
except ImportError: #pragma: no cover
from ordereddict import OrderedDict
from dateutil.parser import parse
import six
from journalism.exceptions import ColumnDoesNotExistError, NullComputationError, CastError
#: String values which will be automatically cast to :code:`None`.
NULL_VALUES = ('', 'na', 'n/a', 'none', 'null', '.')
#: String values which will be automatically cast to :code:`True`.
TRUE_VALUES = ('yes', 'y', 'true', 't')
#: String values which will be automatically cast to :code:`False`.
FALSE_VALUES = ('no', 'n', 'false', 'f')
def no_null_computations(func):
"""
Function decorator that prevents illogical computations
on columns containing nulls.
"""
@wraps(func)
def check(c, *args, **kwargs):
if c.has_nulls():
raise NullComputationError
        return func(c, *args, **kwargs)
return check
def _median(data_sorted):
"""
Compute the median value of a sequence of values.
:param data_sorted: A sorted sequence of :class:`decimal.Decimal`.
:returns: :class:`decimal.Decimal`.
"""
length = len(data_sorted)
if length % 2 == 1:
return data_sorted[((length + 1) // 2) - 1]
else:
half = length // 2
a = data_sorted[half - 1]
b = data_sorted[half]
return (a + b) / 2
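# For example (illustrative): _median of four sorted Decimals averages the
# middle pair:
#   _median([Decimal(1), Decimal(2), Decimal(3), Decimal(4)])  # Decimal('2.5')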
class ColumnMapping(Mapping):
"""
Proxy access to :class:`Column` instances by name.
:param table: The :class:`.Table` containing the columns.
"""
def __init__(self, table):
self._table = table
def __getitem__(self, k):
try:
i = self._table._column_names.index(k)
except ValueError:
raise ColumnDoesNotExistError(k)
return self._table._get_column(i)
def __iter__(self):
return ColumnIterator(self._table)
def __len__(self):
return len(self._table._column_names)
class Column(Sequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances.
:param table: The table that contains this column.
:param index: The index of this column in the table.
"""
def __init__(self, table, index):
self._table = table
self._index = index
self._cached_data = None
self._cached_data_without_nulls = None
self._cached_data_sorted = None
def __unicode__(self):
data = self._data()
sample = ', '.join(six.text_type(d) for d in data[:5])
if len(data) > 5:
sample = '%s, ...' % sample
sample = '(%s)' % sample
return '<journalism.columns.%s: %s>' % (self.__class__.__name__, sample)
def __str__(self):
return str(self.__unicode__())
def _data(self):
if self._cached_data is None:
self._cached_data = tuple(r[self._index] for r in self._table._data)
return self._cached_data
def _data_without_nulls(self):
if self._cached_data_without_nulls is None:
self._cached_data_without_nulls = tuple(d for d in self._data() if d is not None)
return self._cached_data_without_nulls
def _data_sorted(self):
if self._cached_data_sorted is None:
self._cached_data_sorted = sorted(self._data())
return self._cached_data_sorted
def __getitem__(self, j):
return self._data()[j]
def __len__(self):
return len(self._data())
def __eq__(self, other):
"""
Ensure equality test with lists works.
"""
return self._data() == other
def __ne__(self, other):
"""
Ensure inequality test with lists works.
"""
return not self.__eq__(other)
def has_nulls(self):
"""
Returns True if this column contains null values.
"""
return None in self._data()
def any(self, test):
"""
Returns :code:`True` if any value passes a truth test.
:param test: A function that takes a value and returns :code:`True`
or :code:`False`.
"""
return any(test(d) for d in self._data())
def all(self, test):
"""
Returns :code:`True` if all values pass a truth test.
:param test: A function that takes a value and returns :code:`True`
or :code:`False`.
"""
return all(test(d) for d in self._data())
def count(self, value):
"""
Count the number of times a specific value occurs in this column.
:param value: The value to be counted.
"""
count = 0
for d in self._data():
if d == value:
count += 1
return count
def counts(self):
"""
Compute the number of instances of each unique value in this
column.
Returns a new :class:`.Table`, with two columns,
        one containing the values and a second :class:`NumberColumn`
containing the counts.
Resulting table will be sorted by descending count.
"""
counts = OrderedDict()
for d in self._data():
if d not in counts:
counts[d] = 0
counts[d] += 1
column_names = (self._table._column_names[self._index], 'count')
column_types = (self._table._column_types[self._index], NumberType())
data = (tuple(i) for i in counts.items())
rows = sorted(data, key=lambda r: r[1], reverse=True)
return self._table._fork(rows, column_types, column_names)
class ColumnType(object):
"""
Base class for column data types.
"""
def create_column(self, table, index):
raise NotImplementedError
class TextColumn(Column):
"""
A column containing unicode/string data.
"""
def max_length(self):
return max([len(d) for d in self._data_without_nulls()])
class TextType(ColumnType):
"""
Column type for :class:`TextColumn`.
"""
def cast(self, d):
"""
Cast a single value to :func:`unicode` (:func:`str` in Python 3).
:param d: A value to cast.
:returns: :func:`unicode` (:func:`str` in Python 3) or :code:`None`
"""
if d is None:
return d
if isinstance(d, six.string_types):
d = d.strip()
if d.lower() in NULL_VALUES:
return None
return six.text_type(d)
def create_column(self, table, index):
return TextColumn(table, index)
class BooleanColumn(Column):
"""
A column containing :func:`bool` data.
"""
def any(self):
"""
Returns :code:`True` if any value is :code:`True`.
"""
return any(self._data())
def all(self):
"""
Returns :code:`True` if all values are :code:`True`.
"""
return all(self._data())
class BooleanType(ColumnType):
"""
Column type for :class:`BooleanColumn`.
"""
def cast(self, d):
"""
Cast a single value to :func:`bool`.
:param d: A value to cast.
:returns: :func:`bool` or :code:`None`.
"""
if isinstance(d, bool) or d is None:
return d
if isinstance(d, six.string_types):
            d = d.replace(',', '').strip()
d_lower = d.lower()
if d_lower in NULL_VALUES:
return None
if d_lower in TRUE_VALUES:
return True
if d_lower in FALSE_VALUES:
return False
raise CastError('Can not convert value %s to bool for BooleanColumn.' % d)
def create_column(self, table, index):
return BooleanColumn(table, index)
class NumberColumn(Column):
"""
A column containing numeric data.
    All data is represented by the :class:`decimal.Decimal` class.
"""
def sum(self):
"""
Compute the sum of this column.
:returns: :class:`decimal.Decimal`.
"""
return sum(self._data_without_nulls())
def min(self):
"""
Compute the minimum value of this column.
:returns: :class:`decimal.Decimal`.
"""
return min(self._data_without_nulls())
def max(self):
"""
Compute the maximum value of this column.
:returns: :class:`decimal.Decimal`.
"""
return max(self._data_without_nulls())
@no_null_computations
def mean(self):
"""
Compute the mean value of this column.
:returns: :class:`decimal.Decimal`.
:raises: :exc:`.NullComputationError`
"""
return self.sum() / len(self)
@no_null_computations
def median(self):
"""
Compute the median value of this column.
:returns: :class:`decimal.Decimal`.
:raises: :exc:`.NullComputationError`
"""
return _median(self._data_sorted())
@no_null_computations
def mode(self):
"""
Compute the mode value of this column.
:returns: :class:`decimal.Decimal`.
:raises: :exc:`.NullComputationError`
"""
data = self._data()
state = defaultdict(int)
for n in data:
state[n] += 1
return max(state.keys(), key=lambda x: state[x])
@no_null_computations
def variance(self):
"""
Compute the variance of this column.
:returns: :class:`decimal.Decimal`.
:raises: :exc:`.NullComputationError`
"""
data = self._data()
mean = self.mean()
return sum((n - mean) ** 2 for n in data) / len(data)
@no_null_computations
def stdev(self):
"""
        Compute the standard deviation of this column.
:returns: :class:`decimal.Decimal`.
:raises: :exc:`.NullComputationError`
"""
return self.variance().sqrt()
@no_null_computations
def mad(self):
"""
Compute the `median absolute deviation <http://en.wikipedia.org/wiki/Median_absolute_deviation>`_
of this column.
:returns: :class:`decimal.Decimal`.
:raises: :exc:`.NullComputationError`
"""
data = self._data_sorted()
m = _median(data)
return _median(tuple(abs(n - m) for n in data))
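    # Illustrative example: for column data (1, 1, 2, 2, 4, 6, 9) the median
    # is 2, the absolute deviations are (1, 1, 0, 0, 2, 4, 7), and their
    # median -- the MAD -- is 1.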
class NumberType(ColumnType):
"""
Column type for :class:`NumberColumn`.
"""
def cast(self, d):
"""
Cast a single value to a :class:`decimal.Decimal`.
:returns: :class:`decimal.Decimal` or :code:`None`.
:raises: :exc:`.CastError`
"""
if isinstance(d, Decimal) or d is None:
return d
if isinstance(d, six.string_types):
            d = d.replace(',', '').strip()
if d.lower() in NULL_VALUES:
return None
if isinstance(d, float):
raise CastError('Can not convert float to Decimal for NumberColumn. Convert data to string first!')
try:
return Decimal(d)
except InvalidOperation:
raise CastError('Can not convert value "%s" to Decimal for NumberColumn.' % d)
def create_column(self, table, index):
return NumberColumn(table, index)
class DateColumn(Column):
"""
A column containing :func:`datetime.date` data.
"""
def min(self):
"""
Compute the earliest date in this column.
:returns: :class:`datetime.date`.
"""
return min(self._data_without_nulls())
def max(self):
"""
Compute the latest date in this column.
:returns: :class:`datetime.date`.
"""
return max(self._data_without_nulls())
class DateType(ColumnType):
"""
Column type for :class:`DateColumn`.
"""
def __init__(self, date_format=None):
self.date_format = date_format
def cast(self, d):
"""
Cast a single value to a :class:`datetime.date`.
        :param d: A value to cast. If a :func:`datetime.strptime` format
            string was passed to this type's constructor, it is used for
            parsing dates in this column.
        :returns: :class:`datetime.date` or :code:`None`.
:raises: :exc:`.CastError`
"""
if isinstance(d, datetime.date) or d is None:
return d
if isinstance(d, six.string_types):
d = d.strip()
if d.lower() in NULL_VALUES:
return None
if self.date_format:
return datetime.datetime.strptime(d, self.date_format).date()
return parse(d).date()
def create_column(self, table, index):
return DateColumn(table, index)
class ColumnIterator(six.Iterator):
"""
Iterator over :class:`Column` instances.
:param table: The :class:`.Table` containing the columns.
"""
def __init__(self, table):
self._table = table
self._i = 0
def __next__(self):
try:
import numpy as np
import os, sys
import pickle as cpickle
import csv, json
import datetime, time
import torch
from tensorboardX import SummaryWriter
import torchvision
# Note: NegDistLayer, OracleLayer and RadiusLearner are referenced below;
# importing them from these modules is an assumption about this repo's layout.
from layer import Layer, NegDistLayer, OracleLayer
from radius_learner import RadiusLearner
from utils import project_state, save_video, attention, gaussian_attention, multivariate_gaussian_attention, render_image_for_video
from collections import OrderedDict, defaultdict
# The class below implements the hierarchical agent
class Agent():
def __init__(self,FLAGS, env, agent_params):
self.FLAGS = FLAGS
if self.FLAGS.torch:
import torch
import random
random.seed(self.FLAGS.seed)
torch.manual_seed(self.FLAGS.seed)
torch.cuda.manual_seed_all(self.FLAGS.seed)
torch.cuda.manual_seed(self.FLAGS.seed)
np.random.seed(self.FLAGS.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
self.sess = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
self.sess = tf.Session(config=config)
# Set subgoal testing ratio each layer will use
self.subgoal_test_perc = agent_params["subgoal_test_perc"]
if self.FLAGS.no_middle_level:
self.FLAGS.layers = 2
self.FLAGS.time_limit = 25
# Create agent with number of levels specified by user
lowest_layer_class = NegDistLayer if self.FLAGS.negative_distance or FLAGS.dense_reward else Layer
highest_layer_class = OracleLayer if self.FLAGS.oracle else Layer
self.layers = [lowest_layer_class(0, FLAGS, env, self.sess, agent_params)]
self.layers = self.layers + [Layer(i,FLAGS,env,self.sess,agent_params) for i in range(1, FLAGS.layers-1)]
self.layers.append(highest_layer_class(FLAGS.layers-1, FLAGS, env, self.sess, agent_params))
self.radius_learner = RadiusLearner(self.sess, env, self.FLAGS, 1) if self.FLAGS.radius_learner else None
# Below attributes will be used help save network parameters
self.saver = None
self.model_dir = None
self.model_loc = None
# Initialize actor/critic networks. Load saved parameters if not retraining
self.initialize_networks()
# goal_array will store goal for each layer of agent.
self.goal_array = [None for i in range(FLAGS.layers)]
self.current_state = None
# Track number of low-level actions executed
self.steps_taken = 0
self.total_steps_taken = 0
self.image_path = None
# Below hyperparameter specifies number of Q-value updates made after each episode
self.num_updates = 40
# Below parameters will be used to store performance results
self.performance_log = []
self.other_params = agent_params
self.end_goal_thresholds = torch.tensor(env.end_goal_thresholds, dtype=torch.float32, device=self.sess)
self.subgoal_thresholds = torch.tensor(env.subgoal_thresholds, dtype=torch.float32, device=self.sess)
# Determine whether or not each layer's goal was achieved. Also, if applicable, return the highest level whose goal was achieved.
def check_goals(self,env):
# goal_status is vector showing status of whether a layer's goal has been achieved
goal_status = [False for i in range(self.FLAGS.layers)]
max_lay_achieved = None
# Project current state onto the subgoal and end goal spaces
proj_subgoal = env.project_state_to_subgoal(None, self.current_state)
proj_end_goal = env.project_state_to_end_goal(None, self.current_state)
far_fn_glob = lambda goal, pos, thres: torch.abs(goal - pos) > thres
far_fn_rel = lambda goal, pos, thres: torch.abs(goal) > thres
for i in range(self.FLAGS.layers):
goal_achieved = True
far_fn = far_fn_rel if (self.layers[i].relative_subgoals) else far_fn_glob
# If at highest layer, compare to end goal thresholds
if i == self.FLAGS.layers - 1 or (i == self.FLAGS.layers - 2 and self.FLAGS.oracle):
# Check dimensions are appropriate
assert len(proj_end_goal) == len(self.goal_array[i]) == len(self.end_goal_thresholds), "Projected end goal, actual end goal, and end goal thresholds should have same dimensions"
# Check whether layer i's goal was achieved by checking whether projected state is within the goal achievement threshold
if far_fn(self.goal_array[i], proj_end_goal, self.end_goal_thresholds).any():
goal_achieved = False
# If not highest layer, compare to subgoal thresholds
else:
# Check that dimensions are appropriate
assert len(proj_subgoal) == len(self.goal_array[i]) == len(self.subgoal_thresholds), "Projected subgoal, actual subgoal, and subgoal thresholds should have same dimensions"
# Check whether layer i's goal was achieved by checking whether projected state is within the goal achievement threshold
if far_fn(self.goal_array[i], proj_subgoal, self.subgoal_thresholds).any():
goal_achieved = False
# If projected state within threshold of goal, mark as achieved
if goal_achieved:
goal_status[i] = True
max_lay_achieved = i
else:
goal_status[i] = False
return goal_status, max_lay_achieved
def datetimestamp(self, divider='-', datetime_divider='T'):
now = datetime.datetime.now()
return now.strftime(
'%Y{d}%m{d}%d{dtd}%H{d}%M{d}%S'.format(
d=divider, dtd=datetime_divider))
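# e.g. datetimestamp() -> '2017-01-14T09-30-05' with the default dividers.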
def initialize_networks(self):
if not self.FLAGS.torch:
import tensorflow as tf
model_vars = tf.trainable_variables()
self.saver = tf.train.Saver(model_vars)
lower_policy_weights = tf.trainable_variables("critic_0_") + tf.trainable_variables("actor_0_")
self.saver_lower_policy = tf.train.Saver(lower_policy_weights)
# higher_policy_weights = (tf.trainable_variables("critic_1_") + tf.trainable_variables("actor_1_") +
# tf.trainable_variables("critic_2_") + tf.trainable_variables("actor_2_"))
# self.saver_higher_policy = tf.train.Saver(higher_policy_weights)
# Set up directory for saving models
self.model_dir = os.path.join(os.getcwd(), 'models', self.FLAGS.exp_name, str(self.FLAGS.exp_num))
os.makedirs(self.model_dir, exist_ok=True)
model_working_dir = os.path.join(os.getcwd(), 'models_working')
model_negdist_dir = os.path.join(os.getcwd(), 'models_negative_distance')
self.model_loc = self.model_dir + ('/HAC.pkl' if self.FLAGS.torch else '/HAC.ckpt')
if not self.FLAGS.test:
self.tb_writer = SummaryWriter(self.model_dir)
self.performance_path = os.path.join(self.model_dir, "performance_log.txt")
self.metrics_path = os.path.join(self.model_dir, "progress.csv")
self.metrics_keys = OrderedDict( {key:None for key in sorted([
'critic_0/Q_val', 'critic_1/Q_val', 'critic_2/Q_val',
'critic_0/Q_loss', 'critic_1/Q_loss', 'critic_2/Q_loss',
'vpn_critic_2/Q_val', 'vpn_critic_2/Q_loss',
'actor_0/alpha', 'actor_1/alpha', 'actor_2/alpha',
'actor_2/mask_percentage', 'actor_2/sl_loss',
'steps_taken', 'test/success_rate', 'total_steps_taken',
'sample_time', 'train_time', 'epoch_time',
'subgoal_distances1', 'subgoal_distances2',
'goal_subgoal_distance1', 'goal_subgoal_distance2',
'lower_Q_val1', 'lower_Q_val2',
'radius_learner/mse_loss',
'buffer/Q_val_lower_clipped1', 'buffer/Q_val_lower1', 'buffer/Q_val_lower_too_low1',
'buffer/Q_val_lower_clipped2', 'buffer/Q_val_lower2', 'buffer/Q_val_lower_too_low2',
'actor_0/loss', 'actor_1/loss','actor_2/loss',
'bandit/q_loss', 'bandit/q_val','bandit/pi_loss', 'bandit/sigmas',
'bandit/rewards',
])})
if self.FLAGS.retrain:
with open(self.metrics_path, 'w+') as f:
print(','.join(self.metrics_keys.keys()), file=f)
with open(os.path.join(self.model_dir, "params.json"), 'w+') as f:
json.dump({
'run_id': "%s_%d_%s" % (self.FLAGS.exp_name, self.FLAGS.exp_num, self.datetimestamp()),
'run_command': ' '.join(sys.argv),
'target_networks': not self.FLAGS.no_target_net,
'num_Qs': self.FLAGS.num_Qs,
'exp_name': self.FLAGS.exp_name,
'exp_num': self.FLAGS.exp_num,
'negative_distance': self.FLAGS.negative_distance,
'bayes': self.FLAGS.bayes,
'oracle': self.FLAGS.oracle,
'variant': self.FLAGS.variant,
'actor_grads': self.FLAGS.actor_grads,
'orig_trans': self.FLAGS.orig_trans,
'relative_subgoals': self.FLAGS.relative_subgoals,
'sl_oracle': self.FLAGS.sl_oracle,
'semi_oracle': self.FLAGS.semi_oracle,
'radius_learner': self.FLAGS.radius_learner,
'priority_replay': self.FLAGS.priority_replay,
}, f, indent=4, sort_keys=True)
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
# Initialize actor/critic networks
if not self.FLAGS.torch:
import tensorflow as tf
self.sess.run(tf.global_variables_initializer())
if self.FLAGS.td3:
for layer in self.layers:
if hasattr(layer, 'actor') and hasattr(layer.actor, 'update_target_weights_init'):
print("Copying init weights for", layer.actor.actor_name)
self.sess.run(layer.actor.update_target_weights_init)
if hasattr(layer, 'critic') and hasattr(layer.critic, 'update_target_weights_init'):
print("Copying init weights for", layer.critic.critic_name)
self.sess.run(layer.critic.update_target_weights_init)
# If not retraining, restore weights
if self.FLAGS.retrain == False:
# self.saver_higher_policy.restore(self.sess, tf.train.latest_checkpoint(model_working_dir))
# self.saver_lower_policy.restore(self.sess, tf.train.latest_checkpoint(model_working_dir))
# self.saver.restore(self.sess, tf.train.latest_checkpoint(model_negdist_dir))
with open(os.path.join(self.model_dir, "params.json"), 'r') as f:
variant = json.load(f)
flag_dict = vars(self.FLAGS)
for variant_key in variant:
if variant_key in ['run_id', 'run_command', 'target_networks', 'variant', 'ddl']:
continue
assert variant[variant_key] == flag_dict[variant_key], (variant_key, variant[variant_key], flag_dict[variant_key])
assert variant['target_networks'] == (not flag_dict['no_target_net'])
print(self.model_dir)
if self.FLAGS.torch:
import torch
self.load_state_dict(torch.load(self.model_loc, self.sess))
else:
import tensorflow as tf
self.saver.restore(self.sess, tf.train.latest_checkpoint(self.model_dir))
def state_dict(self):
result = {}
for i, layer in enumerate(self.layers):
if hasattr(layer, 'actor'):
result['layer_%d_actor' % i] = layer.actor.state_dict()
if hasattr(layer, 'critic'):
result['layer_%d_critic' % i] = layer.critic.state_dict()
if self.radius_learner is not None:
result['radius_learner'] = self.radius_learner.state_dict()
return result
def load_state_dict(self, state_dict):
for i, layer in enumerate(self.layers):
if hasattr(layer, 'actor'):
layer.actor.load_state_dict(state_dict['layer_%d_actor' % i])
if hasattr(layer, 'critic'):
layer.critic.load_state_dict(state_dict['layer_%d_critic' % i])
if self.radius_learner is not None:
self.radius_learner.load_state_dict(state_dict['radius_learner'])
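# Round-trip sketch (assumes FLAGS.torch): torch.save(agent.state_dict(), path)
# followed by agent.load_state_dict(torch.load(path, map_location=agent.sess))
# restores every layer's actor/critic and, if present, the radius learner.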
# Save neural network parameters
def save_model(self, episode, success_rate = None):
if self.FLAGS.torch:
import torch
if success_rate is not None and success_rate >= 0:
extra_location = '{}/HAC_{}_{}.pkl'.format(self.model_dir, episode, int(success_rate))
torch.save(self.state_dict(), extra_location)
torch.save(self.state_dict(), self.model_loc)
else:
self.saver.save(self.sess, self.model_loc, global_step=episode)
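# With global_step set, the TF saver writes per-episode checkpoints such as
# HAC.ckpt-<episode> alongside the checkpoint index file.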
# Update actor and critic networks for each layer
def learn(self, env, metrics):
for i in range(len(self.layers)):
self.layers[i].learn(env, self, self.num_updates, metrics)
# self.layers[0].learn(self.num_updates)
metrics['total_steps_taken'] = self.total_steps_taken
metrics['steps_taken'] = self.steps_taken
if not self.FLAGS.train_only:
metrics['test/success_rate'] = self.performance_log[-1]
# Train agent for an episode
def train(self, env, episode_num, batch):
metrics = {}
epoch_start = time.time()
# Select initial state from within the initial state space, defined in environment.py
self.current_state = torch.tensor(env.reset_sim(self.goal_array[self.FLAGS.layers - 1]), device=self.sess, dtype=torch.float32)
if "ant" in env.name.lower():
print("Initial Ant Position: ", self.current_state[:2])
# print("Initial State: ", self.current_state)
if self.FLAGS.save_video:
self.image_path = [env.crop_raw(env.render(mode='rgb_array'))]
# Select final goal from final goal space, defined in "design_agent_and_env.py"
self.goal_array[self.FLAGS.layers - 1] = torch.tensor(env.get_next_goal(self.FLAGS.test), dtype=torch.float32, device=self.sess)
print("Next End Goal absolute: ", self.goal_array[self.FLAGS.layers - 1])
#if self.FLAGS.relative_subgoals:
# self.goal_array[self.FLAGS.layers - 1] -= project_state(env, self.FLAGS, 0, self.current_state)
#print("Next End Goal relative: ", self.goal_array[self.FLAGS.layers - 1])
# Reset step counter
self.steps_taken = 0
# Train for an episode
goal_status, max_lay_achieved = self.layers[self.FLAGS.layers-1].train(self,env, metrics, episode_num = episode_num)
sample_end = time.time()
metrics['sample_time'] = sample_end - epoch_start
for i_layer in range(self.FLAGS.layers):
for key, values in self.layers[i_layer].agg_metrics.items():
metrics[key+str(i_layer)] = np.mean(values)
self.layers[i_layer].agg_metrics = defaultdict(list)
if self.FLAGS.vpn and self.FLAGS.learn_sigma:
for key, values in self.layers[-1].actor.bandit.agg_metrics.items():
metrics[key] = np.mean(values)
self.layers[-1].actor.bandit.agg_metrics = defaultdict(list)
if self.FLAGS.save_video:
save_video(self.image_path, os.path.join(self.model_dir, "test_episode_%d.avi"%episode_num))
del self.image_path[:]
# Update actor/critic networks if not testing
print("Steps taken this episode:", self.steps_taken)
if not self.FLAGS.test:
self.learn(env, metrics)
epoch_end = time.time()
metrics['train_time'] = epoch_end - sample_end
metrics['epoch_time'] = epoch_end - epoch_start
self.log_metrics(metrics, episode_num, env)
# Return whether end goal was achieved
return goal_status[self.FLAGS.layers-1]
# Save performance evaluations
def log_performance(self, success_rate):
# Add latest success_rate to list
self.performance_log.append(success_rate)
# Save log
with open(self.performance_path, "w+") as f:
print(self.performance_log, file=f)
def log_metrics(self, metrics, episode_num, env):
if self.FLAGS.test: return
for key, metric in metrics.items():
self.tb_writer.add_scalar(key, metric, self.total_steps_taken)
if self.FLAGS.vpn and episode_num == 1: # Log once per batch, i.e. once every 100 training episodes
def subtract_channels(tensor, dim):
grid, pos
"""
return pulumi.get(self, "server_root_ca_certificate")
@property
@pulumi.getter
def site(self) -> Optional[str]:
"""
The Active Directory site the service will limit Domain Controller discovery to
"""
return pulumi.get(self, "site")
@property
@pulumi.getter(name="smbServerName")
def smb_server_name(self) -> Optional[str]:
"""
NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
"""
return pulumi.get(self, "smb_server_name")
@property
@pulumi.getter
def username(self) -> Optional[str]:
"""
Username of Active Directory domain administrator
"""
return pulumi.get(self, "username")
@pulumi.output_type
class DailyScheduleResponse(dict):
"""
Daily Schedule properties
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "snapshotsToKeep":
suggest = "snapshots_to_keep"
elif key == "usedBytes":
suggest = "used_bytes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DailyScheduleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DailyScheduleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DailyScheduleResponse.__key_warning(key)
return super().get(key, default)
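# Example of the shim above (sketch): resp["snapshotsToKeep"] triggers the
# key warning and points callers at the resp.snapshots_to_keep property getter.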
def __init__(__self__, *,
hour: Optional[int] = None,
minute: Optional[int] = None,
snapshots_to_keep: Optional[int] = None,
used_bytes: Optional[float] = None):
"""
Daily Schedule properties
:param int hour: Indicates which hour in UTC timezone a snapshot should be taken
:param int minute: Indicates which minute snapshot should be taken
:param int snapshots_to_keep: Daily snapshot count to keep
:param float used_bytes: Resource size in bytes, current storage usage for the volume in bytes
"""
if hour is not None:
pulumi.set(__self__, "hour", hour)
if minute is not None:
pulumi.set(__self__, "minute", minute)
if snapshots_to_keep is not None:
pulumi.set(__self__, "snapshots_to_keep", snapshots_to_keep)
if used_bytes is not None:
pulumi.set(__self__, "used_bytes", used_bytes)
@property
@pulumi.getter
def hour(self) -> Optional[int]:
"""
Indicates which hour in UTC timezone a snapshot should be taken
"""
return pulumi.get(self, "hour")
@property
@pulumi.getter
def minute(self) -> Optional[int]:
"""
Indicates which minute snapshot should be taken
"""
return pulumi.get(self, "minute")
@property
@pulumi.getter(name="snapshotsToKeep")
def snapshots_to_keep(self) -> Optional[int]:
"""
Daily snapshot count to keep
"""
return pulumi.get(self, "snapshots_to_keep")
@property
@pulumi.getter(name="usedBytes")
def used_bytes(self) -> Optional[float]:
"""
Resource size in bytes, current storage usage for the volume in bytes
"""
return pulumi.get(self, "used_bytes")
@pulumi.output_type
class ExportPolicyRuleResponse(dict):
"""
Volume Export Policy Rule
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowedClients":
suggest = "allowed_clients"
elif key == "hasRootAccess":
suggest = "has_root_access"
elif key == "kerberos5ReadOnly":
suggest = "kerberos5_read_only"
elif key == "kerberos5ReadWrite":
suggest = "kerberos5_read_write"
elif key == "kerberos5iReadOnly":
suggest = "kerberos5i_read_only"
elif key == "kerberos5iReadWrite":
suggest = "kerberos5i_read_write"
elif key == "kerberos5pReadOnly":
suggest = "kerberos5p_read_only"
elif key == "kerberos5pReadWrite":
suggest = "kerberos5p_read_write"
elif key == "ruleIndex":
suggest = "rule_index"
elif key == "unixReadOnly":
suggest = "unix_read_only"
elif key == "unixReadWrite":
suggest = "unix_read_write"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ExportPolicyRuleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ExportPolicyRuleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ExportPolicyRuleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allowed_clients: Optional[str] = None,
cifs: Optional[bool] = None,
has_root_access: Optional[bool] = None,
kerberos5_read_only: Optional[bool] = None,
kerberos5_read_write: Optional[bool] = None,
kerberos5i_read_only: Optional[bool] = None,
kerberos5i_read_write: Optional[bool] = None,
kerberos5p_read_only: Optional[bool] = None,
kerberos5p_read_write: Optional[bool] = None,
nfsv3: Optional[bool] = None,
nfsv41: Optional[bool] = None,
rule_index: Optional[int] = None,
unix_read_only: Optional[bool] = None,
unix_read_write: Optional[bool] = None):
"""
Volume Export Policy Rule
:param str allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
:param bool cifs: Allows CIFS protocol
:param bool has_root_access: Has root access to volume
:param bool kerberos5_read_only: Kerberos5 Read only access. To be used with swagger version 2020-05-01 or later
:param bool kerberos5_read_write: Kerberos5 Read and write access. To be used with swagger version 2020-05-01 or later
:param bool kerberos5i_read_only: Kerberos5i Read only access. To be used with swagger version 2020-05-01 or later
:param bool kerberos5i_read_write: Kerberos5i Read and write access. To be used with swagger version 2020-05-01 or later
:param bool kerberos5p_read_only: Kerberos5p Read only access. To be used with swagger version 2020-05-01 or later
:param bool kerberos5p_read_write: Kerberos5p Read and write access. To be used with swagger version 2020-05-01 or later
:param bool nfsv3: Allows NFSv3 protocol. Enable only for NFSv3 type volumes
:param bool nfsv41: Allows NFSv4.1 protocol. Enable only for NFSv4.1 type volumes
:param int rule_index: Order index
:param bool unix_read_only: Read only access
:param bool unix_read_write: Read and write access
"""
if allowed_clients is not None:
pulumi.set(__self__, "allowed_clients", allowed_clients)
if cifs is not None:
pulumi.set(__self__, "cifs", cifs)
if has_root_access is None:
has_root_access = True
if has_root_access is not None:
pulumi.set(__self__, "has_root_access", has_root_access)
if kerberos5_read_only is None:
kerberos5_read_only = False
if kerberos5_read_only is not None:
pulumi.set(__self__, "kerberos5_read_only", kerberos5_read_only)
if kerberos5_read_write is None:
kerberos5_read_write = False
if kerberos5_read_write is not None:
pulumi.set(__self__, "kerberos5_read_write", kerberos5_read_write)
if kerberos5i_read_only is None:
kerberos5i_read_only = False
if kerberos5i_read_only is not None:
pulumi.set(__self__, "kerberos5i_read_only", kerberos5i_read_only)
if kerberos5i_read_write is None:
kerberos5i_read_write = False
if kerberos5i_read_write is not None:
pulumi.set(__self__, "kerberos5i_read_write", kerberos5i_read_write)
if kerberos5p_read_only is None:
kerberos5p_read_only = False
if kerberos5p_read_only is not None:
pulumi.set(__self__, "kerberos5p_read_only", kerberos5p_read_only)
if kerberos5p_read_write is None:
kerberos5p_read_write = False
if kerberos5p_read_write is not None:
pulumi.set(__self__, "kerberos5p_read_write", kerberos5p_read_write)
if nfsv3 is not None:
pulumi.set(__self__, "nfsv3", nfsv3)
if nfsv41 is not None:
pulumi.set(__self__, "nfsv41", nfsv41)
if rule_index is not None:
pulumi.set(__self__, "rule_index", rule_index)
if unix_read_only is not None:
pulumi.set(__self__, "unix_read_only", unix_read_only)
if unix_read_write is not None:
pulumi.set(__self__, "unix_read_write", unix_read_write)
@property
@pulumi.getter(name="allowedClients")
def allowed_clients(self) -> Optional[str]:
"""
Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
"""
return pulumi.get(self, "allowed_clients")
@property
@pulumi.getter
def cifs(self) -> Optional[bool]:
"""
Allows CIFS protocol
"""
return pulumi.get(self, "cifs")
@property
@pulumi.getter(name="hasRootAccess")
def has_root_access(self) -> Optional[bool]:
"""
Has root access to volume
"""
return pulumi.get(self, "has_root_access")
@property
@pulumi.getter(name="kerberos5ReadOnly")
def kerberos5_read_only(self) -> Optional[bool]:
"""
Kerberos5 Read only access. To be used with swagger version 2020-05-01 or later
"""
return pulumi.get(self, "kerberos5_read_only")
@property
@pulumi.getter(name="kerberos5ReadWrite")
def kerberos5_read_write(self) -> Optional[bool]:
"""
Kerberos5 Read and write access. To be used with swagger version 2020-05-01 or later
"""
return pulumi.get(self, "kerberos5_read_write")
@property
@pulumi.getter(name="kerberos5iReadOnly")
def kerberos5i_read_only(self) -> Optional[bool]:
"""
Kerberos5i Read only access. To be used with swagger version 2020-05-01 or later
"""
return pulumi.get(self, "kerberos5i_read_only")
@property
@pulumi.getter(name="kerberos5iReadWrite")
def kerberos5i_read_write(self) -> Optional[bool]:
"""
Kerberos5i Read and write access. To be used with swagger version 2020-05-01 or later
"""
return pulumi.get(self, "kerberos5i_read_write")
@property
@pulumi.getter(name="kerberos5pReadOnly")
def kerberos5p_read_only(self) -> Optional[bool]:
"""
Kerberos5p Read only access. To be used with swagger version 2020-05-01 or later
"""
return pulumi.get(self, "kerberos5p_read_only")
@property
@pulumi.getter(name="kerberos5pReadWrite")
def kerberos5p_read_write(self) -> Optional[bool]:
"""
Kerberos5p Read and write access. To be used with swagger version 2020-05-01 or later
"""
return pulumi.get(self, "kerberos5p_read_write")
@property
@pulumi.getter
def nfsv3(self) -> Optional[bool]:
"""
Allows NFSv3 protocol. Enable only for NFSv3 type volumes
"""
return pulumi.get(self, "nfsv3")
@property
@pulumi.getter
def nfsv41(self) -> Optional[bool]:
"""
Allows NFSv4.1 protocol. Enable only for NFSv4.1 type volumes
"""
return pulumi.get(self, "nfsv41")
@property
@pulumi.getter(name="ruleIndex")
def rule_index(self) -> Optional[int]:
"""
Order index
"""
return pulumi.get(self, "rule_index")
@property
@pulumi.getter(name="unixReadOnly")
def unix_read_only(self) -> Optional[bool]:
"""
Read only access
"""
return pulumi.get(self, "unix_read_only")
@property
@pulumi.getter(name="unixReadWrite")
def unix_read_write(self) -> Optional[bool]:
"""
Read and write access
"""
return pulumi.get(self, "unix_read_write")
@pulumi.output_type
class HourlyScheduleResponse(dict):
"""
Hourly Schedule properties
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "snapshotsToKeep":
suggest = "snapshots_to_keep"
elif key == "usedBytes":
suggest = "used_bytes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in HourlyScheduleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
HourlyScheduleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
HourlyScheduleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
minute: Optional[int] = None,
snapshots_to_keep: Optional[int] = None,
used_bytes: Optional[float] = None):
"""
Hourly Schedule properties
:param int minute: Indicates which minute snapshot should be taken
:param int snapshots_to_keep: Hourly snapshot count to keep
:param float used_bytes: Resource size in bytes, current storage usage for the volume in bytes
"""
if minute is not None:
pulumi.set(__self__, "minute", minute)
if snapshots_to_keep is not None:
pulumi.set(__self__, "snapshots_to_keep", snapshots_to_keep)
if used_bytes is not None:
pulumi.set(__self__, "used_bytes", used_bytes)
"term": 2,
"course_project": False
},
{
"id": 4177,
"name": "Комплексные системы защиты информации",
"term": 8,
"course_project": False
},
{
"id": 474,
"name": "Комплексный курсовой проект",
"term": 8,
"course_project": False
},
{
"id": 474,
"name": "Комплексный курсовой проект",
"term": 10,
"course_project": False
},
{
"id": 34089,
"name": "Композиция и проектирование оптических систем / Composing and optical system design",
"term": 4,
"course_project": False
},
{
"id": 25523,
"name": "<NAME>",
"term": 2,
"course_project": False
},
{
"id": 25523,
"name": "Компьютерная визуализация",
"term": 6,
"course_project": False
},
{
"id": 25503,
"name": "Компьютерная геометрия и графика",
"term": 4,
"course_project": False
},
{
"id": 25504,
"name": "Компьютерная графика реального времени",
"term": 2,
"course_project": False
},
{
"id": 25509,
"name": "<NAME>",
"term": 4,
"course_project": False
},
{
"id": 25525,
"name": "Компьютерное зрение",
"term": 2,
"course_project": False
},
{
"id": 25525,
"name": "Компьютерное зрение",
"term": 6,
"course_project": False
},
{
"id": 30794,
"name": "Компьютерное зрение в робототехнических приложениях / Computer vision for robotic applications",
"term": 2,
"course_project": False
},
{
"id": 31538,
"name": "Компьютерное проектирование низкотемпературных систем",
"term": 2,
"course_project": False
},
{
"id": 1505,
"name": "Компьютерное управление мехатронными системами",
"term": 8,
"course_project": False
},
{
"id": 25488,
"name": "Компьютерные методы и моделирование в химии материалов/ Computational methods and modeling in materials chemistry",
"term": 2,
"course_project": False
},
{
"id": 25488,
"name": "Компьютерные методы и моделирование в химии материалов/ Computational methods and modeling in materials chemistry",
"term": 4,
"course_project": False
},
{
"id": 490,
"name": "Компьютерные сети",
"term": 6,
"course_project": False
},
{
"id": 490,
"name": "<NAME>",
"term": 8,
"course_project": False
},
{
"id": 25526,
"name": "<NAME>и",
"term": 6,
"course_project": False
},
{
"id": 25520,
"name": "Компьютерные технологии моделирования физических процессов",
"term": 2,
"course_project": False
},
{
"id": 27267,
"name": "Компьютерные технологии мультимедиа",
"term": 2,
"course_project": False
},
{
"id": 34069,
"name": "Конструирование и точностной анализ оптических приборов",
"term": 2,
"course_project": False
},
{
"id": 34069,
"name": "Конструирование и точностной анализ оптических приборов",
"term": 2,
"course_project": True
},
{
"id": 30858,
"name": "Конструирование и точностной анализ оптических приборов /Design of optical devices and systems+project",
"term": 2,
"course_project": False
},
{
"id": 30858,
"name": "Конструирование и точностной анализ оптических приборов /Design of optical devices and systems+project",
"term": 2,
"course_project": True
},
{
"id": 25532,
"name": "Конструирование оптических приборов",
"term": 6,
"course_project": False
},
{
"id": 25532,
"name": "Конструирование оптических приборов",
"term": 6,
"course_project": True
},
{
"id": 6713,
"name": "Конструирование технологической оснастки",
"term": 8,
"course_project": False
},
{
"id": 1524,
"name": "Конструкторско-технологическое обеспечение производства ЭВМ",
"term": 8,
"course_project": False
},
{
"id": 31659,
"name": "Конструкторско-технологическое обеспечение точности в машиностроении",
"term": 4,
"course_project": False
},
{
"id": 34550,
"name": "Контроль качества изделий цифрового производства",
"term": 2,
"course_project": False
},
{
"id": 9893,
"name": "Контроль качества продукции пищевых и биотехнологических производств",
"term": 8,
"course_project": False
},
{
"id": 9893,
"name": "Контроль качества продукции пищевых и биотехнологических производств",
"term": 10,
"course_project": False
},
{
"id": 25535,
"name": "Конфликтология",
"term": 6,
"course_project": False
},
{
"id": 29644,
"name": "Концепция развития низкотемпературных систем",
"term": 4,
"course_project": False
},
{
"id": 33177,
"name": "Конъюнктура и направления политики / Situation and Policies",
"term": 4,
"course_project": False
},
{
"id": 34472,
"name": "Корпоративная коммуникация",
"term": 2,
"course_project": False
},
{
"id": 25537,
"name": "Корпоративные информационные системы",
"term": 6,
"course_project": False
},
{
"id": 25541,
"name": "Корпоративный экологический менеджмент",
"term": 2,
"course_project": False
},
{
"id": 35650,
"name": "Корпоративный экологический менеджмент / Corporate Environmental Management",
"term": 2,
"course_project": False
},
{
"id": 25542,
"name": "Креативные технологии",
"term": 2,
"course_project": False
},
{
"id": 25542,
"name": "Креативные технологии",
"term": 4,
"course_project": False
},
{
"id": 25542,
"name": "Креативные технологии",
"term": 6,
"course_project": False
},
{
"id": 5177,
"name": "Криогенные машины",
"term": 8,
"course_project": False
},
{
"id": 5176,
"name": "Криогенные системы и установки",
"term": 8,
"course_project": False
},
{
"id": 3729,
"name": "Криптографические методы защиты информации",
"term": 8,
"course_project": False
},
{
"id": 16856,
"name": "Криптографические методы и средства обеспечения информационной безопасности инфокоммуникаций",
"term": 8,
"course_project": False
},
{
"id": 34464,
"name": "Криптографические средства обеспечения информационной безопасности",
"term": 2,
"course_project": False
},
{
"id": 35549,
"name": "Критическая теория цифровых медиа / Critical Theory of Digital Media",
"term": 2,
"course_project": False
},
{
"id": 523,
"name": "Культурология",
"term": 4,
"course_project": False
},
{
"id": 523,
"name": "Культурология",
"term": 6,
"course_project": False
},
{
"id": 28031,
"name": "Кураторство в научных музеях и центрах популяризации науки",
"term": 2,
"course_project": False
},
{
"id": 40435,
"name": "Курс по ревёрс-инжинирингу",
"term": 2,
"course_project": False
},
{
"id": 40435,
"name": "Курс по ревёрс-инжинирингу",
"term": 4,
"course_project": False
},
{
"id": 40435,
"name": "Курс по ревёрс-инжинирингу",
"term": 6,
"course_project": False
},
{
"id": 25556,
"name": "Лаборатория академического письма",
"term": 2,
"course_project": False
},
{
"id": 25556,
"name": "Лаборатория академического письма",
"term": 6,
"course_project": False
},
{
"id": 18936,
"name": "Лазерная технология в медико-биологических исследованиях",
"term": 8,
"course_project": False
},
{
"id": 34237,
"name": "Лазерные конденсированные среды",
"term": 2,
"course_project": False
},
{
"id": 34237,
"name": "Лазерные конденсированные среды",
"term": 2,
"course_project": True
},
{
"id": 30891,
"name": "Лазерные конденсированные среды / Laser condensed matter",
"term": 2,
"course_project": False
},
{
"id": 25559,
"name": "Лазерные микро- и нанотехнологии",
"term": 2,
"course_project": False
},
{
"id": 276,
"name": "Лазерные технологии",
"term": 6,
"course_project": False
},
{
"id": 25566,
"name": "Лазерные технологии",
"term": 6,
"course_project": False
},
{
"id": 25570,
"name": "Лазерные, нелинейные и регистрирующие среды в фемтотехнологиях",
"term": 6,
"course_project": False
},
{
"id": 25564,
"name": "Лазерные, нелинейные, регистрирующие среды",
"term": 6,
"course_project": False
},
{
"id": 18953,
"name": "Лазеры для индустриальных применений",
"term": 8,
"course_project": False
},
{
"id": 25574,
"name": "<NAME>",
"term": 2,
"course_project": False
},
{
"id": 36229,
"name": "<NAME>",
"term": 2,
"course_project": False
},
{
"id": 283,
"name": "Логистика",
"term": 8,
"course_project": False
},
{
"id": 25582,
"name": "Магистральные сети передачи данных",
"term": 2,
"course_project": False
},
{
"id": 32969,
"name": "Майнинг социальных данных и методы их анализа / Social Data Mining and Analysis",
"term": 2,
"course_project": False
},
{
"id": 19730,
"name": "Маркетинг в инновационных сферах и отраслях",
"term": 8,
"course_project": False
},
{
"id": 32343,
"name": "Маркетинг в системе здравоохранения",
"term": 2,
"course_project": False
},
{
"id": 34857,
"name": "<NAME>ых вызовов",
"term": 2,
"course_project": False
},
{
"id": 25593,
"name": "Маркетинговые исследования на основе патентной информации",
"term": 2,
"course_project": False
},
{
"id": 25593,
"name": "Маркетинговые исследования на основе патентной информации",
"term": 2,
"course_project": True
},
{
"id": 34405,
"name": "Маркетинговые технологии в цифровой экономике",
"term": 2,
"course_project": False
},
{
"id": 302,
"name": "Математика",
"term": 2,
"course_project": False
},
{
"id": 302,
"name": "Математика",
"term": 4,
"course_project": False
},
{
"id": 25597,
"name": "Математика",
"term": 2,
"course_project": False
},
{
"id": 31368,
"name": "Математика (базовый уровень)",
"term": 2,
"course_project": False
},
{
"id": 31368,
"name": "Математика (базовый уровень)",
"term": 4,
"course_project": False
},
{
"id": 31369,
"name": "Математика (продвинутый уровень)",
"term": 2,
"course_project": False
},
{
"id": 25598,
"name": "Математическая лингвистика",
"term": 6,
"course_project": False
},
{
"id": 25600,
"name": "<NAME>",
"term": 2,
"course_project": False
},
{
"id": 25600,
"name": "<NAME>",
"term": 4,
"course_project": False
},
{
"id": 25600,
"name": "<NAME>",
"term": 4,
"course_project": True
},
{
"id": 25599,
"name": "<NAME>",
"term": 2,
"course_project": False
},
{
"id": 25599,
"name": "<NAME>",
"term": 4,
"course_project": False
},
{
"id": 29649,
"name": "<NAME>",
"term": 4,
"course_project": False
},
{
"id": 29649,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 25601,
"name": "Математические методы в физике",
"term": 2,
"course_project": False
},
{
"id": 35989,
"name": "Математические методы в физике / Mathematical Methods for Physicists",
"term": 2,
"course_project": False
},
{
"id": 32420,
"name": "Математические методы в экономике",
"term": 4,
"course_project": False
},
{
"id": 34921,
"name": "Математические методы и модели принятия управленческих решений",
"term": 2,
"course_project": False
},
{
"id": 25614,
"name": "Математические методы компьютерных технологий в научных исследованиях",
"term": 2,
"course_project": False
}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: Bpsk Mod
# Author: <NAME>
# Copyright: MIT
# GNU Radio version: 3.8.3.1
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print("Warning: failed to XInitThreads()")
from PyQt5 import Qt
from gnuradio import qtgui
from gnuradio.filter import firdes
import sip
from gnuradio import blocks
import pmt
from gnuradio import digital
from gnuradio import gr
import sys
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import eng_notation
from gnuradio.qtgui import Range, RangeWidget
from gnuradio import qtgui
class bpsk_mod(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Bpsk Mod")
Qt.QWidget.__init__(self)
self.setWindowTitle("Bpsk Mod")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "bpsk_mod")
try:
if StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
self.restoreGeometry(self.settings.value("geometry").toByteArray())
else:
self.restoreGeometry(self.settings.value("geometry"))
except:
pass
##################################################
# Variables
##################################################
self.sps = sps = 4
self.nfilts = nfilts = 4
self.tunning = tunning = 50
self.taps = taps = [1.0, 0.25-0.25j, 0.50 + 0.10j, -0.3 + 0.2j]
self.samp_rate = samp_rate = 50e3
self.rrc_taps = rrc_taps = firdes.root_raised_cosine(nfilts, nfilts, 1.0/float(sps), 0.35, 45*nfilts)
self.psk_const = psk_const = digital.constellation_8psk().base()
self.phase_bw = phase_bw = 62.8e-3
self.ntaps = ntaps = 15
self.noise = noise = 50
self.loop_order = loop_order = 2
self.excess_bw = excess_bw = 0.350
self.delay_tx = delay_tx = 25
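# Note (sketch): with nfilts = 4 and sps = 4 the RRC prototype above is
# firdes.root_raised_cosine(4, 4, 0.25, 0.35, 180), i.e. 180 taps spanning
# 45 symbols for the polyphase clock-sync filterbank.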
##################################################
# Blocks
##################################################
self._delay_tx_range = Range(5, 100, 1, 25, 200)
self._delay_tx_win = RangeWidget(self._delay_tx_range, self.set_delay_tx, 'delay_tx', "counter_slider", float)
self.top_grid_layout.addWidget(self._delay_tx_win, 0, 2, 1, 2)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._tunning_range = Range(0, 100, 1, 50, 200)
self._tunning_win = RangeWidget(self._tunning_range, self.set_tunning, 'tunning', "counter_slider", float)
self.top_grid_layout.addWidget(self._tunning_win, 0, 1, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_2_0 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
'Received Signal', #name
1 #number of inputs
)
self.qtgui_time_sink_x_2_0.set_update_time(0.10)
self.qtgui_time_sink_x_2_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_2_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_2_0.enable_tags(True)
self.qtgui_time_sink_x_2_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_2_0.enable_autoscale(False)
self.qtgui_time_sink_x_2_0.enable_grid(False)
self.qtgui_time_sink_x_2_0.enable_axis_labels(True)
self.qtgui_time_sink_x_2_0.enable_control_panel(False)
self.qtgui_time_sink_x_2_0.enable_stem_plot(False)
labels = ['Signal 1', 'Signal 2', 'Signal 3', 'Signal 4', 'Signal 5',
'Signal 6', 'Signal 7', 'Signal 8', 'Signal 9', 'Signal 10']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ['blue', 'red', 'green', 'black', 'cyan',
'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
for i in range(2):
if len(labels[i]) == 0:
if (i % 2 == 0):
self.qtgui_time_sink_x_2_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_2_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_2_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_2_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_2_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_2_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_2_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_2_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_2_0_win = sip.wrapinstance(self.qtgui_time_sink_x_2_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_2_0_win, 2, 0, 1, 4)
for r in range(2, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_2 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
'Transmitted Signal', #name
1 #number of inputs
)
self.qtgui_time_sink_x_2.set_update_time(0.10)
self.qtgui_time_sink_x_2.set_y_axis(-1, 1)
self.qtgui_time_sink_x_2.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_2.enable_tags(True)
self.qtgui_time_sink_x_2.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_2.enable_autoscale(False)
self.qtgui_time_sink_x_2.enable_grid(False)
self.qtgui_time_sink_x_2.enable_axis_labels(True)
self.qtgui_time_sink_x_2.enable_control_panel(False)
self.qtgui_time_sink_x_2.enable_stem_plot(False)
labels = ['Signal 1', 'Signal 2', 'Signal 3', 'Signal 4', 'Signal 5',
'Signal 6', 'Signal 7', 'Signal 8', 'Signal 9', 'Signal 10']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ['blue', 'red', 'green', 'black', 'cyan',
'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
for i in range(2):
if len(labels[i]) == 0:
if (i % 2 == 0):
self.qtgui_time_sink_x_2.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_2.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_2.set_line_label(i, labels[i])
self.qtgui_time_sink_x_2.set_line_width(i, widths[i])
self.qtgui_time_sink_x_2.set_line_color(i, colors[i])
self.qtgui_time_sink_x_2.set_line_style(i, styles[i])
self.qtgui_time_sink_x_2.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_2.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_2_win = sip.wrapinstance(self.qtgui_time_sink_x_2.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_2_win, 1, 0, 1, 4)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
'Input output signal', #name
3 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
self.qtgui_time_sink_x_0.enable_stem_plot(False)
labels = ['Rx', 'Offset', 'Tx', 'Signal 4', 'Signal 5',
'Signal 6', 'Signal 7', 'Signal 8', 'Signal 9', 'Signal 10']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ['blue', 'red', 'green', 'black', 'cyan',
'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
for i in range(3):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win, 5, 0, 1, 4)
for r in range(5, 6):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_const_sink_x_0_0 = qtgui.const_sink_c(
1024, #size
'Constellation Receiver', #name
1 #number of inputs
)
self.qtgui_const_sink_x_0_0.set_update_time(0.10)
self.qtgui_const_sink_x_0_0.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0_0.enable_autoscale(False)
self.qtgui_const_sink_x_0_0.enable_grid(False)
self.qtgui_const_sink_x_0_0.enable_axis_labels(True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in range(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_0_win, 3, 2, 2, 2)
for r in range(3, 5):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_const_sink_x_0 = qtgui.const_sink_c(
1024, #size
'Constellation Transmitter', #name
1 #number of inputs
)
self.qtgui_const_sink_x_0.set_update_time(0.10)
self.qtgui_const_sink_x_0.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0.enable_autoscale(False)
self.qtgui_const_sink_x_0.enable_grid(False)
self.qtgui_const_sink_x_0.enable_axis_labels(True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in range(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_win, 3, 0, 2, 2)
for r in range(3, 5):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self._noise_range = Range(0, 100, 1, 50, 200)
self._noise_win = RangeWidget(self._noise_range, self.set_noise, 'noise', "counter_slider", float)
self.top_grid_layout.addWidget(self._noise_win, 0, 0, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self.digital_pfb_clock_sync_xxx_0 = digital.pfb_clock_sync_ccf(sps, phase_bw, rrc_taps, 32, 16, 1.5, 8)
self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
self.digital_costas_loop_cc_0 = digital.costas_loop_cc(phase_bw, 8, False)
self.digital_constellation_modulator_0 = digital.generic_mod(
constellation=psk_const,
differential=True,
samples_per_symbol=4,
pre_diff_code=True,
excess_bw=0.35,
verbose=False,
log=False)
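# The modulator above emits differentially-encoded 8PSK symbols (despite the
# flowgraph's BPSK title) at 4 samples/symbol with a 0.35 excess-bandwidth
# RRC pulse, per the psk_const and parameters set above.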
self.digital_constellation_decoder_cb_0 = digital.constellation_decoder_cb(psk_const)
self.digital_cma_equalizer_cc_0 = digital.cma_equalizer_cc(15, 1, 0.01, 8)
self.blocks_unpacked_to_packed_xx_0_0_0 = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST)
self.blocks_unpacked_to_packed_xx_0_0 = blocks.unpacked_to_packed_bb(8, gr.GR_MSB_FIRST)
self.blocks_unpack_k_bits_bb_0 = blocks.unpack_k_bits_bb(8)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_char*1, '/home/supersonic/GitHub/Post-Shannon-SDR/Examples/BPSK/testFiles/test_file', True, 0, 0)
self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_char*1, '/home/supersonic/GitHub/Post-Shannon-SDR/Examples/BPSK/testFiles/test_file_rx', False)
self.blocks_file_sink_0.set_unbuffered(False)
self.blocks_delay_0 = blocks.delay(gr.sizeof_float*1, delay_tx)
self.blocks_char_to_float_1 = blocks.char_to_float(1, 1)
self.blocks_char_to_float_0 = blocks.char_to_float(1, 1)
##################################################
# Connections
##################################################
self.connect((self.blocks_char_to_float_0, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.blocks_char_to_float_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_char_to_float_1, 0), (self.blocks_delay_0, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_sub_xx_0, 1))
self.connect((self.blocks_delay_0, 0), (self.qtgui_time_sink_x_0, 2))
self.connect((self.blocks_file_source_0, 0), (self.blocks_unpack_k_bits_bb_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.digital_constellation_modulator_0, 0))
self.connect((self.blocks_sub_xx_0, 0), (self.qtgui_time_sink_x_0, 1))
self.connect((self.blocks_throttle_0, 0), (self.digital_pfb_clock_sync_xxx_0, 0))
self.connect((self.blocks_unpack_k_bits_bb_0, 0), (self.blocks_char_to_float_1, 0))
self.connect((self.blocks_unpacked_to_packed_xx_0_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.blocks_unpacked_to_packed_xx_0_0_0, 0), (self.blocks_unpacked_to_packed_xx_0_0, 0))
self.connect((self.digital_cma_equalizer_cc_0, 0), (self.digital_costas_loop_cc_0, 0))
self.connect((self.digital_constellation_decoder_cb_0, 0), (self.digital_diff_decoder_bb_0, 0))
self.connect((self.digital_constellation_modulator_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.digital_constellation_decoder_cb_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.qtgui_const_sink_x_0_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.qtgui_time_sink_x_2_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.blocks_char_to_float_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.blocks_unpacked_to_packed_xx_0_0_0, 0))
#1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Return the anomalous_timeseries as an array to sample and fp_id_matched
anomalous_timeseries, fp_id_matched,
# @added 20170401 - Task #1988: Review - Ionosphere layers - added fp_details_list
# Feature #1960: ionosphere_layers
fp_details_list)
# @modified 20170114 - Feature #1854: Ionosphere learn
# DEPRECATED create_features_profile here as this function has been migrated in
# order to decouple the creation of features profiles from the webapp as
# ionosphere/learn now requires access to this function as well. Moved to a
# shared function in ionosphere_functions.py
# REMOVED
# def create_features_profile(requested_timestamp, data_for_metric, context):
def features_profile_details(fp_id):
"""
Get the Ionosphere details of a features profile
:param fp_id: the features profile id
:type fp_id: str
:return: tuple
:rtype: (str, boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: features_profile_details'
trace = 'none'
fail_msg = 'none'
fp_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise RuntimeError(fail_msg) # to webapp to return in the UI
ionosphere_table = None
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_table meta for fp_id %s details' % str(fp_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: ionosphere_table OK' % function_str)
try:
connection = engine.connect()
stmt = select([ionosphere_table]).where(ionosphere_table.c.id == int(fp_id))
result = connection.execute(stmt)
row = result.fetchone()
fp_details_object = row
connection.close()
try:
tsfresh_version = row['tsfresh_version']
except:
tsfresh_version = 'unknown'
try:
calc_time = row['calc_time']
except:
calc_time = 'unknown'
full_duration = row['full_duration']
features_count = row['features_count']
features_sum = row['features_sum']
deleted = row['deleted']
matched_count = row['matched_count']
last_matched = row['last_matched']
if str(last_matched) == '0':
human_date = 'never matched'
else:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_matched)))
created_timestamp = row['created_timestamp']
full_duration = row['full_duration']
# @modified 20161229 - Feature #1830: Ionosphere alerts
# Added checked_count and last_checked
last_checked = row['last_checked']
if str(last_checked) == '0':
checked_human_date = 'never checked'
else:
checked_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_checked)))
checked_count = row['checked_count']
# @modified 20170114 - Feature #1854: Ionosphere learn
# Added parent_id and generation
parent_id = row['parent_id']
generation = row['generation']
# @added 20170402 - Feature #2000: Ionosphere - validated
validated = row['validated']
# @added 20170305 - Feature #1960: ionosphere_layers
layers_id = row['layers_id']
fp_details = '''
tsfresh_version :: %s | calc_time :: %s
features_count :: %s
features_sum :: %s
deleted :: %s
matched_count :: %s
last_matched :: %s | human_date :: %s
created_timestamp :: %s
full_duration :: %s
checked_count :: %s
last_checked :: %s | human_date :: %s
parent_id :: %s | generation :: %s | validated :: %s
layers_id :: %s
''' % (str(tsfresh_version), str(calc_time), str(features_count),
str(features_sum), str(deleted), str(matched_count),
str(last_matched), str(human_date), str(created_timestamp),
str(full_duration), str(checked_count), str(last_checked),
str(checked_human_date), str(parent_id), str(generation),
str(validated), str(layers_id))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get fp_id %s details from ionosphere DB table' % str(fp_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
# @modified 20170114 - Feature #1854: Ionosphere learn - generations
# Return the fp_details_object so that webapp can pass the parent_id and
# generation to the templates
# return fp_details, True, fail_msg, trace
return fp_details, True, fail_msg, trace, fp_details_object
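# Caller sketch: the webapp unpacks all five return values, e.g.
#   fp_details, success, fail_msg, trace, fp_details_object = features_profile_details(fp_id)
# and reads parent_id/generation from fp_details_object for the templates.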
# @added 20170118 - Feature #1862: Ionosphere features profiles search page
# Added fp_search parameter
# @modified 20170220 - Feature #1862: Ionosphere features profiles search page
def ionosphere_search(default_query, search_query):
"""
Gets the details features profiles from the database, using the URL arguments
that are passed in by the :obj:`request.args` to build the MySQL select
query string and queries the database, parse the results and creates an
array of the features profiles that matched the query.
:param None: determined from :obj:`request.args`
:return: array
:rtype: array
"""
logger = logging.getLogger(skyline_app_logger)
import time
import datetime
function_str = 'ionosphere_backend.py :: ionosphere_search'
trace = 'none'
fail_msg = 'none'
full_duration_list = []
enabled_list = []
tsfresh_version_list = []
generation_list = []
features_profiles = []
features_profiles_count = []
# possible_options = [
# 'full_duration', 'enabled', 'tsfresh_version', 'generation', 'count']
logger.info('determining search parameters')
query_string = 'SELECT * FROM ionosphere'
# id, metric_id, full_duration, anomaly_timestamp, enabled, tsfresh_version,
# calc_time, features_sum, matched_count, last_matched, created_timestamp,
# last_checked, checked_count, parent_id, generation
needs_and = False
count_request = False
matched_count = None
checked_count = None
generation_count = None
count_by_metric = None
if 'count_by_metric' in request.args:
count_by_metric = request.args.get('count_by_metric', None)
if count_by_metric and count_by_metric != 'false':
count_request = True
count_by_metric = True
features_profiles_count = []
query_string = 'SELECT COUNT(*), metric_id FROM ionosphere GROUP BY metric_id'
else:
count_by_metric = False
count_by_matched = None
if 'count_by_matched' in request.args:
count_by_matched = request.args.get('count_by_matched', None)
if count_by_matched and count_by_matched != 'false':
count_request = True
count_by_matched = True
matched_count = []
# query_string = 'SELECT COUNT(*), id FROM ionosphere GROUP BY matched_count ORDER BY COUNT(*)'
query_string = 'SELECT matched_count, id FROM ionosphere ORDER BY matched_count'
else:
count_by_matched = False
count_by_checked = None
if 'count_by_checked' in request.args:
count_by_checked = request.args.get('count_by_checked', None)
if count_by_checked and count_by_checked != 'false':
count_request = True
count_by_checked = True
checked_count = []
# query_string = 'SELECT COUNT(*), id FROM ionosphere GROUP BY checked_count ORDER BY COUNT(*)'
query_string = 'SELECT checked_count, id FROM ionosphere ORDER BY checked_count'
else:
count_by_checked = False
count_by_generation = None
if 'count_by_generation' in request.args:
count_by_generation = request.args.get('count_by_generation', None)
if count_by_generation and count_by_generation != 'false':
count_request = True
count_by_generation = True
generation_count = []
query_string = 'SELECT COUNT(*), generation FROM ionosphere GROUP BY generation ORDER BY COUNT(*)'
else:
count_by_generation = False
get_metric_profiles = None
metric = None
if 'metric' in request.args:
metric = request.args.get('metric', None)
if metric and metric != 'all' and metric != '*':
# A count_request always takes preference over a metric
if not count_request:
get_metric_profiles = True
query_string = 'SELECT * FROM ionosphere WHERE metric_id=REPLACE_WITH_METRIC_ID'
needs_and = True
else:
new_query_string = 'SELECT * FROM ionosphere WHERE metric_id=REPLACE_WITH_METRIC_ID'
query_string = new_query_string
needs_and = True
if 'from_timestamp' in request.args:
from_timestamp = request.args.get('from_timestamp', None)
if from_timestamp and from_timestamp != 'all':
if ":" in from_timestamp:
new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
from_timestamp = str(int(new_from_timestamp))
# @added 20190116 - Multiple SQL Injection Security Vulnerabilities #86
# Bug #2818: Multiple SQL Injection Security Vulnerabilities
# Validate from_timestamp
try:
validate_from_timestamp = int(from_timestamp) + 1
int_from_timestamp = validate_from_timestamp - 1
validated_from_timestamp = str(int_from_timestamp)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate from_timestamp'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Multiple SQL Injection Security Vulnerabilities #86
# Bug #2818: Multiple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, from_timestamp)
new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, validated_from_timestamp)
query_string = new_query_string
needs_and = True
else:
# @modified 20190116 - Multiple SQL Injection Security Vulnerabilities #86
# Bug #2818: Multiple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, from_timestamp)
new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, validated_from_timestamp)
query_string = new_query_string
needs_and = True
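# e.g. with from_timestamp='1500000000' the clause appended above yields
# (sketch): SELECT * FROM ionosphere WHERE anomaly_timestamp >= 1500000000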
if 'until_timestamp' in request.args:
until_timestamp = request.args.get('until_timestamp', None)
if until_timestamp and until_timestamp != 'all':
if ":" in until_timestamp:
new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
until_timestamp = str(int(new_until_timestamp))
# @added 20190116 - Multiple SQL Injection Security Vulnerabilities #86
# Bug #2818: Multiple SQL Injection Security Vulnerabilities
# Validate until_timestamp
try:
validate_until_timestamp = int(until_timestamp) + 1
int_until_timestamp = validate_until_timestamp - 1
validated_until_timestamp = str(int_until_timestamp)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate until_timestamp'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Multiple SQL Injection Security Vulnerabilities #86
# Bug #2818: Multiple SQL Injection Security Vulnerabilities
# Use validated variable
# | |
"""*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
from os.path import join
Log.writeInfoMessage( "Loading Interrupt Manager for " + Variables.get( "__PROCESSOR" ) )
################################################################################
#### Public Globals -- variables used in this module and accessible from other files
################################################################################
global getInterruptName
global interruptNamespace
global interruptSymbolEnable
global interruptSymbolHandler
global interruptSymbolHandlerLock
global interruptLastNameEnable
global interruptLastNameHandler
global interruptLastNameLock
interruptNamespace = "core"
interruptLastNameEnable = "_INTERRUPT_ENABLE"
interruptLastNameHandler = "_INTERRUPT_HANDLER"
interruptLastNameLock = "_INTERRUPT_HANDLER_LOCK"
################################################################################
global showSharedVectorsInMenu
global numSharedVectors
global sharedVectors
global subVectorToSharedVector
showSharedVectorsInMenu = False
numSharedVectors = 0
sharedVectors = {}
subVectorToSharedVector = {}
################################################################################
#### Static Globals -- variables intended to be used inside this file only
################################################################################
# not currently public
global interruptsChildren
global interruptLastNameMapType
global interruptLastNameVector
global interruptLastNameSrcType
global interruptLastNamePriority
global aicMenuTitle
global aicRedirectionVisibility
global aicMapTypeVisibility
global aicPriorityOutputMode
global aicPriorityChoices
global aicSrcTypes
global aicMinPriorityName
global aicMaxPriorityName
interruptLastNameMapType = "_INTERRUPT_MAP_TYPE"
interruptLastNameVector = "_INTERRUPT_VECTOR"
interruptLastNameSrcType = "_INTERRUPT_SRC_TYPE"
interruptLastNamePriority = "_INTERRUPT_PRIORITY"
interruptsChildren = ATDF.getNode( "/avr-tools-device-file/devices/device/interrupts" ).getChildren()
aicMenuTitle = ""
aicRedirectionVisibility = False
aicMapTypeVisibility = False
aicPriorityOutputMode = ""
aicPriorityChoices = []
aicSrcTypes = []
aicMinPriorityName = ""
aicMaxPriorityName = ""
aicCodeGenerationDependencies = []
neverSecureList = []
alwaysSecureList = []
programmedSecureList = []
externalList = []
################################################################################
#### Global Methods
################################################################################
def getInterruptName( interruptNode ):
if "header:alternate-name" in interruptNode.getAttributeList():
retval = interruptNode.getAttribute( "header:alternate-name" )
else:
retval = interruptNode.getAttribute( "name" )
return( str( retval ) )
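# Usage sketch (illustrative only; assumes interruptsChildren was populated
# from the ATDF node above): collect every vector name in one pass.
# allVectorNames = [ getInterruptName( node ) for node in interruptsChildren ]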
################################################################################
#### Local Methods
################################################################################
def getInterruptDescription( interruptNode ):
if "header:alternate-caption" in interruptNode.getAttributeList():
retval = interruptNode.getAttribute( "header:alternate-caption" )
else:
retval = interruptNode.getAttribute( "caption" )
return( str( retval ) )
global getNameValueCaptionTuple
def getNameValueCaptionTuple( aGroupName, aTupleArray ):
choiceNode = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"AIC\"]/value-group@[name=\"" + aGroupName + "\"]")
if choiceNode:
choiceValues = choiceNode.getChildren()
del aTupleArray[:]
for ii in range( 0, len( choiceValues ) ):
aTupleArray.append( ( choiceValues[ ii ].getAttribute("name"),
choiceValues[ ii ].getAttribute("value"),
choiceValues[ ii ].getAttribute("caption")
) )
def getTupleNameContaining( aTupleArray, aString ):
tupleName = ""
if len( aTupleArray ):
tupleName = aTupleArray[ 0 ][ 0 ]
aString = aString.upper()
for entry in aTupleArray:
if( aString in entry[ 0 ].upper() ):
tupleName = entry[ 0 ]
break
return tupleName
def aicMapTypeRedirectionCallback( aicMapType, eventDictionary ):
if( True == eventDictionary[ "value" ] ):
# Mapping Secure to NonSecure
if( ("AlwaysSecure" == aicMapType.getDefaultValue())
or ("Secure" == aicMapType.getDefaultValue())
):
aicMapType.setValue( "RedirectedToNonSecure", 1 ) # make change evident for user
else:
if( ("AlwaysSecure" == aicMapType.getDefaultValue())
or ("Secure" == aicMapType.getDefaultValue())
):
aicMapType.clearValue() # restore the default value
def priorityMapTypeCallback( aicVectorPriority, eventDictionary ):
global aicMaxPriorityName
if( ("AlwaysSecure" == eventDictionary[ "value" ])
or ("Secure" == eventDictionary[ "value" ])
):
aicVectorPriority.setSelectedKey( aicMaxPriorityName, 0 )
aicVectorPriority.setVisible( False )
else:
aicVectorPriority.setVisible( True )
def aicCodeGenerationCallback( aicCodeGeneration, eventDictionary ):
global interruptLastNameEnable
# Interrupt enables and map type determine the code generation to be done later
secureCount = 0
nonSecureCount = 0
for interrupt in interruptsChildren:
interruptName = getInterruptName( interrupt )
component = aicCodeGeneration.getComponent()
enableSymbol = component.getSymbolByID( interruptName + interruptLastNameEnable )
if( enableSymbol.getValue() ):
mapTypeSymbol = component.getSymbolByID( interruptName + interruptLastNameMapType )
if( ("NeverSecure" == mapTypeSymbol.value)
or ("NonSecure" == mapTypeSymbol.value)
or ("RedirectedToNonSecure" == mapTypeSymbol.value)
):
nonSecureCount = nonSecureCount + 1
else:
secureCount = secureCount + 1
if secureCount and nonSecureCount:
aicCodeGeneration.setValue( "AICandSAIC", 0xFF )
elif nonSecureCount:
aicCodeGeneration.setValue( "AIC", 0xFF )
elif secureCount:
aicCodeGeneration.setValue( "SAIC", 0xFF )
else:
aicCodeGeneration.setValue( "NONE", 0xFF )
global aicVectorEnableCallback
def aicVectorEnableCallback( aicVectorEnable, eventDictionary ):
global sharedVectors
desiredValue = eventDictionary[ "value" ]
interrupt = eventDictionary[ "id" ].replace( interruptLastNameLock, "" ).replace( interruptLastNameEnable, "" )
aicVectorEnable.setReadOnly( True )
if aicVectorEnable.getDefaultValue() == desiredValue:
aicVectorEnable.clearValue()
else:
aicVectorEnable.setValue( desiredValue, 1 )
aicVectorEnable.setReadOnly( False )
sharedInterrupt = subVectorToSharedVector.get( interrupt )
if( sharedInterrupt ):
# check if any sibling is enabled
component = aicVectorEnable.getComponent()
desiredValue = False
for elem in sharedVectors[ sharedInterrupt ]:
vectorEnable = component.getSymbolByID( elem + interruptLastNameEnable )
if vectorEnable and vectorEnable.getValue():
desiredValue = True
aicVectorEnable = component.getSymbolByID( sharedInterrupt + interruptLastNameEnable )
aicVectorEnable.setValue( desiredValue, 1 )
def setupEnableAndHandler( component, anInterrupt, aicVectorEnable, aicVectorHandler ):
global sharedVectors
enableDependencies = []
interruptName = getInterruptName( anInterrupt )
moduleInstance = anInterrupt.getAttribute( "module-instance" ).split()
sharedVectorMaxShares = len( moduleInstance )
if 1 < sharedVectorMaxShares:
aicVectorHandler.setReadOnly( True )
aicVectorHandler.setValue( interruptName + "_SharedHandler", 0 )
aicVectorHandler.setReadOnly( False )
sharedVectors[ interruptName ] = moduleInstance
aicVectorHandler.setVisible( False )
for elem in moduleInstance:
subVectorToSharedVector[ elem ] = interruptName
subVectorEnable = component.createBooleanSymbol( elem + interruptLastNameEnable, aicVectorEnable )
subVectorEnable.setLabel( "Enable " + elem )
subVectorEnable.setDefaultValue( False )
subVectorEnable.setDependencies( aicVectorEnableCallback, [elem + interruptLastNameLock] )
enableDependencies.append( elem + interruptLastNameEnable ) # Parent enable depends on children
subVectorHandlerLock = component.createBooleanSymbol( elem + interruptLastNameLock, subVectorEnable )
subVectorHandlerLock.setDefaultValue( False )
subVectorHandlerLock.setVisible( False )
subVectorHandler = component.createStringSymbol( elem + interruptLastNameHandler, subVectorEnable )
subVectorHandler.setLabel( elem + " Handler" )
subVectorHandler.setDefaultValue( elem + "_Handler" )
enableDependencies.append( interruptName + interruptLastNameLock )
aicVectorEnable.setDependencies( aicVectorEnableCallback, enableDependencies )
def setupSharedVectorFtlSymbols( component, anInterrupt, aicVectorEnable ):
global showSharedVectorsInMenu
global numSharedVectors
interruptName = getInterruptName( anInterrupt )
moduleInstance = anInterrupt.getAttribute( "module-instance" ).split()
numShares = len( moduleInstance )
if 1 < numShares:
numSharedVectors = numSharedVectors + 1
# SHARED_VECTOR_N = "name", e.g. SHARED_VECTOR_1 = "SYSC"
# Create a generic shared handler symbol with a value indicating the HANDLER
sharedVector = component.createStringSymbol( "SHARED_VECTOR_" + str( numSharedVectors - 1 ), aicVectorEnable )
Database.clearSymbolValue( "core", interruptName + "SHARED_VECTOR_" + str( numSharedVectors - 1 ) )
sharedVector.setDefaultValue( interruptName )
sharedVector.setVisible( False )
sharedVectorNumShares = component.createIntegerSymbol( interruptName + "_NUM_SHARES", sharedVector )
sharedVectorNumShares.setMin( numShares )
sharedVectorNumShares.setMax( numShares )
Database.clearSymbolValue( "core", interruptName + "_NUM_SHARES" )
sharedVectorNumShares.setValue( numShares, 0 )
sharedVectorNumShares.setVisible( showSharedVectorsInMenu )
# Create symbols for the shared handler names
# {SHARED_VECTOR_#}_HANDLER_#, e.g.
# SYSC_HANDLER_0 = "PMC" ==> PMC_InterruptHandler
# SYSC_HANDLER_1 = "RSTC" ==> RSTC_InterruptHandler
# SYSC_HANDLER_2 = "RTC" ==> RTC_InterruptHandler
ii = 0
for elem in moduleInstance:
shareName = component.createStringSymbol( interruptName + "_SHARE_" + str( ii ), aicVectorEnable )
shareName.setDefaultValue( elem )
shareName.setVisible( showSharedVectorsInMenu )
ii = ii + 1
def formAicPyGlobalData( theProcessor, theCoreComponent ):
global getNameValueCaptionTuple
global aicMenuTitle
global aicRedirectionVisibility
global aicMapTypeVisibility
global aicPriorityOutputMode
global aicPriorityChoices
global aicSrcTypes
global neverSecureList
global alwaysSecureList
global programmedSecureList
global externalList
aicPriorityOutputMode = "Value"
aicPrioritySymbolStem = "PRIORITY"
getNameValueCaptionTuple( "AIC_SMR__" + aicPrioritySymbolStem, aicPriorityChoices )
if not len( aicPriorityChoices ):
aicPrioritySymbolStem = "PRIOR"
getNameValueCaptionTuple( "AIC_SMR__" + aicPrioritySymbolStem, aicPriorityChoices )
if not len( aicPriorityChoices ):
# still not found in the atdf; so set some defaults
aicPriorityChoices.append( ( "MINIMUM", "0x0", "Minimum priority" ) )
aicPriorityChoices.append( ( "VERY_LOW", "0x1", "Very low priority" ) )
aicPriorityChoices.append( ( "LOW", "0x2", "Low priority" ) )
aicPriorityChoices.append( ( "MEDIUM_LOW", "0x3", "Medium priority" ) )
aicPriorityChoices.append( ( "MEDIUM_HIGH","0x4", "Medium high priority" ) )
aicPriorityChoices.append( ( "HIGH", "0x5", "High priority" ) )
aicPriorityChoices.append( ( "VERY_HIGH", "0x6", "Very high priority" ) )
aicPriorityChoices.append( ( "MAXIMUM", "0x7", "Maximum priority" ) )
aicSmrPrioritySymbol = theCoreComponent.createStringSymbol( "AIC_SMR_PRIORITY_SYMBOL", None )
aicSmrPrioritySymbol.setDefaultValue( "AIC_SMR_" + aicPrioritySymbolStem )
aicSmrPrioritySymbol.setVisible( False )
#
aicSrcTypeSymbolStem = "SRCTYPE"
getNameValueCaptionTuple( "AIC_SMR__" + aicSrcTypeSymbolStem, aicSrcTypes )
aicSmrSrcTypeSymbol = theCoreComponent.createStringSymbol( "AIC_SMR_SRCTYPE_SYMBOL", None )
aicSmrSrcTypeSymbol.setDefaultValue( "AIC_SMR_" + aicSrcTypeSymbolStem )
aicSmrSrcTypeSymbol.setVisible( False )
#
if "SAMA5" in theProcessor:
aicMenuTitle = "Interrupts (AIC/SAIC)"
aicRedirectionVisibility = True
aicMapTypeVisibility = True
neverSecureList = [ '49', '62' ]
alwaysSecureList = [ '0', '14', '15', '16', '18', '51', '61', '68', '69', '70' ]
programmedSecureList = [] # TODO: create map interface to populate this list
externalList = [ '0', '49' ] # '2', '56', '57', '64', '65', '66', '67', '71', '72' have been subsumed; the data sheet peripheral table is misleading
elif "SAM9X60" in theProcessor:
aicMenuTitle = "Interrupts"
aicRedirectionVisibility = False
aicMapTypeVisibility = False
neverSecureList = [ str( ii ) for ii in list( range( 0, 50 ) | |
#
# Taranos Cloud Sonification Framework Tutorial #1: Switched-On Taranos
# Copyright (C) 2018 <NAME>, Netrogen Blue LLC (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is the first of a series of living tutorials intended to demonstrate the capabilities and usage of the Taranos
# Cloud Sonification Framework. They are alive in the sense that they contain functioning code ready to be run and then
# poked, prodded and dissected on the student's lab table. The user is encouraged to experiment with the code as
# desired to accelerate the learning process.
#
# The tutorial samples take advantage of the Taranos Python "Pseudo-API", or PAPI, to demonstrate the higher level
# semantics of the operations supported by the Taranos server. This spares the user from having to dismember the HTTP
# request and response bodies of the actual Taranos web API service calls in order to obtain the same information. It
# should be stressed however that the web API is the canonical interface for interacting with Taranos and that the PAPI
# is simply a supporting tool. The PAPI is not guaranteed to support all of the web API's capabilities.
#
# To use the PAPI ensure you have the location of the Python package named taranoscsfpapi in your PYTHONPATH.
# The PAPI namespace is organized into a number of different concerns, including services for signaling, rendering, and
# general accessor management. Different accessors will require some but not others depending on their roles. For
# instance, an accessor that serves only as a signal provider would not require the rendering namespace. Likewise a
# dedicated sonification rendering device may have no use for the signaling namespace. This tutorial will make use of
# all of those mentioned so we'll import all of their modules:
from taranoscsfpapi.management import *
from taranoscsfpapi.rendering import *
from taranoscsfpapi.signaling import *
# To get started we need to initialize the PAPI itself. This will communicate with the designated Taranos server
# to fetch some initial configuration information used to simplify the PAPI interactions. Setting the
# is_verbose argument to True will enable printing of the HTTP request and response data for every service call.
# This will help you understand the nature of the actual Taranos web API calls performed underneath.
#
# You will see that all request and response data is encapsulated within JSON objects. JSON is the standard
# format for Taranos messaging. You will also see that the JSON name strings are highly compacted using a minimalist
# approach to symbol naming. This is by design with consideration to the memory and bandwidth limitations of
# resource-constrained devices. It is a goal of the Taranos project to enable sonification sharing among as
# many diverse classes of devices as possible from the cloud-hosted to the wearable. Thus it is a general
# philosophy of the project that when it comes to issues of data communication the limitations of devices will
# be accommodated over those of humans.
papi_init(server_url='http://localhost:9000', is_verbose=True)
# For tutorial purposes our next task is to reset the Taranos simulation cell associated with our accessor so
# that we are starting with a clean configuration. This will destroy any prior model elements owned by our
# accessor and then allocate and initialize new default signal trunk and wavefield models for it. [Note: As of
# February 2017, the Taranos Reference Server supports only a single implied accessor.]
destroy_cell()
# The newly created cell now contains a default wavefield instance. Here we will fetch a report of its configuration.
# All reports contain sections which are aligned to model element propertysets. Every report will contain the meta
# propertyset since it applies to every model element. Below we are also requesting the refs ("r") propertyset which
# will contain all of the keys of the other elements that the field knows.
rf = report_field(sections='r')
print(rf)
# Notice that within the refs section there is a reference to the field's default field emitter.
assert(rf['r']['_fe'])
# We know this because within the Taranos API "_" is shorthand for "default" and "fe" is shorthand for "field emitter".
#
# Emitters are the sources of the waveforms modeled by Taranos. They own collections of oscillators which determine the
# qualities of the waveforms produced. They may be attached to wavefield bodies, known as subjects and probes, or they
# may be associated with the wavefield itself as is the case with field emitters.
# Next we will extract the default field emitter's key from the field report. All Taranos model elements are referenced
# by an immutable key string. This key will be used in subsequent service calls to refer to this emitter.
kfe = rf['r']['_fe']
assert(kfe[-3:] == '~fe')
# Notice that the key is a UUID followed by the substring "~fe". Taranos uses the concept of typed-keys where every
# model element key is appended with a suffix that indicates its model element type. This makes it a bit easier for
# humans to reason about and work with API service calls involving multiple keys.
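# For instance, a typed key can be split back into its UUID and type suffix with
# ordinary string handling (a sketch, relying only on the '~' separator shown above):
kfe_uuid, _, kfe_type = kfe.rpartition('~')
assert(kfe_type == 'fe')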
# From a modeling perspective, emitters do not produce waveforms until the wavefield is sampled by a waveform collector.
# The simplest way to query a collector is by evoking a field "sampler". Samplers are ephemeral collectors which exist
# only for the duration of the querying service call. While convenient, they have no persistence so they cannot be
# shared by multiple service accessors. For that you would use the other kind of collector, the probe collector, which
# will be described in a later tutorial.
#
# Being field entities like probes and subjects, samplers have geometrical coordinates within the field. A field
# entity's default coordinates are always the origin position of the field, which in the case of a 2-dimensional field
# is {0, 0}. They also have other properties which affect their waveform detection performance. The default values for
# those properties are acceptable for now.
#
# Here we will sample the field for waveforms using a standard sampler which is configured by default to be located at
# the field origin position.
rw = report_waveforms()
print(rw)
# Notice however that the report is empty:
assert(not rw)
# This means the collector detected no waveforms to report. Why is this? The answer can be found by diving a little
# deeper into the emitter's oscillators.
# First, let's fetch another field emitter report but this time one that includes a section ("r") that contains the
# emitter's references to other elements:
rfe = report_field_emitter(key=kfe, sections='r')
print(rfe)
# Notice that, just as the default field is associated with a default field emitter, that emitter is associated with a
# default field oscillator:
assert(rfe['r']['_fo'])
# Next, let's get a field oscillator report that also includes its references.
kfo = rfe['r']['_fo']
rfo = report_field_oscillator(key=kfo, sections='r')
print(rfo)
# Notice that the oscillator is associated with an oscillator patch:
assert(rfo['r']['smpo'])
# Every oscillator has a patch which determines how the energy channeled to it by its parent emitter is mapped into
# various wavefield properties. Since an oscillator will always have an associated patch and never more than one it is
# simply referred to by the shorthand "smpo" (no underscore).
#
# The oscillator patch is our introduction to a kind of model element known as a signal modulator. Signal modulators
# provide the rendering model's access to the underlying signaling model. Signaling will be explained in more detail
# in a later tutorial. For now we will enjoy the fact that the signaling model does not have to be well-understood to
# make practical use of rendering model elements.
#
# Let's fetch an oscillator patch report that includes its references:
ksmpo = rfo['r']['smpo']
rsmpo = report_oscillator_patch(key=ksmpo, sections='r')
print(rsmpo)
# Notice that the oscillator patch has 5 signal output modulators ("smo") associated with it. | |
# Copyright (c) 2009, <NAME>'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows platform implementation."""
import contextlib
import errno
import functools
import os
import signal
import sys
import time
from collections import namedtuple
from . import _common
from ._common import AccessDenied
from ._common import conn_tmap
from ._common import conn_to_ntuple
from ._common import debug
from ._common import ENCODING
from ._common import ENCODING_ERRS
from ._common import isfile_strict
from ._common import memoize
from ._common import memoize_when_activated
from ._common import NoSuchProcess
from ._common import parse_environ_block
from ._common import TimeoutExpired
from ._common import usage_percent
from ._compat import long
from ._compat import lru_cache
from ._compat import PY3
from ._compat import range
from ._compat import unicode
from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS
from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS
from ._psutil_windows import HIGH_PRIORITY_CLASS
from ._psutil_windows import IDLE_PRIORITY_CLASS
from ._psutil_windows import NORMAL_PRIORITY_CLASS
from ._psutil_windows import REALTIME_PRIORITY_CLASS
try:
from . import _psutil_windows as cext
except ImportError as err:
if str(err).lower().startswith("dll load failed") and \
sys.getwindowsversion()[0] < 6:
# We may get here if:
# 1) we are on an old Windows version
# 2) psutil was installed via pip + wheel
# See: https://github.com/giampaolo/psutil/issues/811
msg = "this Windows version is too old (< Windows Vista); "
msg += "psutil 3.4.2 is the latest version which supports Windows "
msg += "2000, XP and 2003 server"
raise RuntimeError(msg)
else:
raise
if sys.version_info >= (3, 4):
import enum
else:
enum = None
# process priority constants, import from __init__.py:
# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
__extra__all__ = [
"win_service_iter", "win_service_get",
# Process priority
"ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
"HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", "NORMAL_PRIORITY_CLASS",
"REALTIME_PRIORITY_CLASS",
# IO priority
"IOPRIO_VERYLOW", "IOPRIO_LOW", "IOPRIO_NORMAL", "IOPRIO_HIGH",
# others
"CONN_DELETE_TCB", "AF_LINK",
]
# =====================================================================
# --- globals
# =====================================================================
CONN_DELETE_TCB = "DELETE_TCB"
ERROR_PARTIAL_COPY = 299
PYPY = '__pypy__' in sys.builtin_module_names
if enum is None:
AF_LINK = -1
else:
AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1})
AF_LINK = AddressFamily.AF_LINK
TCP_STATUSES = {
cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
if enum is not None:
class Priority(enum.IntEnum):
ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS
BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS
HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS
IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS
NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS
REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS
globals().update(Priority.__members__)
if enum is None:
IOPRIO_VERYLOW = 0
IOPRIO_LOW = 1
IOPRIO_NORMAL = 2
IOPRIO_HIGH = 3
else:
class IOPriority(enum.IntEnum):
IOPRIO_VERYLOW = 0
IOPRIO_LOW = 1
IOPRIO_NORMAL = 2
IOPRIO_HIGH = 3
globals().update(IOPriority.__members__)
pinfo_map = dict(
num_handles=0,
ctx_switches=1,
user_time=2,
kernel_time=3,
create_time=4,
num_threads=5,
io_rcount=6,
io_wcount=7,
io_rbytes=8,
io_wbytes=9,
io_count_others=10,
io_bytes_others=11,
num_page_faults=12,
peak_wset=13,
wset=14,
peak_paged_pool=15,
paged_pool=16,
peak_non_paged_pool=17,
non_paged_pool=18,
pagefile=19,
peak_pagefile=20,
mem_private=21,
)
# =====================================================================
# --- named tuples
# =====================================================================
# psutil.cpu_times()
scputimes = namedtuple('scputimes',
['user', 'system', 'idle', 'interrupt', 'dpc'])
# psutil.virtual_memory()
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
# psutil.Process.memory_info()
pmem = namedtuple(
'pmem', ['rss', 'vms',
'num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
'pagefile', 'peak_pagefile', 'private'])
# psutil.Process.memory_full_info()
pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', ))
# psutil.Process.memory_maps(grouped=True)
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
# psutil.Process.memory_maps(grouped=False)
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# psutil.Process.io_counters()
pio = namedtuple('pio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
'other_count', 'other_bytes'])
# =====================================================================
# --- utils
# =====================================================================
@lru_cache(maxsize=512)
def convert_dos_path(s):
r"""Convert paths using native DOS format like:
"\Device\HarddiskVolume1\Windows\systemew\file.txt"
into:
"C:\Windows\systemew\file.txt"
"""
rawdrive = '\\'.join(s.split('\\')[:3])
driveletter = cext.QueryDosDevice(rawdrive)
remainder = s[len(rawdrive):]
return os.path.join(driveletter, remainder)
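# Illustrative example (a sketch, not executed here; QueryDosDevice requires the
# device to actually exist on the running system):
#   convert_dos_path(r"\Device\HarddiskVolume1\Windows\notepad.exe")
#   -> r"C:\Windows\notepad.exe"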
def py2_strencode(s):
"""Encode a unicode string to a byte string by using the default fs
encoding + "replace" error handler.
"""
if PY3:
return s
else:
if isinstance(s, str):
return s
else:
return s.encode(ENCODING, ENCODING_ERRS)
@memoize
def getpagesize():
return cext.getpagesize()
# =====================================================================
# --- memory
# =====================================================================
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
#
total = totphys
avail = availphys
free = availphys
used = total - avail
percent = usage_percent((total - avail), total, round_=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = cext.virtual_mem()
total = mem[2]
free = mem[3]
used = total - free
percent = usage_percent(used, total, round_=1)
return _common.sswap(total, used, free, percent, 0, 0)
# =====================================================================
# --- disk
# =====================================================================
disk_io_counters = cext.disk_io_counters
def disk_usage(path):
"""Return disk usage associated with path."""
if PY3 and isinstance(path, bytes):
# XXX: do we want to use "strict"? Probably yes, in order
# to fail immediately. After all we are accepting input here...
path = path.decode(ENCODING, errors="strict")
total, free = cext.disk_usage(path)
used = total - free
percent = usage_percent(used, total, round_=1)
return _common.sdiskusage(total, used, free, percent)
def disk_partitions(all):
"""Return disk partitions."""
rawlist = cext.disk_partitions(all)
return [_common.sdiskpart(*x) for x in rawlist]
# =====================================================================
# --- CPU
# =====================================================================
def cpu_times():
"""Return system CPU times as a named tuple."""
user, system, idle = cext.cpu_times()
# Internally, GetSystemTimes() is used, and it doesn't return
# interrupt and dpc times. cext.per_cpu_times() does, so we
# rely on it to get those only.
percpu_summed = scputimes(*[sum(n) for n in zip(*cext.per_cpu_times())])
return scputimes(user, system, idle,
percpu_summed.interrupt, percpu_summed.dpc)
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples."""
ret = []
for user, system, idle, interrupt, dpc in cext.per_cpu_times():
item = scputimes(user, system, idle, interrupt, dpc)
ret.append(item)
return ret
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_cores():
"""Return the number of CPU cores in the system."""
return cext.cpu_count_cores()
def cpu_stats():
"""Return CPU statistics."""
ctx_switches, interrupts, dpcs, syscalls = cext.cpu_stats()
soft_interrupts = 0
return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
syscalls)
def cpu_freq():
"""Return CPU frequency.
On Windows per-cpu frequency is not supported.
"""
curr, max_ = cext.cpu_freq()
min_ = 0.0
return [_common.scpufreq(float(curr), min_, float(max_))]
_loadavg_initialized = False
def getloadavg():
"""Return the number of processes in the system run queue averaged
over the last 1, 5, and 15 minutes respectively as a tuple"""
global _loadavg_initialized
if not _loadavg_initialized:
cext.init_loadavg_counter()
_loadavg_initialized = True
# Drop to 2 decimal points which is what Linux does
raw_loads = cext.getloadavg()
return tuple([round(load, 2) for load in raw_loads])
# =====================================================================
# --- network
# =====================================================================
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
"""
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = set()
for item in rawlist:
fd, fam, type, laddr, raddr, status, pid = item
nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, TCP_STATUSES,
pid=pid if _pid == -1 else None)
ret.add(nt)
return list(ret)
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
ret = {}
rawdict = cext.net_if_stats()
for name, items in rawdict.items():
if not PY3:
assert isinstance(name, unicode), type(name)
name = py2_strencode(name)
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
ret = cext.net_io_counters()
return dict([(py2_strencode(k), v) for k, v in ret.items()])
def net_if_addrs():
"""Return the addresses associated to each NIC."""
ret = []
for items in cext.net_if_addrs():
items = list(items)
items[0] = py2_strencode(items[0])
ret.append(items)
return ret
# =====================================================================
# --- sensors
# =====================================================================
def sensors_battery():
"""Return battery information."""
# For constants meaning see:
# https://msdn.microsoft.com/en-us/library/windows/desktop/
# aa373232(v=vs.85).aspx
acline_status, flags, percent, secsleft = cext.sensors_battery()
power_plugged = acline_status == 1
no_battery = bool(flags & 128)
charging = bool(flags & 8)
if no_battery:
return None
if power_plugged or charging:
secsleft = _common.POWER_TIME_UNLIMITED
elif secsleft == -1:
secsleft = _common.POWER_TIME_UNKNOWN
return _common.sbattery(percent, secsleft, power_plugged)
# =====================================================================
# --- other system functions
# =====================================================================
_last_btime = 0
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
# This dirty hack is to adjust the precision of the returned
# value which may have a 1 second fluctuation, see:
# https://github.com/giampaolo/psutil/issues/1007
global _last_btime
ret = float(cext.boot_time())
if abs(ret - _last_btime) <= 1:
return _last_btime
else:
_last_btime = ret
return ret
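# E.g. two consecutive calls returning 1633036800.2 and then 1633036800.9 both
# report the first value, since they differ by <= 1 second (a sketch of the
# adjustment above; the numbers are arbitrary).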
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, hostname, tstamp = item
user = py2_strencode(user)
nt = _common.suser(user, None, hostname, tstamp, None)
retlist.append(nt)
return retlist
# =====================================================================
# --- Windows services
# =====================================================================
def win_service_iter():
"""Yields a list of WindowsService instances."""
for name, display_name in cext.winservice_enumerate():
yield WindowsService(py2_strencode(name), py2_strencode(display_name))
def win_service_get(name):
"""Open a Windows service and return it as a WindowsService instance."""
service = WindowsService(name, None)
service._display_name = service._query_config()['display_name']
return service
class WindowsService(object):
"""Represents an installed Windows service."""
def __init__(self, name, display_name): | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import zipfile
from random import randint
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.util import get_file_json
from azure.mgmt.web.models import SkuDescription
from ._constants import (NETCORE_VERSION_DEFAULT, NETCORE_VERSIONS, NODE_VERSION_DEFAULT,
NODE_VERSIONS, NETCORE_RUNTIME_NAME, NODE_RUNTIME_NAME, ASPDOTNET_RUNTIME_NAME,
ASPDOTNET_VERSION_DEFAULT, DOTNET_VERSIONS, STATIC_RUNTIME_NAME,
PYTHON_RUNTIME_NAME, PYTHON_VERSION_DEFAULT, LINUX_SKU_DEFAULT, OS_DEFAULT,
NODE_VERSION_NEWER, DOTNET_RUNTIME_NAME, DOTNET_VERSION_DEFAULT, ASPDOTNET_VERSIONS,
DOTNET_TARGET_FRAMEWORK_REGEX, GENERATE_RANDOM_APP_NAMES)
logger = get_logger(__name__)
def _resource_client_factory(cli_ctx, **_):
from azure.cli.core.profiles import ResourceType
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
def web_client_factory(cli_ctx, **_):
from azure.mgmt.web import WebSiteManagementClient
return get_mgmt_service_client(cli_ctx, WebSiteManagementClient)
def zip_contents_from_dir(dirPath, lang):
import tempfile
import uuid
relroot = os.path.abspath(tempfile.gettempdir())
path_and_file = os.path.splitdrive(dirPath)[1]
file_val = os.path.split(path_and_file)[1]
file_val_unique = file_val + str(uuid.uuid4())[:259]
zip_file_path = relroot + os.path.sep + file_val_unique + ".zip"
abs_src = os.path.abspath(dirPath)
try:
with zipfile.ZipFile("{}".format(zip_file_path), "w", zipfile.ZIP_DEFLATED) as zf:
for dirname, subdirs, files in os.walk(dirPath):
# skip node_modules folder for Node apps,
# since zip_deployment will perform the build operation
if lang.lower() == NODE_RUNTIME_NAME:
subdirs[:] = [d for d in subdirs if 'node_modules' not in d]
elif lang.lower() == NETCORE_RUNTIME_NAME:
subdirs[:] = [d for d in subdirs if d not in ['obj', 'bin']]
elif lang.lower() == PYTHON_RUNTIME_NAME:
subdirs[:] = [d for d in subdirs if 'env' not in d] # Ignore dirs that contain 'env'
filtered_files = []
for filename in files:
if filename == '.env':
logger.info("Skipping file: %s/%s", dirname, filename)
else:
filtered_files.append(filename)
files[:] = filtered_files
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
zf.write(absname, arcname)
except IOError as e:
if e.errno == 13:
raise CLIError('Insufficient permissions to create a zip in current directory. '
'Please re-run the command with administrator privileges')
raise CLIError(e)
return zip_file_path
def get_runtime_version_details(file_path, lang_name):
version_detected = None
version_to_create = None
if lang_name.lower() == DOTNET_RUNTIME_NAME:
version_detected = parse_dotnet_version(file_path, DOTNET_VERSION_DEFAULT)
version_to_create = detect_dotnet_version_tocreate(version_detected, DOTNET_VERSION_DEFAULT, DOTNET_VERSIONS)
elif lang_name.lower() == NETCORE_RUNTIME_NAME:
# method returns list in DESC, pick the first
version_detected = parse_netcore_version(file_path)[0]
version_to_create = detect_netcore_version_tocreate(version_detected)
elif lang_name.lower() == ASPDOTNET_RUNTIME_NAME:
# method returns list in DESC, pick the first
version_detected = parse_dotnet_version(file_path, ASPDOTNET_VERSION_DEFAULT)
version_to_create = detect_dotnet_version_tocreate(version_detected,
ASPDOTNET_VERSION_DEFAULT, ASPDOTNET_VERSIONS)
elif lang_name.lower() == NODE_RUNTIME_NAME:
if file_path == '':
version_detected = "-"
version_to_create = NODE_VERSION_DEFAULT
else:
version_detected = parse_node_version(file_path)[0]
version_to_create = detect_node_version_tocreate(version_detected)
elif lang_name.lower() == PYTHON_RUNTIME_NAME:
version_detected = "-"
version_to_create = PYTHON_VERSION_DEFAULT
elif lang_name.lower() == STATIC_RUNTIME_NAME:
version_detected = "-"
version_to_create = "-"
return {'detected': version_detected, 'to_create': version_to_create}
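# Usage sketch: with an empty file path a Node app falls back to the default,
# per the branch above:
#   get_runtime_version_details('', 'node')
#   -> {'detected': '-', 'to_create': NODE_VERSION_DEFAULT}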
def create_resource_group(cmd, rg_name, location):
from azure.cli.core.profiles import ResourceType, get_sdk
rcf = _resource_client_factory(cmd.cli_ctx)
resource_group = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'ResourceGroup', mod='models')
rg_params = resource_group(location=location)
return rcf.resource_groups.create_or_update(rg_name, rg_params)
def check_resource_group_exists(cmd, rg_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.resource_groups.check_existence(rg_name)
def _check_resource_group_supports_os(cmd, rg_name, is_linux):
# get all appservice plans from RG
client = web_client_factory(cmd.cli_ctx)
plans = list(client.app_service_plans.list_by_resource_group(rg_name))
for item in plans:
# for Linux if an app with reserved==False exists, ASP doesn't support Linux
if is_linux and not item.reserved:
return False
if not is_linux and item.reserved:
return False
return True
def get_num_apps_in_asp(cmd, rg_name, asp_name):
client = web_client_factory(cmd.cli_ctx)
return len(list(client.app_service_plans.list_web_apps(rg_name, asp_name)))
# pylint:disable=unexpected-keyword-arg
def get_lang_from_content(src_path, html=False):
# NODE: package.json should exist in the application root dir
# NETCORE & DOTNET: *.csproj should exist in the application dir
# NETCORE: <TargetFramework>netcoreapp2.0</TargetFramework>
# DOTNET: <TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
runtime_details_dict = dict.fromkeys(['language', 'file_loc', 'default_sku'])
package_json_file = os.path.join(src_path, 'package.json')
package_python_file = os.path.join(src_path, 'requirements.txt')
static_html_file = ""
package_netcore_file = ""
runtime_details_dict['language'] = ''
runtime_details_dict['file_loc'] = ''
runtime_details_dict['default_sku'] = 'F1'
import fnmatch
for _dirpath, _dirnames, files in os.walk(src_path):
for file in files:
if html and (fnmatch.fnmatch(file, "*.html") or fnmatch.fnmatch(file, "*.htm") or
fnmatch.fnmatch(file, "*shtml.")):
static_html_file = os.path.join(src_path, file)
break
if fnmatch.fnmatch(file, "*.csproj"):
package_netcore_file = os.path.join(src_path, file)
if not os.path.isfile(package_netcore_file):
package_netcore_file = os.path.join(_dirpath, file)
break
if html:
if static_html_file:
runtime_details_dict['language'] = STATIC_RUNTIME_NAME
runtime_details_dict['file_loc'] = static_html_file
runtime_details_dict['default_sku'] = 'F1'
else:
raise CLIError("The html flag was passed, but could not find HTML files, "
"see 'https://go.microsoft.com/fwlink/?linkid=2109470' for more information")
elif os.path.isfile(package_python_file):
runtime_details_dict['language'] = PYTHON_RUNTIME_NAME
runtime_details_dict['file_loc'] = package_python_file
runtime_details_dict['default_sku'] = LINUX_SKU_DEFAULT
elif os.path.isfile(package_json_file) or os.path.isfile(os.path.join(src_path, 'server.js')) or os.path.isfile(os.path.join(src_path, 'index.js')):
runtime_details_dict['language'] = NODE_RUNTIME_NAME
runtime_details_dict['file_loc'] = package_json_file if os.path.isfile(package_json_file) else ''
runtime_details_dict['default_sku'] = LINUX_SKU_DEFAULT
elif package_netcore_file:
runtime_lang = detect_dotnet_lang(package_netcore_file)
runtime_details_dict['language'] = runtime_lang
runtime_details_dict['file_loc'] = package_netcore_file
runtime_details_dict['default_sku'] = 'F1'
else: # TODO: Update the doc when the detection logic gets updated
raise CLIError("Could not auto-detect the runtime stack of your app.\n"
"HINT: Are you in the right folder?\n"
"For more information, see 'https://go.microsoft.com/fwlink/?linkid=2109470'")
return runtime_details_dict
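# Usage sketch (hypothetical folder): for a directory containing only a
# requirements.txt, the function above returns
#   {'language': PYTHON_RUNTIME_NAME, 'file_loc': '<dir>/requirements.txt',
#    'default_sku': LINUX_SKU_DEFAULT}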
def detect_dotnet_lang(csproj_path):
import xml.etree.ElementTree as ET
import re
parsed_file = ET.parse(csproj_path)
root = parsed_file.getroot()
version_lang = ''
version_full = ''
for target_ver in root.iter('TargetFramework'):
version_full = target_ver.text
version_full = ''.join(version_full.split()).lower()
version_lang = re.sub(r'([^a-zA-Z\s]+?)', '', target_ver.text)
if 'netcore' in version_lang.lower():
return NETCORE_RUNTIME_NAME
if version_full and re.fullmatch(DOTNET_TARGET_FRAMEWORK_REGEX, version_full):
return DOTNET_RUNTIME_NAME
return ASPDOTNET_RUNTIME_NAME
def parse_dotnet_version(file_path, default_version):
version_detected = [default_version]
try:
from xml.dom import minidom
import re
xmldoc = minidom.parse(file_path)
framework_ver = xmldoc.getElementsByTagName('TargetFrameworkVersion')
if not framework_ver:
framework_ver = xmldoc.getElementsByTagName('TargetFramework')
target_ver = framework_ver[0].firstChild.data
non_decimal = re.compile(r'[^\d.]+')
# reduce the version, e.g. '4.5.2' down to '4.5'
if target_ver is not None:
# strip any non-numeric characters (e.g. a leading 'v') from the version value
c = non_decimal.sub('', target_ver)
version_detected = c[:3]
except: # pylint: disable=bare-except
logger.warning("Could not parse dotnet version from *.csproj. Defaulting to %s", version_detected[0])
version_detected = version_detected[0]
return version_detected
def parse_netcore_version(file_path):
import xml.etree.ElementTree as ET
import re
version_detected = ['0.0']
parsed_file = ET.parse(file_path)
root = parsed_file.getroot()
for target_ver in root.iter('TargetFramework'):
version_detected = re.findall(r"\d+\.\d+", target_ver.text)
# incase of multiple versions detected, return list in descending order
version_detected = sorted(version_detected, key=float, reverse=True)
return version_detected
def parse_node_version(file_path):
# per Node convention, the node version constraint in package.json is found under "engines": { "node": ">=10.6.0"}
import json
import re
version_detected = []
with open(file_path) as data_file:
data = json.load(data_file)
for key, value in data.items():
if key == 'engines' and 'node' in value:
value_detected = value['node']
non_decimal = re.compile(r'[^\d.]+')
# remove the string ~ or > that sometimes exists in version value
c = non_decimal.sub('', value_detected)
# reduce the version to '6.0' from '6.0.0'
if '.' in c: # handle version set as 4 instead of 4.0
num_array = c.split('.')
num = num_array[0] + "." + num_array[1]
else:
num = c + ".0"
version_detected.append(num)
return version_detected or ['0.0']
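# Example (a sketch): a package.json containing {"engines": {"node": ">=10.6.0"}}
# yields ['10.6'] -- the '>=' is stripped and the patch component is dropped.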
def detect_netcore_version_tocreate(detected_ver):
if detected_ver in NETCORE_VERSIONS:
return detected_ver
return NETCORE_VERSION_DEFAULT
def detect_dotnet_version_tocreate(detected_ver, default_version, versions_list):
min_ver = versions_list[0]
if detected_ver in versions_list:
return detected_ver
if detected_ver < min_ver:
return min_ver
return default_version
def detect_node_version_tocreate(detected_ver):
if detected_ver in NODE_VERSIONS:
return detected_ver
# get major version & get the closest version from supported list
major_ver = int(detected_ver.split('.')[0])
node_ver = NODE_VERSION_DEFAULT
# TODO: Handle checking for minor versions if node major version is 10
if major_ver <= 11:
node_ver = NODE_VERSION_DEFAULT
else:
node_ver = NODE_VERSION_NEWER
return node_ver
def find_key_in_json(json_data, key):
for k, v in json_data.items():
if key in k:
yield v
elif isinstance(v, dict):
for id_val in find_key_in_json(v, key):
yield id_val
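# Example (a sketch): list(find_key_in_json({'a': {'id': 1}, 'id': 2}, 'id'))
# returns [1, 2] -- nested matches are yielded as they are encountered.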
def set_location(cmd, sku, location):
client = web_client_factory(cmd.cli_ctx)
if location is None:
locs = client.list_geo_regions(sku, True)
available_locs = []
for loc in locs:
available_locs.append(loc.name)
loc = available_locs[0]
else:
loc = location
return loc.replace(" ", "").lower()
def get_site_availability(cmd, name):
""" This is used by az webapp up to verify if a site needs to be created or should just be deployed"""
client = web_client_factory(cmd.cli_ctx)
availability = client.check_name_availability(name, 'Site')
# check for "." in app name. it is valid for hostnames to contain it, but not allowed for webapp names
if "." in name:
availability.name_available = False
availability.reason = "Invalid"
availability.message = ("Site names only allow alphanumeric characters and hyphens, "
"cannot start or end in a hyphen, and must be less than 64 chars.")
return availability
def get_app_details(cmd, name):
client = web_client_factory(cmd.cli_ctx)
data = (list(filter(lambda x: name.lower() == x.name.lower(), client.web_apps.list())))
_num_items = len(data)
if _num_items > 0:
return data[0]
return None
def get_rg_to_use(user, rg_name=None):
default_rg = "{}_rg_{:04}".format(user, randint(0, 9999))
if rg_name is not None:
return rg_name
return default_rg
def get_profile_username():
from azure.cli.core._profile import Profile
user = Profile().get_current_account_user()
user = user.split('@', 1)[0]
if len(user.split('#', 1)) > 1: # on cloudShell user is in format live.com#user@domain.<EMAIL>
user = user.split('#', 1)[1]
return user
def get_sku_to_use(src_dir, html=False, sku=None, runtime=None):
if sku is None:
if runtime: # user overrode language detection by specifying a runtime
return 'F1'
lang_details = get_lang_from_content(src_dir, html)
return lang_details.get("default_sku")
| |
"""Module with base abstraction of common objects."""
import pickle
import sys
from abc import ABC, abstractmethod
from functools import reduce
from itertools import islice
from typing import List, Iterable, Iterator, Tuple, Union, Type, Any
import dill
import numpy as np
import pandas as pd
from pandas.core.tools.datetimes import DatetimeScalar, Timestamp
from cgnal.core.typing import PathLike, T, T_co
from cgnal.core.utils.dict import groupIterable
from typing import Generic, Callable, Sequence
class Serializable(ABC):
"""Abstract Class to be used to extend objects that can be serialised."""
@abstractmethod
def write(self, filename: PathLike) -> None:
"""Write class to a file."""
...
@classmethod
@abstractmethod
def load(cls, filename: PathLike) -> "Serializable":
"""Load class from a file."""
...
class PickleSerialization(Serializable):
"""Serialization based on pickle package."""
def write(self, filename: PathLike) -> None:
"""
Write instance as pickle.
:param filename: Name of the file where to save the instance
:return: None
"""
with open(filename, "wb") as fid:
pickle.dump(self, fid)
@classmethod
def load(cls, filename: PathLike) -> "PickleSerialization":
"""
Load instance from pickle.
:param filename: Name of the file to be read
:return: Instance of the read Model
"""
with open(filename, "rb") as fid:
return pickle.load(fid)
class DillSerialization(Serializable):
"""Serialization based on dill package."""
def write(self, filename: PathLike) -> None:
"""
Write instance to a file using dill.
:param filename: Name of the file where to save the instance
:return: None
"""
with open(filename, "wb") as fid:
dill.dump(self, fid)
@classmethod
def load(cls, filename: PathLike) -> "DillSerialization":
"""
Load instance from file.
:param filename: Name of the file to be read
:return: Instance of the read Model
"""
with open(filename, "rb") as fid:
return dill.load(fid)
class IterGenerator(Generic[T]):
"""Base class representing any generator."""
def __init__(self, generator_function: Callable[[], Iterator[T]]):
"""
Class that allows a given generator to be accessed as an Iterator via .iterator property.
:param generator_function: function that outputs a generator
"""
self.generator_function = generator_function
@property
def iterator(self) -> Iterator[T]:
"""
Return an iterator over the given generator function.
:return: an iterator
"""
return self.generator_function()
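# Example (a sketch): wrapping a generator function makes it re-iterable, since
# each access to .iterator calls the function again.
#   squares = IterGenerator(lambda: (i * i for i in range(3)))
#   list(squares.iterator)  # -> [0, 1, 4], repeatably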
class BaseIterable(Generic[T], ABC):
"""
Class to provide base interfaces and methods for enhancing iterables classes and enable more functional approaches.
In particular, the class provides among others implementation for map, filter and foreach methods.
"""
@property
@abstractmethod
def items(self) -> Iterable[T]:
"""
Return an iterator over the items.
:return: Iterable[T]
"""
raise NotImplementedError
@property
@abstractmethod
def cached(self) -> bool:
"""
Whether the iterable is cached in memory or lazy.
:return: boolean indicating whether iterable is fully-stored in memory
"""
raise NotImplementedError
@property
def __lazyType__(self) -> "Type[LazyIterable]":
"""Specify the type of LazyObject associated to this class."""
return LazyIterable
@property
def __cachedType__(self) -> "Type[CachedIterable]":
"""Specify the type of CachedObject associated to this class."""
return CachedIterable
@property
def asLazy(self) -> "LazyIterable":
"""
Provide a lazy representation of the iterable.
:return: lazy iterable
"""
def generator():
for item in self:
yield item
return self.__lazyType__(IterGenerator(generator))
@property
def asCached(self) -> "CachedIterable":
"""
Provide an in-memory cached representation of the iterable.
:return: cached iterable
"""
return self.__cachedType__(list(self.items))
def take(self, size: int) -> "Iterable[T]":
"""
Take the first n elements of the iterables.
:param size: number of elements to be taken
:return: cached iterable with the first elements
"""
return self.__cachedType__(list(islice(self, size)))
def filter(self, f: Callable[[T], bool]) -> "LazyIterable[T]":
"""
Return an iterable where elements have been filtered based on a boolean function.
:param f: boolean function that selects items
:return: lazy iterable with elements filtered
"""
def generator():
for item in self:
if f(item):
yield item
return self.__lazyType__(IterGenerator(generator))
def __iter__(self) -> Iterator[T]:
"""Return an iterator over the items."""
for item in self.items:
yield item
def batch(self, size: int = 100) -> "Iterator[CachedIterable[T]]":
"""
Return an iterator of batches of size *size*.
:param size: dimension of the batch
:return: iterator of batches
"""
for batch in groupIterable(self.items, batch_size=size):
yield self.__cachedType__(batch)
def map(self, f: Callable[[T], T_co]) -> "LazyIterable[T_co]":
"""
Map all elements of an iterable with the provided function.
:param f: function to be used to map the elements
:return: mapped iterable
"""
def generator():
for item in self:
yield f(item)
return self.__lazyType__(IterGenerator(generator))
def foreach(self, f: Callable[[T], Any]):
"""
Execute the provided function on each element of the iterable.
:param f: function to be executed for each element
:return: None
"""
for doc in self.items:
f(doc)
class LazyIterable(BaseIterable, Generic[T]):
"""Base class to be used for implementing lazy iterables."""
def __init__(self, items: IterGenerator):
"""
Return an instance of the class to be used for implementing lazy iterables.
:param items: IterGenerator containing the generator of items
"""
if not isinstance(items, IterGenerator):
raise TypeError(
"For lazy iterables the input must be an IterGenerator(object). Input of type %s passed"
% type(items)
)
self.__items__ = items
@property
def items(self) -> Iterator[T]:
"""Return an iterator over the items.
:return: Iterable[T]
"""
return self.__items__.iterator
@property
def cached(self) -> bool:
"""
Whether the iterable is cached in memory or lazy.
:return: boolean indicating whether iterable is fully-stored in memory
"""
return False
class CachedIterable(BaseIterable, Generic[T], DillSerialization):
"""Base class to be used for implementing cached iterables."""
def __init__(self, items: Sequence[T]):
"""
Return instance of a class to be used for implementing cached iterables.
:param items: sequence or iterable of elements
"""
self.__items__ = list(items)
def __len__(self) -> int:
"""Return the size of the list of elements."""
return len(self.items)
@property
def items(self) -> Sequence[T]:
"""
Return an iterator over the items.
:return: Iterable[T]
"""
return self.__items__
def __getitem__(self, item: int) -> T:
"""
Get the item by position index.
:param item: integer representing the position.
"""
return self.items[item]
@property
def cached(self) -> bool:
"""
Whether the iterable is cached in memory or lazy.
:return: boolean indicating whether iterable is fully-stored in memory
"""
return True
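# Example (a sketch): the functional combinators compose across the lazy and
# cached variants defined above.
#   evens = CachedIterable([1, 2, 3, 4]).filter(lambda x: x % 2 == 0)
#   list(evens.map(lambda x: x * 2))  # -> [4, 8]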
class BaseRange(ABC):
"""Abstract Range Class."""
@property
@abstractmethod
def start(self) -> Timestamp:
"""
Return the first timestamp.
:return: Timestamp
"""
raise NotImplementedError
@property
@abstractmethod
def end(self) -> Timestamp:
"""
Return the last timestamp.
:return: Timestamp
"""
raise NotImplementedError
@abstractmethod
def __iter__(self) -> Iterator["Range"]:
"""
Return an iterator over continuous ranges.
:return: Iterator[Range]
"""
...
@abstractmethod
def __add__(self, other: "BaseRange") -> "BaseRange":
"""
Return a range composed by two ranges.
:param other: other range to be merged
:return: merged range
"""
...
@abstractmethod
def overlaps(self, other: "BaseRange") -> bool:
"""
Return whether two ranges overlaps.
:param other: other range to be compared with
:return: True if the two ranges intersect, False otherwise
"""
...
@abstractmethod
def range(self, freq="H") -> List[Timestamp]:
"""
Return list of timestamps, spaced by given frequency.
        :param freq: frequency of timestamps; valid pandas aliases include "D" (day), "H" (hour), "T" (minute), "S" (second).
:return: list of timestamps
"""
...
def __str__(self) -> str:
"""Return string representation."""
return " // ".join([f"{r.start}-{r.end}" for r in self])
@property
def days(self) -> List[Timestamp]:
"""
Create date range with daily frequency.
:return: list of pd.Timestamp from start to end with daily frequency
"""
return self.range(freq="1D")
@property
def business_days(self) -> List[Timestamp]:
"""
        Create date range with business-day frequency.
:return: list of pd.Timestamp from start to end with daily frequency including only days from Mon to Fri
"""
return self.range(freq="1B")
@property
def minutes_15(self) -> List[Timestamp]:
"""
        Create date range with 15-minute frequency.
:return: list of pd.Timestamp from start to end with 15 minutes frequency
"""
return self.range(freq="15T")
class Range(BaseRange):
"""Base class for a continuous range."""
def __init__(self, start: DatetimeScalar, end: DatetimeScalar) -> None:
"""
Return a simple Range Class.
:param start: starting datetime for the range
:param end: ending datetime for the range
"""
self.__start__ = pd.to_datetime(start)
self.__end__ = pd.to_datetime(end)
if self.start > self.end:
raise ValueError(
"Start and End values should be consequential: start < end"
)
@property
def start(self) -> Timestamp:
"""
Return the first timestamp.
:return: Timestamp
"""
return self.__start__
@property
def end(self) -> Timestamp:
"""
Return the last timestamp.
:return: Timestamp
"""
return self.__end__
def __iter__(self) -> Iterator["Range"]:
"""
Return an iterator over continuous ranges.
:return: Iterator[Range]
"""
yield Range(self.start, self.end)
def range(self, freq="H") -> List[Timestamp]:
"""
Return list of timestamps, spaced by given frequency.
:param freq: given frequency
:return: list of timestamps
"""
return pd.date_range(self.start, self.end, freq=freq).tolist()
    def __overlaps_range__(self, other: "Range") -> bool:
        """
        Return whether this continuous range intersects another one.
        :param other: other continuous range to be compared with
        :return: True if the two ranges intersect, False otherwise
        """
        # The original body was cut off here; this is the standard
        # interval-intersection test implied by the ``overlaps`` contract.
        return (self.start <= other.end) and (other.start <= self.end)
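# Usage sketch (illustrative, not from the original source): building a
# continuous range and deriving timestamp grids from it.
def _demo_range():
    r = Range("2021-01-04", "2021-01-08")
    hours = r.range(freq="H")  # hourly pd.Timestamp grid, 97 points
    days = r.days              # daily grid via the BaseRange property, 5 points
    print(str(r))              # "2021-01-04 00:00:00-2021-01-08 00:00:00"
    return hours, days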
jobsTable = "jobs"
else:
jobsTable = form['username'] + "_userview_jobs"
        connection.execute(
            text("UPDATE " + jobsTable + " SET status=:status WHERE jobid=:jobid"),
            status=-1,
            jobid=id)
traceback.print_exc()
if connection:
connection.close()
return -1
def interpolateDataArray(dataArray, minResolution, startTime, endTime, screenSize):
for dataObj in dataArray:
dataObj["data"] = interpolateData(
asarray(
dataObj["data"]),
dataObj["channeltype"],
minResolution,
startTime,
endTime,
screenSize)
return dataArray
def interpolateData(data, type, resolution, startTime, endTime, screenSize):
    # There is an ethical dilemma here. For empty intervals I will interpolate from the closest value in time to start and end times.
    # This can be milliseconds but it can also be hours and days. This can lead to bad interpolations in some cases.
    if resolution:
        # print(startTime,endTime,screenSize)
        if(screenSize < 0):  # I want to use the smallest resolution possible
timeInt = linspace(int(startTime), int(endTime), int((int(endTime) - int(startTime)) / int(resolution)))
else: # Using screen size to limit resolution
timeInt = linspace(int(startTime), int(endTime), int(screenSize))
if type == 1:
strings = [x for x in data[:, 1]]
data = asarray([[int(x), id(strings[i])] for i, x in enumerate(data[:, 0])])
intF = interp1d(data[:, 0], data, axis=0, kind="zero", fill_value=(data[0], data[-1]), bounds_error=False)
newData = intF(timeInt)
if type == 1:
newData = [[x[0], ctypes.cast(int(x[1]), ctypes.py_object).value] for x in newData]
    else:  # This is for the case where EVERYTHING is empty; still need a good way to handle this
if type == 1:
strings = [x for x in data[:, 1]]
data = asarray([[int(x), id(strings[i])] for i, x in enumerate(data[:, 0])])
timeInt = linspace(int(startTime), int(endTime), 10)
intF = interp1d(data[:, 0], data, axis=0, kind="zero", fill_value=(data[0], data[-1]), bounds_error=False)
newData = intF(timeInt)
if type == 1:
newData = [[x[0], ctypes.cast(int(x[1]), ctypes.py_object).value] for x in newData]
if type == 1:
del strings
return newData
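# Standalone sketch of the trick used in interpolateData above (illustrative):
# string samples cannot be interpolated numerically, so each string is mapped
# to its CPython object id, the ids are carried through a zero-order-hold
# interpolation, and the ids are cast back to the original string objects.
# This only works while the ``strings`` list keeps the objects alive; it
# relies on the same asarray/linspace/interp1d/ctypes imports as the module.
def _demo_string_zero_order_hold():
    times = asarray([0.0, 10.0, 20.0])
    strings = ["idle", "running", "done"]
    data = asarray([[t, id(strings[i])] for i, t in enumerate(times)])
    intF = interp1d(data[:, 0], data, axis=0, kind="zero",
                    fill_value=(data[0], data[-1]), bounds_error=False)
    grid = linspace(0, 20, 5)
    recovered = [ctypes.cast(int(row[1]), ctypes.py_object).value
                 for row in intF(grid)]
    print(recovered)  # ['idle', 'idle', 'running', 'running', 'done']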
def getDataArray(channID, idx, channelData):
    for channData in channelData:
        if channData["channelID"] == channID:
            return channData["data"][idx][1]
    return None  # no matching channel found
def getUserDetails(username, connection):
"""Auxiliary function used by several endpoints of the DAQBroker web application to gather database user information"""
result = connection.execute(
text("SELECT * FROM daqbroker_settings.users WHERE username=:theUser"),
theUser=username)
user = None
for row in result: # Should only return one, primary key and all
user = dict(zip(row.keys(), row))
return user
def parseMeta(server, db, instrument, meta, paths, logPort, lockList, session):
try:
database = db
theContext = zmq.Context()
        if not meta.parsing[0].locked and not meta.parsing[0].forcelock:
remarks = json.loads(meta.parsing[0].remarks)
metaremarks = json.loads(meta.remarks)
metaType = meta.type
warned = False
errors = []
try:
theLogSocket = theContext.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {
'req': 'LOG',
'type': 'INFO',
'process': 'COLLECTOR',
'message': "Parse LOCK - " + instrument["Name"],
'method': 'parseMeta'}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
                meta.parsing[0].locked = True
                meta.lastAction = time.time()
                session.commit()
                customChannels = [x for x in meta.channels if x.channeltype == 3]
                if metaremarks["toParse"] == '1' or metaremarks["toParse"] == 'true' or metaremarks["toParse"] != '':  # This is for file parsing
if metaType == 0: # File parsing
if metaremarks['parsingInfo']['separator'] == 'tab':
metaremarks['parsingInfo']['separator'] = '\t'
elif metaremarks['parsingInfo']['separator'] == 'comma':
metaremarks['parsingInfo']['separator'] = ','
elif metaremarks['parsingInfo']['separator'] == 'semicolon':
metaremarks['parsingInfo']['separator'] = ';'
elif metaremarks['parsingInfo']['separator'] == 'colon':
metaremarks['parsingInfo']['separator'] = ':'
elif metaremarks['parsingInfo']['separator'] == 'space':
metaremarks['parsingInfo']['separator'] = ' '
header = []
for channel in meta.channels:
if not channel.channeltype == 2:
header.append({'name': channel.Name,
'alias': channel.alias,
"type": channel.channeltype,
'channelid': channel.channelid,
'firstClock': channel.firstClock,
'lastclock': channel.lastclock})
lastFound = 0
base_dir = '.'
if getattr(sys, 'frozen', False):
base_dir = os.path.join(sys._MEIPASS)
thePath = os.path.join(base_dir, paths["BACKUPPATH"], server, database, instrument["Name"], meta.name)
if 'getNested' in metaremarks:
if(metaremarks['getNested'] == "1" or metaremarks['getNested'] == "true" or metaremarks['getNested']):
walked = os.walk(thePath)
orderedFiles = []
for vals in walked:
for file in vals[2]:
orderedFiles.append(os.path.join(vals[0], file))
else:
def mtime(f): return os.stat(f).st_mtime
orderedFiles = list(sorted([os.path.join(thePath, x)
for x in os.listdir(thePath)], key=mtime, reverse=True))
else:
def mtime(f): return os.stat(f).st_mtime
orderedFiles = list(sorted([os.path.join(thePath, x)
for x in os.listdir(thePath)], key=mtime, reverse=True))
# reduce file list by comparing with pattern and extension
filesWithPattern = [
x for x in orderedFiles if fnmatch.fnmatch(
x,
'*' +
metaremarks["pattern"] +
'*.' +
metaremarks["extension"])] # reduced list
foundSyncedFiles = [x for t in orderedFiles for x in remarks if t == x['name']]
foundSyncedFilesNames = [
x for t in remarks for x in orderedFiles if(
x == t['name'] and fnmatch.fnmatch(
t['name'],
'*' +
metaremarks["pattern"] +
'*.' +
metaremarks["extension"]))]
notFoundSynced = list(set(filesWithPattern) - set(foundSyncedFilesNames))
#print(foundSyncedFiles)
for file in foundSyncedFiles:
j = remarks.index(file)
changes = False
linesParsed = int(remarks[j]["linesParsed"])
linesNotParsed = int(remarks[j]["linesNotParsed"])
lastParsedLine = int(remarks[j]["lastParsedLine"])
lastChangeDate = int(remarks[j]["lastChangeDate"])
parsedSize = int(remarks[j]["parsedSize"])
notParsedSize = int(remarks[j]["notParsedSize"])
totalLines = int(remarks[j]["totalLines"])
if 'lastTime' in remarks[j]:
lastTime = float(remarks[j]["lastTime"])
if 'parsedSize' not in file:
file['parsedSize'] = os.path.getsize(file['name'])
sizeDiff = file['parsedSize'] + file["notParsedSize"]
#if abs(os.path.getsize(file["name"]) - sizeDiff) > len(header) * 30
(lines, readSize) = collectFileLines(file['name'], metaremarks, header, offset=sizeDiff)
#print(len(lines),lines[0],lines[-1])
size = os.path.getsize(file['name'])
parseResult = parseFileLines(
lines,
header,
metaremarks['parsingInfo']['dataType'],
metaremarks,
instrument["Name"],
session,
db,
logPort,
meta.metaid)
linesParsed = linesParsed + parseResult["linesParsed"]
linesNotParsed = linesNotParsed + parseResult["linesNotParsed"]
lastParsedLine = lastParsedLine + linesParsed + linesNotParsed
totalLines = totalLines + len(lines)
parsedSize = parsedSize + parseResult["parsedSize"]
notParsedSize = notParsedSize + parseResult["notParsedSize"]
changeDate = os.path.getmtime(file['name'])
lastTime = parseResult["lastTime"]
remarks[j] = {
'singleName': file['singleName'],
"name": file['name'],
"size": size,
"lastChangeDate": changeDate,
"linesParsed": linesParsed,
"linesNotParsed": linesNotParsed,
"lastParsedLine": lastParsedLine,
"totalLines": totalLines,
'lastTime': lastTime,
'parsedSize': parsedSize,
'notParsedSize': notParsedSize
}
remarks = sorted(remarks, key=lambda k: k['lastChangeDate'], reverse=True)
meta.parsing[0].remarks=json.dumps(remarks)
for file in notFoundSynced:
readSize = 0
lines = []
notDone = True
(lines, readSize) = collectFileLines(file, metaremarks, header)
if int(metaremarks['parsingInfo']['dataType']) == 1:
linesParse = lines
else:
linesParse = lines[int(metaremarks['parsingInfo']['headerLines']):]
#print(len(lines), lines[0], lines[-1])
parseResult = parseFileLines(linesParse,
header,
metaremarks['parsingInfo']['dataType'],
metaremarks,
instrument["Name"],
session,
database,
logPort,
meta.metaid)
#print(file, len(lines))
linesParsed = parseResult["linesParsed"]
linesNotParsed = parseResult["linesNotParsed"]
lastTime = 0
if "lastTime" in parseResult:
lastTime = float(parseResult["lastTime"])
else:
lastTime = 0
# Not always necessarily true but important to ensure that monitoring continues
lastParsedLine = parseResult["linesParsed"] + parseResult["linesNotParsed"]
if(len(parseResult["errors"]) > 1):
for error in parseResult["errors"]:
errors.append(error)
remarks.append({"singleName": file,
"name": file,
"size": os.path.getsize(file),
"lastChangeDate": os.path.getmtime(file),
"linesParsed": linesParsed,
"linesNotParsed": linesNotParsed,
"lastParsedLine": lastParsedLine,
"totalLines": len(lines),
'lastTime': lastTime,
'parsedSize': parseResult["parsedSize"],
'notParsedSize': parseResult["notParsedSize"]
})
#print(parseResult["parsedSize"],readSize)
remarks = sorted(remarks, key=lambda k: k['lastChangeDate'], reverse=True)
meta.parsing[0].remarks = json.dumps(remarks)
if len(customChannels) > 0:
for channel in customChannels:
parseCustomChannel(instrument, channel, session)
session.commit()
except Exception as e:
session.rollback()
traceback.print_exc()
_, _, tb = sys.exc_info()
tbResult = traceback.format_list(traceback.extract_tb(tb)[-1:])[-1]
filename = tbResult.split(',')[0].replace('File', '').replace('"', '')
lineno = tbResult.split(',')[1].replace('line', '')
funname = tbResult.split(',')[2].replace('\n', '').replace(' in ', '')
line = str(e)
theLogSocket = theContext.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {
'req': 'LOG',
'type': 'ERROR',
'process': 'COLLECTOR',
'message': str(e),
'filename': filename,
'lineno': lineno,
'funname': funname,
'line': line}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
finally:
theLogSocket = theContext.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {
'req': 'LOG',
'type': 'INFO',
'process': 'COLLECTOR',
'message': "Parse UNLOCK - " + instrument["Name"],
'method': 'parseMeta'}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
meta.parsing[0].locked = 0
session.commit()
except Exception as e:
traceback.print_exc()
_, _, tb = sys.exc_info()
tbResult = traceback.format_list(traceback.extract_tb(tb)[-1:])[-1]
filename = tbResult.split(',')[0].replace('File', '').replace('"', '')
lineno = tbResult.split(',')[1].replace('line', '')
funname = tbResult.split(',')[2].replace('\n', '').replace(' in ', '')
line = str(e)
theLogSocket = theContext.socket(zmq.REQ)
theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
toSend = {
'req': 'LOG',
'type': 'ERROR',
'process': 'COLLECTOR',
'message': str(e),
'filename': filename,
'lineno': lineno,
'funname': funname,
'line': line}
theLogSocket.send(json.dumps(toSend).encode())
theLogSocket.close()
finally:
# connection.close()
for i, el in enumerate(lockList):
if el['instrument'] == instrument["Name"] and el['meta'] == meta.name:
temp = {'server':server,'database': database, 'instrument': instrument["Name"], 'meta': meta.name, 'locked': False}
lockList[i] = temp
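# The ZeroMQ logging pattern above is repeated four times inside parseMeta; a
# sketch of the helper it could be factored into (hypothetical, not present
# in the original source):
def sendLogMessage(context, logPort, payload):
    theLogSocket = context.socket(zmq.REQ)
    theLogSocket.connect("tcp://127.0.0.1:" + str(logPort))
    theLogSocket.send(json.dumps(payload).encode())
    theLogSocket.close()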
def collectFileLines(file, metaremarks, header, offset=0):
    lines = []
    readSize = offset  # keep a defined value even if nothing is read
    # 'rU' is deprecated; text mode already uses universal newlines
    with open(file, 'r') as f:
        if offset > 0:
            f.seek(offset)
        if int(metaremarks['parsingInfo']['dataType']) == 1:
            maxLines = int(metaremarks['parsingInfo']['colSize']) * len(header) * 2
        else:
            maxLines = 1000
        while len(lines) < maxLines:
            line = f.readline()
            if not line:
                break
            readSize = f.tell()
            lines.append(line)
    return (lines, readSize)
# print(ws)
# tmp = ws.compute()
# print('min here', tmp.min(), tmp.max())
ws = da.apply_along_axis(adapt_weights, 0, ws,
how_many_best_views=how_many_best_views,
cumulative_weight_best_views=cumulative_weight_best_views,
shape=(len(ws),))
# tmp = ws.compute()
# print('min here', tmp.min(), tmp.max())
# da.to_zarr(ws, '/tmp/0.zarr', overwrite=True)
ws = dask_affine_transform(ws, np.diag([1.] + [1 / size / bin_factor] * 3), [0] + [-(size - 1) / size / 2.] * 3,
output_chunks=tviews.chunks,
output_shape=tviews.shape, mode='nearest', order=1,
depth={0: 0, 1: depth, 2: depth, 3: depth})
# tmp = ws.compute()
# da.to_zarr(ws, '/tmp/1.zarr')
# print('min here', tmp.min(), tmp.max())
# def normalise(ws):
# wssum = np.sum(ws, 0)
# wssum[wssum == 0] = 1
# res = ws / wssum
# res[:, wssum == 0] = 0
# return res
#
# ws = da.map_blocks(normalise, ws) # ,dtype=np.float32)
return ws
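# Illustrative numpy/scipy sketch of the weight-upsampling step performed by
# dask_affine_transform above: one scalar weight per coarse block is
# stretched back onto the full-resolution grid with a linear (order=1)
# affine transform. All names below are hypothetical stand-ins for the dask
# arrays used above.
def _demo_upsample_block_weights(coarse_ws, full_shape, size, bin_factor):
    from scipy.ndimage import affine_transform
    out = np.empty((coarse_ws.shape[0],) + tuple(full_shape), dtype=np.float32)
    for iview in range(coarse_ws.shape[0]):
        out[iview] = affine_transform(
            coarse_ws[iview],
            np.diag([1. / size / bin_factor] * 3),  # same scaling as above
            offset=[-(size - 1) / size / 2.] * 3,   # same offset as above
            output_shape=tuple(full_shape), mode='nearest', order=1)
    return out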
# from dask_image import ndfilters
# def get_weights_dct_dask(tviews,
# params,
# orig_stack_propertiess,
# stack_properties,
# depth=0,
# size=None,
# max_kernel=None,
# gaussian_kernel=None,
# how_many_best_views=2,
# cumulative_weight_best_views=0.9,
# ):
# size_um = 50
# size_px = size_um / stack_properties['spacing'][0]
# size_px = np.min([size_px, stack_properties['size'].min() / 3.])
#
# max_size_px = 64
# bin_factor = int(np.max([1, np.ceil(size_px / max_size_px)]))
# size = int(size_px / bin_factor)
#
# print('size bin', size, bin_factor)
#
# if bin_factor > 1:
# tviews_binned = da.coarsen(np.mean, tviews, {i: bin_factor for i in range(1, 4)}, trim_excess=True)
# else:
# tviews_binned = tviews
#
# tviews_binned_rechunked = tviews_binned.rechunk((tviews_binned.chunksize[0],) + (size,) * 3)
#
# quality_stack_properties = dict()
# quality_stack_properties['spacing'] = np.array(stack_properties['spacing'] * bin_factor)
# quality_stack_properties['size'] = np.array([int(size)] * 3)
# quality_stack_properties['origin'] = stack_properties['origin']
#
# ws = tviews_binned_rechunked.map_blocks(determine_chunk_quality,
# chunks=(tviews_binned_rechunked.shape[0], 1, 1, 1),
# dtype=np.float32,
# **{'orig_stack_propertiess': orig_stack_propertiess,
# 'params': params,
# 'stack_properties': quality_stack_properties,
# }
# )
#
# if max_kernel is None:
# max_kernel = 100 # in um
#
# filter_size = np.max([1, int(max_kernel / (stack_properties['spacing'][0] * size * bin_factor))]) # 100um
# print('weight filter size: %s' % filter_size)
#
# ws = ndfilters.maximum_filter(ws, size=[1] + [int(filter_size)] * 3)
#
# # optimise here?
# ws = ws.map_blocks(adapt_weights, dtype=np.float32, how_many_best_views=how_many_best_views,
# cumulative_weight_best_views=cumulative_weight_best_views)
#
# ws = ndfilters.convolve(ws, weights=np.ones((3, 3, 3))[None, ...] / 3. ** 3)
#
# ws = dask_affine_transform(ws, np.diag([1.] + [1. / size] * 3), [0, 0, 0, 0], output_chunks=tviews.chunks,
# output_shape=tviews.shape)
#
# def normalise(ws):
# wssum = np.sum(ws, 0)
# wssum[wssum == 0] = 1
# res = ws / wssum
# res[:, wssum == 0] = 0
# return res
#
# ws = da.map_blocks(normalise, ws) # ,dtype=np.float32)
# return ws
from scipy.fftpack import dctn
def determine_chunk_quality(vrs):
"""
DCT Shannon Entropy, as in:
Adaptive light-sheet microscopy for long-term, high-resolution imaging in living organisms
http://www.nature.com/articles/nbt.3708
Consider the full bandwidth, so set r0=d0 in their equation
:param vrs:
:return:
"""
# print('dw...')
view_inside_mask = np.array([np.any(v) for v in vrs])
# print(view_inside_mask)
# less than two views inside
if np.sum(view_inside_mask>0) <= 1:
# ws = np.ones(len(view_inside_mask)).astype(np.float32)*np.nan
ws = np.zeros(len(view_inside_mask)).astype(np.float32)#*np.nan
ws[view_inside_mask > 0] = 1.
return ws[:, None, None, None]
vrs = np.copy(vrs)
vrs = vrs[view_inside_mask>0]
axes = [0,1,2]
ds = []
for v in vrs:
        if np.sum(v == 0) > np.prod(v.shape) * (4 / 5.):  # np.product is deprecated
            ds.append([0])
            continue
        elif v.min() < 0.0001:
            v[v == 0] = v[v > 0].min()  # or nearest neighbor
d = dctn(v,norm='ortho',axes=axes)
# d = dct(dct(dct(v,axis=-1,norm='ortho'),axis=-2,norm='ortho'),axis=-3,norm='ortho')
# cut = size//2
# d[:cut,:cut,:cut] = 0
ds.append(d.flatten())
    # l1 norm of the DCT coefficients (sum of absolute values; the variable
    # name says l2 but the quantity computed is the l1 norm)
    dsl2 = np.array([np.sum(np.abs(d)) for d in ds])
    # don't divide by zero below
    dsl2[dsl2 == 0] = 1
def abslog(x):
res = np.zeros_like(x)
x = np.abs(x)
res[x==0] = 0
res[x>0] = np.log2(x[x>0])
return res
    ws = np.array([-np.sum(np.abs(d) * abslog(d / dsl2[i]))
                   for i, d in enumerate(ds)])  # avoid shadowing builtin id
    logging.debug('ws: %s' % ws)
# ws = np.array([np.sum(np.abs(d)) for d in ds])
# simple weights in case everything is zero
# if not ws.max():
# ws = np.ones(len(ws))/float(len(ws))
# ws = np.array(ws)
wssum = np.sum(ws)
if wssum>0:
ws = ws/wssum
# res = np.zeros(orig_shape,dtype=np.float32)
# for iw in range(len(ws)):
# res[iw] = ws[iw]
#
# full_ws = np.ones(len(view_inside_mask))*np.nan
full_ws = np.zeros(len(view_inside_mask))
full_ws[view_inside_mask > 0] = ws
# print(full_ws.min(), full_ws.max())
return full_ws[:,None,None,None]
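# Tiny sketch (illustrative) of the per-patch focus measure computed above:
# the DCT Shannon entropy of a single 3D patch, using the same quantity as
# determine_chunk_quality, i.e. -sum(|d| * log2(|d| / ||d||_1)) over the
# nonzero DCT coefficients. Relies on the dctn/np imports above.
def _demo_dct_entropy(patch):
    d = dctn(patch, norm='ortho', axes=[0, 1, 2]).flatten()
    mag = np.abs(d)
    l1 = np.sum(mag)
    if l1 == 0:
        return 0.0
    nz = mag > 0
    return -np.sum(mag[nz] * np.log2(mag[nz] / l1))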
# from scipy.fftpack import dctn
# def determine_chunk_quality(vrs,
# # how_many_best_views,
# # cumulative_weight_best_views,
# orig_stack_propertiess,
# params,
# stack_properties,
# block_info=None,
# ):
#
# """
# DCT Shannon Entropy, as in:
# Adaptive light-sheet microscopy for long-term, high-resolution imaging in living organisms
# http://www.nature.com/articles/nbt.3708
# Consider the full bandwidth, so set r0=d0 in their equation
# :param vrs:
# :return:
# """
# # print('dw...')
#
# curr_origin = []
# # target_origin = []
# for i in range(3):
# # pixel_offset = block_info[0]['chunk-location'][i + 1] * array_info['chunksize'] - array_info['depth']
# pixel_offset = block_info[0]['array-location'][i + 1][0]
# # pixel_offset = block_info[0]['chunk-location'][i + 1] # * block_info[None]['chunk-shape'][i+1]# - array_info['depth']
# # curr_origin.append(ws[0].origin[i] + pixel_offset * ws[0].spacing[i])
# curr_origin.append(stack_properties['origin'][i] + pixel_offset * stack_properties['spacing'][i])
#
# # print('curr_origin', curr_origin)
#
# block_stack_properties = dict()
# # block_stack_properties['size'] = np.array([in_spacing/out_spacing]*3).astype(np.int64)#+2*array_info['depth'])
# # block_stack_properties['size'] = np.array([in_spacing/out_spacing]*3).astype(np.int64)#+2*array_info['depth'])
# block_stack_properties['size'] = np.array(vrs[0].shape).astype(np.int64) # +2*array_info['depth'])
# block_stack_properties['spacing'] = np.array(stack_properties['spacing'])
# block_stack_properties['origin'] = np.array(curr_origin)# - depth * block_stack_properties['spacing']
#
# view_inside_mask = blocks_inside(orig_stack_propertiess,params,block_stack_properties,n_points_per_dim=2)
# # print(view_inside_mask)
#
# # less than two views inside
# if np.sum(view_inside_mask>0) <= 1:
# # ws = np.ones(len(view_inside_mask)).astype(np.float32)*np.nan
# ws = np.zeros(len(view_inside_mask)).astype(np.float32)#*np.nan
# ws[view_inside_mask > 0] = 1.
# return ws[:, None, None, None]
#
# vrs = np.copy(vrs)
#
# vrs = vrs[view_inside_mask>0]
#
# axes = [0,1,2]
# ds = []
# for v in vrs:
#
# if np.sum(v==0) > np.product(v.shape) * (4/5.):
# ds.append([0])
# continue
# elif v.min()<0.0001:
# v[v==0] = v[v>0].min() # or nearest neighbor
#
# d = dctn(v,norm='ortho',axes=axes)
# # d = dct(dct(dct(v,axis=-1,norm='ortho'),axis=-2,norm='ortho'),axis=-3,norm='ortho')
# # cut = size//2
# # d[:cut,:cut,:cut] = 0
# ds.append(d.flatten())
#
# # l2 norm
# dsl2 = np.array([np.sum(np.abs(d)) for d in ds])
# # don't divide by zero below
# dsl2[dsl2==0] = 1
#
# def abslog(x):
# res = np.zeros_like(x)
# x = np.abs(x)
# res[x==0] = 0
# res[x>0] = np.log2(x[x>0])
# return res
#
# ws = np.array([-np.sum(np.abs(d)*abslog(d/dsl2[id])) for id,d in enumerate(ds)])
# logging.debug('ws: %s' %ws)
# # ws = np.array([np.sum(np.abs(d)) for d in ds])
#
# # simple weights in case everything is zero
# # if not ws.max():
# # ws = np.ones(len(ws))/float(len(ws))
#
# # ws = np.array(ws)
# wssum = np.sum(ws)
# if wssum>0:
# ws = ws/wssum
# # res = np.zeros(orig_shape,dtype=np.float32)
# # for iw in range(len(ws)):
# # res[iw] = ws[iw]
# #
# # full_ws = np.ones(len(view_inside_mask))*np.nan
# full_ws = np.zeros(len(view_inside_mask))
# full_ws[view_inside_mask > 0] = ws
#
# return full_ws[:,None,None,None]
# # return res.astype(np.float32)
# # return ws
@io_decorator
def calc_stack_properties_from_views_and_params(views_props, params, spacing=None, mode='sample'):
spacing = np.array(spacing).astype(np.float64)
if spacing is None:
spacing = np.max([view['spacing'] for view in views_props],0)
if mode == 'sample':
volume = get_sample_volume(views_props,params)
elif mode == 'union':
volume = get_union_volume(views_props,params)
elif mode == 'intersection':
volume = get_intersection_volume(views_props,params)
stack_properties = calc_stack_properties_from_volume(volume, spacing)
return stack_properties
def transform_view_and_save_chunked(fn,view,params,iview,stack_properties,chunksize=None):#,pad_end=True):
print('transforming %s' %fn)
params = io_utils.process_input_element(params)
stack_properties = io_utils.process_input_element(stack_properties)
# if pad_end:
# # pad good part of view at the end of the z stack
# print('padding good part at the end of the z stack (only Z1)')
# view = ImageArray(np.pad(view,[[0,5],[0,0],[0,0]],mode='reflect'),origin=view.origin,spacing=view.spacing)
# res = transform_stack_sitk(view, params[iview], stack_properties=stack_properties,interp='bspline')
res = transform_stack_sitk(view, params[iview], stack_properties=stack_properties,interp='linear')
# if chunksize_phys is not None:
#
# chunksize = int(chunksize_phys/stack_properties['spacing'][0])
# chunksize = np.max([50,chunksize])
# chunksize = np.min([500,chunksize])
#
# else:
# chunksize = 100
io_utils.process_output_element(res, fn)
# if chunksize is None:
# chunks = np.min([[100]*3,stack_properties['size']],0)
# chunks = tuple([int(i) for i in chunks])
# else:
# chunks = tuple([int(chunksize)]*3)
#
#
# f = h5py.File(fn,'w')
# # chunks = np.min([[chunksize]*3, res.shape], 0)
# # chunks = tuple(chunks)
# f.create_dataset("array", data=np.array(res), chunks=chunks, compression="gzip")
# f['spacing'] = np.array(stack_properties['spacing'])
# f['origin'] = np.array(stack_properties['origin'])
# f['rotation'] = 0
# f.close()
return fn
@io_decorator
def fuse_dct(views,params,stack_properties):
weights = get_weights_dct(views,params,stack_properties)
return fuse_views_weights(views,params,stack_properties,weights=weights)
@io_decorator
def fuse_views_content(views,
axisOfRotation=1,
gaussian_kernel_size=1.,
window_size=5,max_proj=100):
"""
deprecated fusion
:param views:
:param axisOfRotation:
:param gaussian_kernel_size:
:param window_size:
:param max_proj:
:return:
"""
spacing = views[0].spacing
views = np.array(views)
nviews = len(views)
def fuse_plane(iplane):
print(iplane)
plane_slice = [slice(0,views[0].shape[dim]) for dim in range(views[0].ndim)]
plane_slice[axisOfRotation] = iplane
plane_slice = tuple(plane_slice)
view_plane_slice = (slice(0,len(views)),)+plane_slice
plane = views[view_plane_slice].astype(np.float32)
axes = [0,1]
weights = []
derivss = []
for iview,view in enumerate(plane):
derivs = []
domain = view > 0
ndomain = view==0
ndomain = ndimage.binary_erosion(ndomain,iterations=1)
ndomain = ndimage.binary_dilation(ndomain,iterations=int(np.max([gaussian_kernel_size,window_size])))
for dim in axes:
deriv = np.abs(ndimage.gaussian_filter1d(view,gaussian_kernel_size,axis=dim,order=1))
deriv[ndomain] = 0.00
# this step above induces lines!!!
deriv[ndomain] = 0
deriv[domain] = deriv[domain] / view[domain]
deriv = ndimage.convolve1d(deriv,np.ones(window_size),axis=dim)
deriv = ndimage.filters.maximum_filter1d(deriv,max_proj,axis=dim)
derivs.append(deriv)
derivss.append(derivs)
weight = np.sum([np.abs(deriv)**5 for deriv in derivs],0)
# weight = np.sum([np.abs(deriv)**10 for deriv in derivs],0)
# print('watch out in fusion!')
weight[ndomain] = 0.00
# weight = ndimage.grey_dilation
weights.append(weight)
weights = np.array(weights)
weightsum = np.sum(weights,0)
domain = weightsum > 0
# weights[:,domain] /= weightsum[domain]
weights[:,domain] = weights[:,domain] / weightsum[domain]
result_array[plane_slice] = np.sum([weights[iview]*plane[iview] for iview in range(nviews)],0)
return
# slices = []
# for iplane in range(views[0].shape[axisOfRotation]):
# plane_slice = [slice(0,view[0][dim]) for dim in range(views[0].ndim)]
# plane_slice[axisOfRotation] = iplane
# slices.append((slice(0,len(views)),)+tuple(plane_slice))
# from multiprocessing import Pool
from multiprocessing.dummy import Pool as | |
auto -> decltype(auto))")
elif not force_not_byvalue and any(ret in self.ret["rtype"] for ret in ["*", "&"]):
self.func_ret_step_type_react(
ref, "prefer return by value if suitable")
def func_ret_step_nonconst(self, ref):
if "const" in self.ret["rtype"]:
self.func_ret_step_type_react(
ref, "move semantics could be suppressed by 'const'")
def func_ret_step_rvalueref(self, ref):
if "&&" in self.ret["rtype"]:
self.func_ret_step_type_react(ref, "never return &&")
def func_ret_finalize(self):
self.ret["comments"] = ",".join(self.ret["comments"])
self.funcattr["ret"] = self.ret
def func_attr_initialize(self):
self.funcattr["attr"] = {}
self.attr["pre"] = []
self.attr["post"] = []
def func_attr_step_virtual(self, ref):
if self.virtual:
self.attr["pre"].append("virtual")
def func_attr_step_constexpr(self, ref):
if not self.virtual and \
self.generic_yesno_input("Should it be evaluated at compile-time", ref):
self.attr["pre"].append("constexpr")
    def func_attr_step_inline(self, ref):
        if not self.virtual and \
                "constexpr" not in self.attr["pre"] and \
                "tparams" not in self.funcattr and \
                self.generic_yesno_input("Is it small, time-critical and part of a thin abstraction", ref):
            self.attr["pre"].append("inline")
def func_attr_step_const(self, ref):
if self.funcattr["type"] == "method" and \
not self.generic_yesno_input("Will the method modify the object state", ref):
self.attr["post"].append("const")
def func_attr_step_noexcept(self, ref):
if not self.generic_yesno_input("Can it throw exceptions", ref):
self.attr["post"].append("noexcept")
def func_attr_step_override(self, ref):
if self.virtual and \
self.generic_yesno_input("Does the function override a base method behavior", ref):
self.attr["post"].append("override")
            # Override implies virtual, so drop the now-redundant specifier;
            # materialise as a list (a filter object would break later appends)
            self.attr["pre"] = [a for a in self.attr["pre"] if a != "virtual"]
def func_attr_finalize(self):
self.funcattr["attr"]["pre"] = " ".join(self.attr["pre"])
self.funcattr["attr"]["post"] = " ".join(self.attr["post"])
def func_impl_step_pure(self, ref):
if "tparams" not in self.funcattr and \
self.funcattr["type"] == "method" and \
"virtual" in self.funcattr["attr"]["pre"] \
and self.generic_yesno_input("Is it pure", ref):
self.funcattr["pure"] = True
else:
self.funcattr["pure"] = False
class LambdaRecipeCook(FunctionRecipeCook):
'''Recipe to generate a lambda'''
def __init__(self, desc, template_path="templates"):
super().__init__(desc, template_path)
self.attr = []
def show_dish_console_impl(self):
tpl_h = self.env.get_template("lambda.h")
self.funcattr["annotations"] = self.annotations
print(tpl_h.render(self.funcattr))
def write_dish(self, odir: str):
self.show_dish_console()
print_msg("This recipe has nothing to write to disk", "WARN")
def lambda_root_step_scope(self, ref):
self.local = self.generic_yesno_input(
"Would the scope of the lambda be local (not returned, neither stored on the heap or pass to another thread)", ref)
def lambda_root_step_recursion(self, ref):
self.funcattr["recursive"] = self.generic_yesno_input(
"Should the lambda be recursive", ref)
@strong_input(InputType.INT)
def capture_list_repeat(self):
return RecipeCook.custom_input("Enter capture list item count")
def capture_list_step_react(self, ref, msg):
self.print_live_annotation("TIPS", ref, msg)
if self.generic_yesno_input("Would you like to update captured item according to tips", ref):
self.capture = self.generic_misc_input(
"Enter captured item full desc e.g. a = std::move(b), &c, d ...", ref)
def capture_list_step_name(self, ref):
self.capture = self.generic_misc_input(
"Enter captured item full desc e.g. a = std::move(b), &c, d ...", ref)
    def capture_list_step_ref(self, ref):
        if self.local and self.capture != "this" and "&" not in self.capture:
            self.capture_list_step_react(
                ref, "You might want to capture by reference for local scope lambda")
def capture_list_step_nonref(self, ref):
if not self.local and (self.capture == "this" or "&" in self.capture):
self.capture_list_step_react(
ref, "You might want to capture by value for non-local scope lambda")
def capture_list_step_this(self, ref):
if not "captures" in self.funcattr:
return
should_fix = False
if self.capture == "this" and \
any(c in self.funcattr["captures"] for c in ["&", "="]):
should_fix = True
if (self.capture == "&" or self.capture == "=") and \
"this" in self.funcattr["captures"]:
should_fix = True
if should_fix:
self.capture_list_step_react(
ref, "If this is captured, all variables should be captured explicitly")
def capture_list_finalize(self):
self.funcattr.setdefault("captures", []).append(self.capture)
def lambda_attr_step_constexpr(self, ref):
if self.generic_yesno_input("Should it be evaluated at compile-time", ref):
self.attr.append("constexpr")
def lambda_attr_step_mutable(self, ref):
if any("=" in capt or "*this" in capt for capt in self.funcattr["captures"]) and \
self.generic_yesno_input("Should the capture by value items be mutable", ref):
self.attr.append("mutable")
def lambda_attr_finalize(self):
self.funcattr["attr"] = " ".join(self.attr)
class DataStructureRecipeCook(RecipeCook):
'''Recipe to select a data structure'''
def __init__(self, desc, template_path="templates"):
super().__init__(desc, template_path)
self.ds = "std::vector"
self.comment = ""
def show_dish_console_impl(self):
self.print_live_annotations(self.annotations)
print_msg(f"You should probably use a data structure of type {self.ds}", "SUGGEST")
if self.comment:
print_msg(self.comment, "NOTE")
def write_dish(self, odir: str):
self.show_dish_console()
print_msg("This recipe has nothing to write to disk", "WARN")
@strong_input(InputType.LIST, ["random_access", "insertion/removal", "lookup", ""], "")
def ds_root_step_primary_concern_cb(self, ref):
return RecipeCook.custom_input("Enter primary concern about the data structure (random_access, insertion/removal: insert often, traverse rarely, lookup, default: no special concern)", ref)
def ds_root_step_primary_concern(self, ref):
self.concern = self.ds_root_step_primary_concern_cb(ref)
def ds_root_step_random_access(self, ref):
if self.concern != "random_access":
return
if self.generic_yesno_input("Is it a fixed size container with size known at compile-time", ref):
self.ds = "std::array"
else:
self.ds = "std::vector"
def ds_root_step_insertion_removal(self, ref):
if self.concern != "insertion/removal":
return
self.ds = "std::list"
self.comment = "measure first because std::vector may still meet your criteria for reasonable size"
def ds_root_step_lookup(self, ref):
if self.concern != "lookup":
return
has_value = self.generic_yesno_input(
"Do you need key-value capable data structure (at the opposite of key-only)", ref)
associative = False
if self.generic_yesno_input("Is readibility more important than performance", ref):
associative = True
elif self.generic_yesno_input("Will size be large and/or will there be frequent insert", ref):
associative = True
self.comment = "measure first because sorted std::vector may still meet your criteria"
if associative and self.generic_yesno_input("Do you need ordered keys (no if you don't know)", ref):
self.ds = "std::map" if has_value else "std::set"
elif associative:
self.ds = "std::unordered_map" if has_value else "std::unordered_set"
else:
self.ds = "sorted std::vector of pair" if has_value else "sorted std::vector with maintained uniqueness"
class AlgorithmRecipeCook(RecipeCook):
'''Recipe to select an algorithm'''
def __init__(self, desc, template_path="templates"):
super().__init__(desc, template_path)
self.algo = ""
def show_dish_console_impl(self):
self.print_live_annotations(self.annotations)
print_msg(f"You should probably use one of the following algorithms -> {self.algo}", "SUGGEST")
def write_dish(self, odir: str):
self.show_dish_console()
print_msg("This recipe has nothing to write to disk", "WARN")
@strong_input(InputType.LIST, ["find", "sort", "traversal"], "find")
def algo_root_step_primary_concern_cb(self, ref):
return RecipeCook.custom_input("Enter primary concern about the algorithm (find, sort, traversal, default: find)", ref)
def algo_root_step_primary_concern(self, ref):
self.concern = self.algo_root_step_primary_concern_cb(ref)
def algo_root_step_find(self, ref):
if self.concern != "find":
return
if self.generic_yesno_input("Do you need to search a sorted range", ref):
self.algo = "std::binary_search, std::lower_bound, std::upper_bound or std::equal_range"
else:
self.algo = "std::find, std::find_if or custom find member if available"
def algo_root_step_sort(self, ref):
if self.concern != "sort":
return
if self.generic_yesno_input("Do you need to separate data according to a criteria", ref):
self.algo = "std::partition or std::stable_partition if relative order of items should be preserved"
elif self.generic_yesno_input("Do you need to know the value of the nth element if the data structure was sorted with all others correctly dispatched around it", ref):
self.algo = "std::nth_element"
elif self.generic_yesno_input("Do you need to sort part of a data structure", ref):
self.algo = "std::partial_sort"
else:
self.algo = "std::sort or std::stable_sort"
def algo_root_step_traversal(self, ref):
if self.concern != "traversal":
return
self.algo = "std::for_each or custom range-based for loop if more convenient"
class ImplRecipeCook(RecipeCook):
'''Recipe for implementation code'''
def __init__(self, desc, template_path="templates"):
super().__init__(desc, template_path)
self.implattr = {}
def show_dish_console_impl(self):
tpl_h = self.env.get_template("impl.h")
self.implattr["annotations"] = self.annotations
print(tpl_h.render(self.implattr))
def write_dish(self, odir: str):
self.show_dish_console()
print_msg("This recipe has nothing to write to disk", "WARN")
_recpp_cookbook = {
"design": DesignRecipeCook,
"class": ClassRecipeCook,
"function": FunctionRecipeCook,
"lambda": LambdaRecipeCook,
"ds": DataStructureRecipeCook,
"algo": AlgorithmRecipeCook,
"impl": ImplRecipeCook
}
# ---------------------------------------------
# Cookbook actions
# ---------------------------------------------
def load_recipe(recipe_type: str):
'''Load recipe from database file'''
with open(str(Path("recipes") / f"{recipe_type}_recipe.json"), "r", encoding="utf8") as f:
return json.load(f)
def cook(dish: str, display_live_annot: bool, odir: str):
'''
Dispatch cooking step to the correct handler
:param dish: dish name
:param display_live_annot: set to True to display tips while cooking
:param odir: output directory for generated code
'''
cook = _recpp_cookbook[dish](load_recipe(dish))
cook.cook(do_live_annot=display_live_annot)
cook.serve_dish(odir)
def recipe(dish: str, with_annot: bool, whitelist: str, with_header=True):
'''
List steps in a recipe
:param dish: dish name
:param with_annot: display annotations instead of steps
:param whitelist: whitelist used to filter annotation
'''
if dish == "all" and with_annot:
print(_recpp_recipe)
for d in ["design", "class", "function", "lambda", "ds", "algo", "impl"]:
recipe(dish=d, with_annot=with_annot,
whitelist=whitelist, with_header=False)
return
rec = load_recipe(dish)
if with_annot:
if not "*" in whitelist:
whitelist += ",*"
steps = [f"{a['type']} [{a['ref']}]: {a['msg']}" for meta in rec
for a in meta["annotations"]
if (whitelist == "*" or any(w in a['type']
for w in whitelist.split(',')))]
else:
steps = [{"cookstep": meta["id"],
"description": meta["desc"],
"substeps": [f"{s['id']}: {s['desc']}" for s in meta["steps"]]} for meta in rec]
if with_header:
print(_recpp_recipe)
if with_annot and steps:
print('\n'.join(steps))
elif steps:
print(json.dumps(steps, indent=2, sort_keys=False))
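# Usage sketch (illustrative; assumes the recipes/<dish>_recipe.json files
# that load_recipe expects are present next to the script):
def _demo_recpp():
    cook(dish="function", display_live_annot=True, odir="out")   # interactive Q&A
    recipe(dish="class", with_annot=True, whitelist="PERF,USA")  # list annotations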
# ---------------------------------------------
# Entry point
# ---------------------------------------------
if __name__ == "__main__":
parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('--keep', '-k', dest='annot_whitelist', type=str, default='*',
help='desc: annotation whitelist in list annotations mode\n'
'depends: -l -a\n'
'default: *\n'
'example: recpp.py -d class -l -a -k PERF,USA')
parser.add_argument('--output-dir', '-o', dest='odir', type=str, default='',
help='desc: output directory where to store generated code\n'
                        'warning: is only used in recipe mode for class and
<filename>src/pyinterp/rtree.py
# Copyright (c) 2020 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
RTree spatial index
-------------------
"""
from typing import Optional, Tuple
import numpy as np
from . import core
from . import geodetic
class RTree:
"""R*Tree spatial index for geodetic scalar values
"""
def __init__(self,
system: Optional[geodetic.System] = None,
dtype: Optional[np.dtype] = None,
ndims: int = 3):
"""
Initialize a new R*Tree
Args:
system (pyinterp.geodetic.System, optional): WGS of the
coordinate system used to transform equatorial spherical
positions (longitudes, latitudes, altitude) into ECEF
coordinates. If not set the geodetic system used is WGS-84.
Default to ``None``.
dtype (numpy.dtype, optional): Data type of the instance to create.
ndims (int, optional): The number of dimensions of the tree. This
dimension must be at least equal to 3 to store the ECEF
coordinates of the points. Default to ``3``.
"""
dtype = dtype or np.dtype("float64")
if ndims < 3:
raise ValueError("ndims must be >= 3")
if dtype == np.dtype("float64"):
self._instance = getattr(core, f"RTree{ndims}DFloat64")(system)
elif dtype == np.dtype("float32"):
self._instance = getattr(core, f"RTree{ndims}DFloat32")(system)
else:
raise ValueError(f"dtype {dtype} not handled by the object")
self.dtype = dtype
def bounds(
self
) -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]:
"""Returns the box able to contain all values stored in the container.
Return:
tuple: A tuple that contains the coordinates of the minimum and
maximum corners of the box able to contain all values stored in
the container or an empty tuple if there are no values in the
container.
"""
return self._instance.bounds()
def clear(self) -> None:
"""Removes all values stored in the container.
"""
return self._instance.clear()
def __len__(self):
return self._instance.__len__()
def __bool__(self):
return self._instance.__bool__()
def packing(self, coordinates: np.ndarray, values: np.ndarray) -> None:
"""The tree is created using packing algorithm (The old data is erased
before construction.)
Args:
coordinates (numpy.ndarray): a matrix ``(n, ndims)`` where ``n`` is
the number of observations and ``ndims`` is the number of
coordinates in order: longitude and latitude in degrees,
altitude in meters and then the other coordinates defined in
                Euclidean space if ``ndims`` > 3. If the shape of the matrix
                is ``(n, ndims-1)`` then the method considers the altitude
                constant and equal to zero.
values (numpy.ndarray): An array of size ``(n)`` containing the
values associated with the coordinates provided
"""
self._instance.packing(coordinates, values)
def insert(self, coordinates: np.ndarray, values: np.ndarray) -> None:
"""Insert new data into the search tree.
Args:
coordinates (numpy.ndarray): a matrix ``(n, ndims)`` where ``n`` is
the number of observations and ``ndims`` is the number of
coordinates in order: longitude and latitude in degrees,
altitude in meters and then the other coordinates defined in
                Euclidean space if ``ndims`` > 3. If the shape of the matrix
                is ``(n, ndims-1)`` then the method considers the altitude
                constant and equal to zero.
values (numpy.ndarray): An array of size ``(n)`` containing the
values associated with the coordinates provided
"""
self._instance.insert(coordinates, values)
def query(self,
coordinates: np.ndarray,
k: Optional[int] = 4,
within: Optional[bool] = True,
num_threads: Optional[int] = 0) -> Tuple[np.ndarray, np.ndarray]:
"""Search for the nearest K nearest neighbors of a given point.
Args:
coordinates (numpy.ndarray): a matrix ``(n, ndims)`` where ``n`` is
the number of observations and ``ndims`` is the number of
coordinates in order: longitude and latitude in degrees,
altitude in meters and then the other coordinates defined in
                Euclidean space if ``ndims`` > 3. If the shape of the matrix
                is ``(n, ndims-1)`` then the method considers the altitude
                constant and equal to zero.
k (int, optional): The number of nearest neighbors to be searched.
Defaults to ``4``.
            within (bool, optional): If true, the method ensures that the
                neighbors found are located around the point of interest.
                Defaults to ``true``.
num_threads (int, optional): The number of threads to use for the
computation. If 0 all CPUs are used. If 1 is given, no parallel
computing code is used at all, which is useful for debugging.
Defaults to ``0``.
Return:
tuple: A tuple containing a matrix describing for each provided
position, the distance, in meters, between the provided position
and the found neighbors and a matrix containing the value of the
different neighbors found for all provided positions.
"""
return self._instance.query(coordinates, k, within, num_threads)
def inverse_distance_weighting(self,
coordinates: np.ndarray,
radius: Optional[float] = None,
k: Optional[int] = 9,
p: Optional[int] = 2,
within: Optional[bool] = True,
num_threads: Optional[int] = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Interpolation of the value at the requested position by inverse
distance weighting method.
Args:
coordinates (numpy.ndarray): a matrix ``(n, ndims)`` where ``n`` is
the number of observations and ``ndims`` is the number of
coordinates in order: longitude and latitude in degrees,
altitude in meters and then the other coordinates defined in
                Euclidean space if ``ndims`` > 3. If the shape of the matrix
                is ``(n, ndims-1)`` then the method considers the altitude
                constant and equal to zero.
radius (float, optional): The maximum radius of the search (m).
                Defaults to the maximum distance between two points.
k (int, optional): The number of nearest neighbors to be used for
calculating the interpolated value. Defaults to ``9``.
            p (float, optional): The power parameter. Defaults to ``2``.
within (bool, optional): If true, the method ensures that the
neighbors found are located around the point of interest. In
other words, this parameter ensures that the calculated values
will not be extrapolated. Defaults to ``true``.
num_threads (int, optional): The number of threads to use for the
computation. If 0 all CPUs are used. If 1 is given, no parallel
computing code is used at all, which is useful for debugging.
Defaults to ``0``.
Return:
tuple: The interpolated value and the number of neighbors used in
the calculation.
"""
return self._instance.inverse_distance_weighting(
coordinates, radius, k, p, within, num_threads)
def radial_basis_function(self,
coordinates: np.ndarray,
radius: Optional[float] = None,
k: Optional[int] = 9,
rbf: Optional[str] = None,
epsilon: Optional[float] = None,
smooth: Optional[float] = 0,
within: Optional[bool] = True,
num_threads: Optional[int] = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Interpolation of the value at the requested position by radial
basis function interpolation.
Args:
coordinates (numpy.ndarray): a matrix ``(n, ndims)`` where ``n`` is
the number of observations and ``ndims`` is the number of
coordinates in order: longitude and latitude in degrees,
altitude in meters and then the other coordinates defined in
                Euclidean space if ``ndims`` > 3. If the shape of the matrix
                is ``(n, ndims-1)`` then the method considers the altitude
                constant and equal to zero.
radius (float, optional): The maximum radius of the search (m).
                Defaults to the maximum distance between two points.
k (int, optional): The number of nearest neighbors to be used for
calculating the interpolated value. Defaults to ``9``.
rbf (str, optional): The radial basis function, based on the
radius, :math:`r`, given by the distance between points.
This parameter can take one of the following values:
* ``cubic``: :math:`\\varphi(r) = r^3`
* ``gaussian``: :math:`\\varphi(r) = e^{-(\\dfrac{r}
{\\varepsilon})^2}`
* ``inverse_multiquadric``: :math:`\\varphi(r) = \\dfrac{1}
{\\sqrt{1+(\\dfrac{r}{\\varepsilon})^2}}`
* ``linear``: :math:`\\varphi(r) = r`
* ``multiquadric``: :math:`\\varphi(r) = \\sqrt{1+(
\\dfrac{r}{\\varepsilon})^2}`
* ``thin_plate``: :math:`\\varphi(r) = r^2 \\ln(r)`
Default to ``multiquadric``
epsilon (float, optional): adjustable constant for gaussian or
multiquadrics functions. Default to the average distance
between nodes.
smooth (float, optional): values greater than zero increase the
smoothness of the approximation. Default to 0 (interpolation)
within (bool, optional): If true, the method ensures that the
neighbors found are located around the point of interest. In
other words, this parameter ensures that the calculated values
will not be extrapolated. Defaults to ``true``.
num_threads (int, optional): The number of threads to use for the
computation. If 0 all CPUs are used. If 1 is given, no parallel
computing code is used at all, which is useful for debugging.
Defaults to ``0``.
Return:
tuple: The interpolated value and the number of neighbors used in
the calculation.
"""
adjustable = ['gaussian', 'inverse_multiquadric', 'multiquadric']
non_adjustable = ['cubic', 'linear', 'thin_plate']
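# Usage sketch (illustrative, with synthetic data): pack scattered
# (lon, lat) observations and interpolate them at new positions with
# inverse distance weighting. A (n, 2) coordinate matrix is accepted
# because the altitude is then assumed constant and equal to zero.
def _demo_rtree():
    mesh = RTree()
    lon = np.random.uniform(0.0, 360.0, 1000)
    lat = np.random.uniform(-80.0, 80.0, 1000)
    values = np.sin(np.radians(lat))
    mesh.packing(np.vstack((lon, lat)).T, values)
    targets = np.vstack((np.linspace(0, 359, 10),
                         np.linspace(-70, 70, 10))).T
    idw, neighbors = mesh.inverse_distance_weighting(targets, k=9)
    return idw, neighbors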
import re
import copy
from collections import namedtuple
import warnings
import astropy.time
import astropy.coordinates
from astropy._erfa import ErfaWarning
import galsim
import numpy as np
import lsst.geom as LsstGeom
from lsst.obs.lsstSim import LsstSimMapper
from lsst.sims.utils import arcsecFromRadians
from . import GalSimCameraWrapper
from .wcsUtils import tanSipWcsFromDetector
from lsst.sims.photUtils import PhotometricParameters
__all__ = ["GalSimDetector", "make_galsim_detector", "LsstObservatory"]
class GalSim_afw_TanSipWCS(galsim.wcs.CelestialWCS):
"""
This class uses methods from lsst.geom and meas_astrom to
fit a TAN-SIP WCS to an afw.cameraGeom.Detector and then wrap
that WCS into something that GalSim can parse.
For documentation on the TAN-SIP WCS see
Shupe and Hook (2008)
http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf
"""
def __init__(self, detectorName, cameraWrapper, obs_metadata, epoch,
photParams=None, wcs=None):
"""
@param [in] detectorName is the name of the detector as stored
by afw
        @param [in] cameraWrapper is an instantiation of a GalSimCameraWrapper
@param [in] obs_metadata is an instantiation of ObservationMetaData
characterizing the telescope pointing
@param [in] epoch is the epoch in Julian years of the equinox against
which RA and Dec are measured
@param [in] photParams is an instantiation of PhotometricParameters
(it will contain information about gain, exposure time, etc.)
@param [in] wcs is a kwarg that is used by the method _newOrigin().
The wcs kwarg in this constructor method should not be used by users.
"""
if not isinstance(cameraWrapper, GalSimCameraWrapper):
raise RuntimeError("You must pass GalSim_afw_TanSipWCS "
"an instantiation "
"of GalSimCameraWrapper or one of its daughter "
"classes")
if wcs is None:
self._tanSipWcs = tanSipWcsFromDetector(
detectorName, cameraWrapper, obs_metadata, epoch)
else:
self._tanSipWcs = wcs
self.detectorName = detectorName
self.cameraWrapper = cameraWrapper
self.obs_metadata = obs_metadata
self.photParams = photParams
self.epoch = epoch
# this is needed to match the GalSim v1.5 API
self._color = None
self.fitsHeader = self._tanSipWcs.getFitsMetadata()
self.fitsHeader.set("EXTTYPE", "IMAGE")
if self.obs_metadata.bandpass is not None:
if (not isinstance(self.obs_metadata.bandpass, list) and not
isinstance(self.obs_metadata.bandpass, np.ndarray)):
self.fitsHeader.set("FILTER", self.obs_metadata.bandpass)
if self.obs_metadata.mjd is not None:
self.fitsHeader.set("MJD-OBS", self.obs_metadata.mjd.TAI)
mjd_obs = astropy.time.Time(self.obs_metadata.mjd.TAI, format='mjd')
self.fitsHeader.set('DATE-OBS', mjd_obs.isot)
if self.photParams is not None:
exptime = self.photParams.nexp*self.photParams.exptime
self.fitsHeader.set("EXPTIME", exptime)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'ERFA function', ErfaWarning)
mjd_end = mjd_obs + astropy.time.TimeDelta(exptime, format='sec')
self.fitsHeader.set('DATE-END', mjd_end.isot)
# Add pointing information to FITS header.
if self.obs_metadata.pointingRA is not None:
self.fitsHeader.set('RATEL', obs_metadata.pointingRA)
if self.obs_metadata.pointingDec is not None:
self.fitsHeader.set('DECTEL', obs_metadata.pointingDec)
if self.obs_metadata.rotSkyPos is not None:
self.fitsHeader.set('ROTANGLE', obs_metadata.rotSkyPos)
# Add airmass, needed by jointcal.
if self.obs_metadata.OpsimMetaData is not None:
try:
airmass = self.obs_metadata.OpsimMetaData['airmass']
except KeyError:
pass
else:
self.fitsHeader.set('AIRMASS', airmass)
# Add boilerplate keywords requested by DM.
self.fitsHeader.set('TELESCOP', 'LSST')
self.fitsHeader.set('INSTRUME', 'CAMERA')
self.fitsHeader.set('SIMULATE', True)
self.fitsHeader.set('ORIGIN', 'IMSIM')
observatory = LsstObservatory()
self.fitsHeader.set('OBS-LONG', observatory.getLongitude().asDegrees())
self.fitsHeader.set('OBS-LAT', observatory.getLatitude().asDegrees())
self.fitsHeader.set('OBS-ELEV', observatory.getElevation())
obs_location = observatory.getLocation()
self.fitsHeader.set('OBSGEO-X', obs_location.geocentric[0].value)
self.fitsHeader.set('OBSGEO-Y', obs_location.geocentric[1].value)
self.fitsHeader.set('OBSGEO-Z', obs_location.geocentric[2].value)
self.crpix1 = self.fitsHeader.getScalar("CRPIX1")
self.crpix2 = self.fitsHeader.getScalar("CRPIX2")
self.afw_crpix1 = self.crpix1
self.afw_crpix2 = self.crpix2
self.crval1 = self.fitsHeader.getScalar("CRVAL1")
self.crval2 = self.fitsHeader.getScalar("CRVAL2")
self.origin = galsim.PositionD(x=self.crpix1, y=self.crpix2)
self._color = None
def _radec(self, x, y, color=None):
"""
This is a method required by the GalSim WCS API
Convert pixel coordinates into ra, dec coordinates.
x and y already have crpix1 and crpix2 subtracted from them.
Return ra, dec in radians.
Note: the color arg is ignored. It is only there to
match the GalSim v1.5 API
"""
chipNameList = [self.detectorName]
if type(x) is np.ndarray:
chipNameList = chipNameList * len(x)
ra, dec = self.cameraWrapper._raDecFromPixelCoords(
x + self.afw_crpix1, y + self.afw_crpix2, chipNameList,
obs_metadata=self.obs_metadata, epoch=self.epoch)
if type(x) is np.ndarray:
return (ra, dec)
else:
return (ra[0], dec[0])
def _xy(self, ra, dec):
"""
This is a method required by the GalSim WCS API
Convert ra, dec in radians into x, y in pixel space with crpix
subtracted.
"""
chipNameList = [self.detectorName]
if type(ra) is np.ndarray:
chipNameList = chipNameList * len(ra)
xx, yy = self.cameraWrapper._pixelCoordsFromRaDec(
ra=ra, dec=dec, chipName=chipNameList,
obs_metadata=self.obs_metadata, epoch=self.epoch)
if type(ra) is np.ndarray:
return (xx-self.crpix1, yy-self.crpix2)
        return (xx[0]-self.crpix1, yy[0]-self.crpix2)  # index yy too on the scalar path
def _newOrigin(self, origin):
"""
This is a method required by the GalSim WCS API. It returns a
copy of self, but with the pixel-space origin translated to a
new position.
@param [in] origin is an instantiation of a galsim.PositionD
representing the a point in pixel space to which you want to
move the origin of the WCS
@param [out] _newWcs is a WCS identical to self, but with the origin
in pixel space moved to the specified origin
"""
_newWcs = GalSim_afw_TanSipWCS.__new__(GalSim_afw_TanSipWCS)
_newWcs.__dict__.update(self.__dict__)
_newWcs.crpix1 = origin.x
_newWcs.crpix2 = origin.y
_newWcs.fitsHeader = copy.deepcopy(self.fitsHeader)
_newWcs.fitsHeader.set('CRPIX1', origin.x)
_newWcs.fitsHeader.set('CRPIX2', origin.y)
return _newWcs
def _writeHeader(self, header, bounds):
for key in self.fitsHeader.getOrderedNames():
header[key] = self.fitsHeader.getScalar(key)
return header
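# Illustrative sketch: once the hooks above (_radec, _xy, _newOrigin,
# _writeHeader) are implemented, GalSim can use this WCS like any built-in
# one. The ``wcs`` argument is assumed to be an already-constructed
# GalSim_afw_TanSipWCS instance (building one requires the LSST stack).
def _demo_wcs_roundtrip(wcs):
    image_pos = galsim.PositionD(x=1024.0, y=2048.0)
    world_pos = wcs.toWorld(image_pos)  # CelestialCoord, via _radec
    back = wcs.toImage(world_pos)       # PositionD, via _xy
    return world_pos, back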
TreeRingInfo = namedtuple('TreeRingInfo', ['center', 'func'])
class GalSimDetector:
"""
This class stores information about individual detectors for use
by the GalSimInterpreter
"""
def __init__(self, detectorName, cameraWrapper, obs_metadata, epoch,
photParams=None):
"""
@param [in] detectorName is the name of the detector as stored
by afw
        @param [in] cameraWrapper is an instantiation of a GalSimCameraWrapper
@param [in] photParams is an instantiation of the
PhotometricParameters class that carries details about the
photometric response of the telescope.
This class will generate its own internal variable
self.fileName which is the name of the detector as it will
appear in the output FITS files
"""
if not isinstance(cameraWrapper, GalSimCameraWrapper):
raise RuntimeError("You must pass GalSimDetector an instantiation "
"of GalSimCameraWrapper or one of its daughter "
"classes")
if detectorName not in cameraWrapper.camera:
raise RuntimeError("detectorName needs to be in the camera "
" wrapped by cameraWrapper when instantiating "
"a GalSimDetector\n"
"%s is not in your cameraWrapper.camera"
% detectorName)
if photParams is None:
raise RuntimeError("You need to specify an instantiation "
"of PhotometricParameters "
"when constructing a GalSimDetector")
self._wcs = None # this will be created when it is actually called for
self._name = detectorName
self._cameraWrapper = cameraWrapper
self._obs_metadata = obs_metadata
self._epoch = epoch
self._detector_type = self._cameraWrapper.camera[self._name].getType()
# Default Tree Ring properties, i.e., no tree rings:
self._tree_rings = TreeRingInfo(galsim.PositionD(0, 0), None)
# We are transposing the coordinates because of the difference
# between how DM defines pixel coordinates and how the
# Camera team defines pixel coordinates
bbox = self._cameraWrapper.getBBox(self._name)
self._xMinPix = bbox.getMinX()
self._xMaxPix = bbox.getMaxX()
self._yMinPix = bbox.getMinY()
self._yMaxPix = bbox.getMaxY()
self._bbox = LsstGeom.Box2D(bbox)
centerPupil = self._cameraWrapper.getCenterPupil(self._name)
self._xCenterArcsec = arcsecFromRadians(centerPupil.getX())
self._yCenterArcsec = arcsecFromRadians(centerPupil.getY())
centerPixel = self._cameraWrapper.getCenterPixel(self._name)
self._xCenterPix = centerPixel.getX()
self._yCenterPix = centerPixel.getY()
self._xMinArcsec = None
self._yMinArcsec = None
self._xMaxArcsec = None
self._yMaxArcsec = None
for cameraPointPupil in \
self._cameraWrapper.getCornerPupilList(self._name):
xx = arcsecFromRadians(cameraPointPupil.getX())
yy = arcsecFromRadians(cameraPointPupil.getY())
if self._xMinArcsec is None or xx < self._xMinArcsec:
self._xMinArcsec = xx
if self._xMaxArcsec is None or xx > self._xMaxArcsec:
self._xMaxArcsec = xx
if self._yMinArcsec is None or yy < self._yMinArcsec:
self._yMinArcsec = yy
if self._yMaxArcsec is None or yy > self._yMaxArcsec:
self._yMaxArcsec = yy
self._photParams = photParams
self._fileName = self._getFileName()
def _getFileName(self):
"""
Format the name of the detector to add to the name of the FITS file
"""
detectorName = self.name
detectorName = detectorName.replace(',', '')
detectorName = detectorName.replace(':', '')
detectorName = detectorName.replace(' ', '_')
return detectorName
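    # Illustrative note (an addition): for a hypothetical afw-style name such
    # as "R:2,2 S:1,1", the sanitization above yields "R22_S11", which is
    # safe to embed in an output FITS file name.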
def pixelCoordinatesFromRaDec(self, ra, dec):
"""
Convert RA, Dec into pixel coordinates on this detector
@param [in] ra is a numpy array or a float indicating RA in radians
@param [in] dec is a numpy array or a float indicating Dec in radians
@param [out] xPix is a numpy array indicating the x pixel coordinate
@param [out] yPix is a numpy array indicating the y pixel coordinate
"""
nameList = [self.name]
if type(ra) is np.ndarray:
nameList = nameList*len(ra)
raLocal = ra
decLocal = dec
else:
raLocal = np.array([ra])
decLocal = np.array([dec])
xPix, yPix = self._cameraWrapper._pixelCoordsFromRaDec(
raLocal, decLocal, chipName=nameList,
obs_metadata=self._obs_metadata,
epoch=self._epoch)
return xPix, yPix
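
    # Illustrative sketch (an addition, not part of the original class): both
    # scalar and array inputs are accepted above; scalars are promoted to
    # length-1 arrays before the camera wrapper is called, so the outputs are
    # always numpy arrays. The RA/Dec values below are hypothetical.
    def _examplePixelCoordinates(self):
        ra = np.radians(np.array([53.0, 53.001]))
        dec = np.radians(np.array([-27.8, -27.799]))
        return self.pixelCoordinatesFromRaDec(ra, dec)
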
def pixelCoordinatesFromPupilCoordinates(self, xPupil, yPupil):
"""
Convert pupil coordinates into pixel coordinates on this detector
@param [in] xPupil is a numpy array or a float indicating x
pupil coordinates in radians
@param [in] yPupil a numpy array or a float indicating y pupil
coordinates in radians
@param [out] xPix is a numpy array indicating the x pixel coordinate
@param [out] yPix is a numpy array indicating the y pixel coordinate
"""
nameList = [self._name]
if type(xPupil) is np.ndarray:
nameList = nameList*len(xPupil)
xp = xPupil
yp = yPupil
else:
xp = np.array([xPupil])
yp = np.array([yPupil])
xPix, yPix = self._cameraWrapper.pixelCoordsFromPupilCoords(
xp, yp, nameList, self.obs_metadata)
return xPix, yPix
def containsRaDec(self, ra, dec):
"""
Does a given RA, Dec fall on this detector?
# Store legend identifiers for the run
legend_labels = []
legend_lines = []
legend_labels.append("Individual Runs")
legend_lines.append(Line2D([0], [0], color=colour, linestyle="--",
alpha=alpha))
# Set up the legend variables for the mean over all runs
label = name
legend_labels.append(label)
legend_lines.append(Line2D([0], [0], color=colour, linestyle="-"))
return fig, ax, legend_labels, legend_lines
def mean_with_stderr(data, type_, ind, smooth_over, names,
fig=None, ax=None, figsize=(12, 6),
xlim=None, ylim=None, alpha=0.2,
colours=None, env_type="continuing",
keep_shape=False, xlabel="", ylabel=""):
"""
Plots the average training or evaluation return over all runs with standard
error.
Given a list of data dictionaries of the form returned by main.py, this
    function will plot each episodic return for the list of hyperparameter
    settings in each data dictionary. The ind argument is a list, where each
    element is a list of hyperparameter settings to plot for the data
    dictionary at the same index. For example, if ind[i] = [1, 2], then plots
    will be generated for the data dictionary at location i in the data
    argument for hyperparameter settings 1 and 2.
The smooth_over argument tells how many previous data points to smooth
over
Parameters
----------
data : list of dict
The Python data dictionaries generated from running main.py for the
agents
type_ : str
Which type of data to plot, one of "eval" or "train"
ind : iter of iter of int
The list of lists of hyperparameter settings indices to plot for
each agent. For example [[1, 2], [3, 4]] means that the first agent
plots will use hyperparameter settings indices 1 and 2, while the
second will use 3 and 4.
smooth_over : list of int
The number of previous data points to smooth over for the agent's
plot for each data dictionary. Note that this is *not* the number of
timesteps to smooth over, but rather the number of data points to
smooth over. For example, if you save the return every 1,000
timesteps, then setting this value to 15 will smooth over the last
        15 readings, or 15,000 timesteps. For example, [1, 2] will mean that
        the plots using the first data dictionary will smooth over the past 1
        data point, while the second will smooth over the past 2 data
        points for each hyperparameter setting.
fig : plt.figure
The figure to plot on, by default None. If None, creates a new figure
ax : plt.Axes
        The axis to plot on, by default None. If None, creates a new axis
figsize : tuple(int, int)
The size of the figure to plot
names : list of str
The name of the agents, used for the legend
xlim : float, optional
The x limit for the plot, by default None
ylim : float, optional
The y limit for the plot, by default None
alpha : float, optional
        The alpha channel for the plot, by default 0.2
colours : list of list of str
The colours to use for each hyperparameter settings plot for each data
dictionary
env_type : str, optional
The type of environment, one of 'continuing', 'episodic'. By default
'continuing'
Returns
-------
plt.figure, plt.Axes
The figure and axes of the plot
"""
# Set the colours to be default if not set
if colours is None:
colours = _get_default_colours(ind)
# Set up figure
title = f"Average {type_.title()} Return per Run with Standard Error"
fig, ax = _setup_fig(fig, ax, figsize, xlim=xlim, ylim=ylim, xlabel=xlabel,
ylabel=ylabel, title=title)
# if ax is None and fig is None:
# fig = plt.figure(figsize=figsize)
# ax = fig.add_subplot()
# if xlim is not None:
# ax.set_xlim(xlim)
# if ylim is not None:
# ax.set_ylim(ylim)
# Track the total timesteps per hyperparam setting over all episodes and
# the cumulative timesteps per episode per data dictionary (timesteps
# should be consistent between all hp settings in a single data dict)
total_timesteps = []
cumulative_timesteps = []
for i in range(len(data)):
if type_ == "train":
cumulative_timesteps.append(exp.get_cumulative_timesteps(data[i]
["experiment_data"][ind[i][0]]["runs"]
[0]["train_episode_steps"]))
elif type_ == "eval":
cumulative_timesteps.append(data[i]["experiment_data"][ind[i][0]]
["runs"][0]["timesteps_at_eval"])
else:
raise ValueError("type_ must be one of 'train', 'eval'")
total_timesteps.append(cumulative_timesteps[-1][-1])
# Find the minimum of total trained-for timesteps. Each plot will only
# be plotted on the x-axis until this value
min_timesteps = min(total_timesteps)
# For each data dictionary, find the minimum index where the timestep at
# that index is >= minimum timestep
ind_ge_min_timesteps = []
for cumulative_timesteps_per_data in cumulative_timesteps:
final_ind = np.where(cumulative_timesteps_per_data >=
min_timesteps)[0][0]
# Since indexing will stop right before the minimum, increment it
ind_ge_min_timesteps.append(final_ind + 1)
# Plot all data for all HP settings, only up until the minimum index
# fig, ax = None, None
plot_fn = _plot_mean_with_stderr_continuing if env_type == "continuing" \
else _plot_mean_with_stderr_episodic
for i in range(len(data)):
fig, ax = \
plot_fn(data=data[i], type_=type_,
ind=ind[i], smooth_over=smooth_over[i], name=names[i],
fig=fig, ax=ax, figsize=figsize, xlim=xlim, ylim=ylim,
last_ind=ind_ge_min_timesteps[i], alpha=alpha,
colours=colours[i], keep_shape=keep_shape)
return fig, ax
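
# Illustrative sketch (an addition, not from the original file): the kind of
# data-point-based smoothing the `smooth_over` docstring above describes -- a
# trailing moving average over the last `smooth_over` saved readings. The
# plotting helpers below may implement smoothing differently.
def _example_smooth(returns, smooth_over):
    kernel = np.ones(smooth_over) / smooth_over
    # mode="valid" keeps only windows fully covered by the data
    return np.convolve(returns, kernel, mode="valid")
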
def _plot_mean_with_stderr_continuing(data, type_, ind, smooth_over, fig=None,
ax=None, figsize=(12, 6), xlim=None,
ylim=None, xlabel=None, ylabel=None,
name="", last_ind=-1,
timestep_multiply=None, alpha=0.2,
colours=None,
keep_shape=False):
"""
Plots the average training or evaluation return over all runs for a single
data dictionary on a continuing environment. Standard error
is plotted as shaded regions.
Parameters
----------
data : dict
The Python data dictionary generated from running main.py
type_ : str
Which type of data to plot, one of "eval" or "train"
ind : iter of int
The list of hyperparameter settings indices to plot
smooth_over : int
The number of previous data points to smooth over. Note that this
is *not* the number of timesteps to smooth over, but rather the number
of data points to smooth over. For example, if you save the return
every 1,000 timesteps, then setting this value to 15 will smooth
over the last 15 readings, or 15,000 timesteps.
fig : plt.figure
The figure to plot on, by default None. If None, creates a new figure
ax : plt.Axes
        The axis to plot on, by default None. If None, creates a new axis
figsize : tuple(int, int)
The size of the figure to plot
name : str, optional
The name of the agent, used for the legend
last_ind : int, optional
The index of the last element to plot in the returns list,
by default -1. This is useful if you want to plot many things on the
same axis, but all of which have a different number of elements. This
way, we can plot the first last_ind elements of each returns for each
agent.
timestep_multiply : array_like of float, optional
        A value to multiply each timestep by, by default None. This is useful if
your agent does multiple updates per timestep and you want to plot
performance vs. number of updates.
xlim : float, optional
The x limit for the plot, by default None
ylim : float, optional
The y limit for the plot, by default None
alpha : float, optional
        The alpha channel for the plot, by default 0.2
colours : list of str
The colours to use for each plot of each hyperparameter setting
Returns
-------
plt.figure, plt.Axes
The figure and axes of the plot
Raises
------
ValueError
When an axis is passed but no figure is passed
When an appropriate number of colours is not specified to cover all
hyperparameter settings
"""
if colours is not None and len(colours) != len(ind):
raise ValueError("must have one colour for each hyperparameter " +
"setting")
if timestep_multiply is None:
timestep_multiply = [1] * len(ind)
if ax is not None and fig is None:
raise ValueError("must pass figure when passing axis")
if colours is None:
colours = _get_default_colours(ind)
# Set up figure
if ax is None and fig is None:
title = f"Average {type_.title()} Return per Run with Standard Error"
fig, ax = _setup_fig(fig, ax, figsize, xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel, title=title)
# fig = plt.figure(figsize=figsize)
# ax = fig.add_subplot()
    # if xlim is not None:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Módulo 'writers' de pydatajson
Contiene los métodos para escribir
- diccionarios con metadatos de catálogos a formato JSON, así como
- listas de diccionarios ("tablas") en formato CSV o XLSX
"""
import io
import json
import logging
import os
import openpyxl as pyxl
import unicodecsv as csv
from openpyxl.styles import Font
from openpyxl.utils import column_index_from_string
from six import string_types, text_type, moves, iteritems
from . import helpers
def write_tables(tables,
path,
column_styles=None,
cell_styles=None,
tables_fields=None,
tables_names=None):
""" Exporta un reporte con varias tablas en CSV o XLSX.
Si la extensión es ".csv" se crean varias tablas agregando el nombre de la
tabla al final del "path". Si la extensión es ".xlsx" todas las tablas se
escriben en el mismo excel.
Args:
table (dict of (list of dicts)): Conjunto de tablas a ser exportadas
donde {
"table_name": [{
"field_name1": "field_value1",
"field_name2": "field_value2",
"field_name3": "field_value3"
}]
}
path (str): Path al archivo CSV o XLSX de exportación.
"""
assert isinstance(path, string_types), "`path` debe ser un string"
assert isinstance(tables, dict), "`table` es dict de listas de dicts"
    # Infer the file format from `path` and dispatch accordingly.
suffix = path.split(".")[-1]
if suffix == "csv":
        for table_name, table in iteritems(tables):
root_path = "".join(path.split(".")[:-1])
table_path = "{}_{}.csv".format(root_path, table_name)
_write_csv_table(table, table_path)
elif suffix == "xlsx":
return _write_xlsx_table(
tables,
path,
column_styles,
cell_styles,
tables_fields=tables_fields,
tables_names=tables_names)
else:
raise ValueError("""
{} no es un sufijo reconocido. Pruebe con .csv o.xlsx""".format(suffix))
def write_table(table, path, column_styles=None, cell_styles=None):
""" Exporta una tabla en el formato deseado (CSV o XLSX).
La extensión del archivo debe ser ".csv" o ".xlsx", y en función de
ella se decidirá qué método usar para escribirlo.
Args:
table (list of dicts): Tabla a ser exportada.
path (str): Path al archivo CSV o XLSX de exportación.
"""
assert isinstance(path, string_types), "`path` debe ser un string"
assert isinstance(table, list), "`table` debe ser una lista de dicts"
# si la tabla está vacía, no escribe nada
if len(table) == 0:
logging.warning("Tabla vacia: no se genera ninguna archivo.")
return
# Sólo sabe escribir listas de diccionarios con información tabular
if not helpers.is_list_of_matching_dicts(table):
raise ValueError("""
La lista ingresada no esta formada por diccionarios con las mismas claves.""")
    # Infer the file format from `path` and dispatch accordingly.
suffix = path.split(".")[-1]
if suffix == "csv":
return _write_csv_table(table, path)
elif suffix == "xlsx":
return _write_xlsx_table(table, path, column_styles, cell_styles)
else:
raise ValueError("""
{} no es un sufijo reconocido. Pruebe con .csv o.xlsx""".format(suffix))
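
# Illustrative sketch (an addition): minimal usage of write_table with a list
# of dicts sharing the same keys; the output format is inferred from the path
# suffix. The file name and field values here are hypothetical.
def _example_write_table():
    table = [
        {"field_title": "id", "field_type": "integer"},
        {"field_title": "name", "field_type": "string"},
    ]
    write_table(table, "fields.xlsx")
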
def _write_csv_table(table, path):
if len(table) == 0:
print("No se puede crear un CSV con una tabla vacía.")
return
headers = list(table[0].keys())
with open(path, 'wb') as target_file:
writer = csv.DictWriter(
csvfile=target_file,
fieldnames=headers,
lineterminator="\n",
encoding='utf-8')
writer.writeheader()
for row in table:
writer.writerow(row)
def _apply_styles_to_ws(ws, column_styles=None, cell_styles=None):
    # dict mapping each field name to its column
header_row = next(ws.rows)
headers_cols = {cell.value: cell.column for cell in header_row}
    # apply column styles
if column_styles:
for col, properties in iteritems(column_styles):
            # col may be "A" or "field_name"
col = headers_cols.get(col, col)
for prop_name, prop_value in iteritems(properties):
setattr(ws.column_dimensions[col], prop_name, prop_value)
    # apply cell styles
if cell_styles:
for i in moves.xrange(1, ws.max_row + 1):
for j in moves.xrange(1, ws.max_column + 1):
cell = ws.cell(row=i, column=j)
                # if the value is a valid URL, the cell becomes a hyperlink
if helpers.validate_url(cell.value):
cell.hyperlink = cell.value
cell.font = Font(underline='single', color='0563C1')
for cell_style in cell_styles:
match_all = ("col" not in cell_style
and "row" not in cell_style)
match_row = ("row" in cell_style
and cell_style["row"] == i)
match_col = ("col" in cell_style
and column_index_from_string(
headers_cols.get(cell_style["col"],
cell_style["col"])) == j)
if match_all or match_row or match_col:
for prop_name, prop_value in iteritems(cell_style):
if prop_name != "col" and prop_name != "row":
setattr(cell, prop_name, prop_value)
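
# Illustrative sketch (an addition): the shape of the style arguments that
# _apply_styles_to_ws understands. Columns may be addressed by Excel letter
# or by header name; a cell style matches every cell, one row, or one column.
# The concrete values below are hypothetical.
_EXAMPLE_COLUMN_STYLES = {
    "A": {"width": 40},                 # by column letter
    "dataset_title": {"width": 60},     # by header name
}
_EXAMPLE_CELL_STYLES = [
    {"row": 1, "font": Font(bold=True)},  # header row in bold
]
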
def _write_xlsx_table(tables,
path,
column_styles=None,
cell_styles=None,
tables_fields=None,
tables_names=None):
column_styles = column_styles or {}
cell_styles = cell_styles or {}
wb = pyxl.Workbook()
if isinstance(tables, dict):
ws_names = []
        # `tables_names` is used first, then any extra names there may be
if tables_names:
ws_names.extend(tables_names)
for key in list(tables.keys()):
if key not in ws_names:
ws_names.append(key)
                    # the names of any missing tables are appended
else:
ws_names = list(tables.keys())
wb.remove(wb.active)
for table_name in ws_names:
table = tables.get(table_name)
column_styles_sheet = column_styles.get(table_name)
cell_styles_sheet = cell_styles.get(table_name)
_list_table_to_ws(
wb,
table,
table_name,
column_styles_sheet,
cell_styles_sheet,
fields=tables_fields.get(table_name)
if tables_fields else None)
else:
_list_table_to_ws(
wb, tables, column_styles=column_styles, cell_styles=cell_styles)
wb.save(path)
def _list_table_to_ws(wb,
table,
table_name=None,
column_styles=None,
cell_styles=None,
fields=None):
if len(table) == 0 and not fields:
print("No se puede crear una hoja Excel con una tabla vacía.")
return
elif len(table) == 0 and fields:
        # the first row of the table is empty
table.append({field: None for field in fields})
if table_name:
ws = wb.create_sheet(title=table_name)
else:
ws = wb.active
headers = []
    # the given fields are used first, then any extra ones there may be
if fields:
headers.extend(fields)
for key in list(table[0].keys()):
if key not in headers:
headers.append(key)
    # the first row's headers are used for the whole table
else:
headers = list(table[0].keys())
ws.append(headers)
for index, row in enumerate(table):
row_values = []
for header in headers:
            # if the header is not in the row, its value is null
value = row.get(header)
if isinstance(value, list):
row_values.append(",".join(value))
else:
row_values.append(value)
ws.append(row_values)
_apply_styles_to_ws(ws, column_styles, cell_styles)
def write_json(obj, path):
"""Escribo un objeto a un archivo JSON con codificación UTF-8."""
obj_str = text_type(
json.dumps(obj, indent=4, separators=(",", ": "), ensure_ascii=False))
helpers.ensure_dir_exists(os.path.dirname(path))
with io.open(path, "w", encoding='utf-8') as target:
target.write(obj_str)
def write_json_catalog(catalog, path):
"""Función de compatibilidad con releases anteriores."""
write_json(catalog, path)
XLSX_FIELDS = {
"catalog": [
"catalog_identifier", "catalog_title", "catalog_description",
"catalog_publisher_name", "catalog_publisher_mbox", "catalog_issued",
"catalog_modified", "catalog_language", "catalog_superThemeTaxonomy",
"catalog_license", "catalog_homepage", "catalog_rights",
"catalog_spatial"
],
"dataset": [
"dataset_identifier", "dataset_title", "dataset_description",
"dataset_publisher_name", "dataset_publisher_mbox",
"dataset_contactPoint_fn", "dataset_contactPoint_hasEmail",
"dataset_superTheme", "dataset_theme", "dataset_keyword",
"dataset_accrualPeriodicity", "dataset_issued", "dataset_modified",
"dataset_language", "dataset_spatial", "dataset_temporal",
"dataset_landingPage", "dataset_license", "dataset_source",
"dataset_accessLevel"
],
"distribution": [
"dataset_identifier", "dataset_title", "distribution_identifier",
"distribution_title", "distribution_description",
"distribution_downloadURL", "distribution_fileName",
"distribution_format", "distribution_accessURL",
"distribution_mediaType", "distribution_license",
"distribution_byteSize", "distribution_issued",
"distribution_modified", "distribution_rights"
],
"field": [
"dataset_identifier", "dataset_title", "distribution_identifier",
"distribution_title", "field_title", "field_type", "field_description"
],
"theme": ["theme_id", "theme_label", "theme_description"]
}
def _tabulate_nested_dict(nested_dict_row,
field_root="dataset",
parents_roots=[]):
table_dict_row = {}
for key, value in list(nested_dict_row.items()):
if not isinstance(value, dict):
has_root = False
for root in parents_roots + [field_root]:
if key.startswith(root):
has_root = True
if has_root:
table_dict_row[key] = value
else:
table_dict_row["{}_{}".format(field_root, key)] = value
else:
tabulated_keys = _tabulate_nested_dict(value, field_root=key)
for nested_key, nested_value in list(tabulated_keys.items()):
if nested_key.startswith(field_root):
                    table_dict_row[nested_key] = nested_value
else:
table_dict_row["{}_{}".format(field_root,
nested_key)] = nested_value
return table_dict_row
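
# Illustrative sketch (an addition): what the flattening above does to a
# nested row. Keys of plain values get the field root as a prefix, and keys
# of nested dicts are prefixed with their parent key as well. The row below
# is hypothetical.
def _example_tabulate():
    row = {"identifier": "d1", "publisher": {"name": "ACME"}}
    flat = _tabulate_nested_dict(row, field_root="dataset")
    # flat == {"dataset_identifier": "d1", "dataset_publisher_name": "ACME"}
    return flat
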
def _generate_dataset_table(catalog):
headers = []
datasets = []
    # tabulate structured dictionaries as flat lists of dictionaries
for dataset in catalog.get_datasets(exclude_meta_fields=["distribution"]):
tab_dataset = _tabulate_nested_dict(dataset, "dataset")
datasets.append(tab_dataset)
        # add every new key that is not yet tracked
for key in tab_dataset:
if key not in headers:
headers.append(key)
# agrega "nones" para todos aquellos datasets que no tengan alguna key
for dataset in datasets:
for header in headers:
if header not in dataset:
dataset[header] = None
return datasets
def _generate_distribution_table(catalog):
headers = []
distributions = []
    # tabulate structured dictionaries as flat lists of dictionaries
for distribution in catalog.get_distributions(
exclude_meta_fields=["field"]):
tab_distribution = _tabulate_nested_dict(distribution, "distribution",
["dataset"])
tab_distribution["dataset_title"] = catalog.get_dataset(
tab_distribution["dataset_identifier"]).get("title")
distributions.append(tab_distribution)
        # add every new key that is not yet tracked
for key in tab_distribution:
if key not in headers:
headers.append(key)
# agrega "nones" para todos aquellos datasets que no tengan alguna key
for distribution in distributions:
for header in headers:
if header not in distribution:
distribution[header] = None
return distributions
def _generate_field_table(catalog):
headers = []
fields = []
    # tabulate structured dictionaries as flat lists of dictionaries
for field in catalog.get_fields():
tab_field = _tabulate_nested_dict(field, "field",
["dataset", "distribution"])
tab_field["dataset_title"] = catalog.get_dataset(
tab_field["dataset_identifier"]).get("title")
tab_field["distribution_title"] = catalog.get_distribution(
tab_field["distribution_identifier"]).get("title")
fields.append(tab_field)
        # add every new key that is not yet tracked
for key in tab_field:
if key not in headers:
headers.append(key)
# agrega "nones" para todos aquellos datasets que no tengan alguna key
for field in fields:
for header in headers:
if header not in field:
field[header] = None
return fields
def _generate_theme_table(catalog):
headers = []
themes = []
    # tabulate structured dictionaries as flat lists of dictionaries
for theme in catalog.get_themes():
tab_theme = _tabulate_nested_dict(theme, "theme")
themes.append(tab_theme)
        # add every new key that is not yet tracked
for key in tab_theme:
if key not in headers:
headers.append(key)
active_nc_host.mgmt_dns2 = row[13]
# 14 - "[separate data network (True|False)]" - separate_data_network
if row[14].lower() == 'true':
active_nc_host.separate_data_network = True
else:
active_nc_host.separate_data_network = False
# 15 - "[use data DHCP (True|False)]" - allow_data_dhcp
if row[15].lower() == 'true':
active_nc_host.allow_data_dhcp = True
else:
active_nc_host.allow_data_dhcp = False
# 16 - "[data IP]" - data_ip_address
if row[16] and ip_address_is_valid(row[16]):
active_nc_host.data_ip_address = row[16]
# 17 - "[data netmask (octet structure)]" - data_netmask
if row[17]:
active_nc_host.data_netmask = row[17]
# 18 - "[data gateway]" - data_gateway
if row[18] and ip_address_is_valid(row[18]):
active_nc_host.data_gateway = row[18]
# 19 - "[data DNS 1]" - data_dns1
if row[19] and ip_address_is_valid(row[19]):
active_nc_host.data_dns1 = row[19]
# 20 - "[data DNS 2]" - data_dns2
if row[20] and ip_address_is_valid(row[20]):
active_nc_host.data_dns2 = row[20]
# 21 - "[MTU]" - mtu
if row[21]:
active_nc_host.mtu = row[21]
# 22 - "[require metadata (True|False)]" - v_require_nuage_metadata
if row[22].lower() == 'true':
active_nc_host.v_require_nuage_metadata = True
else:
active_nc_host.v_require_nuage_metadata = False
# 23 - "[generic split activation (True|False)]" - generic_split_activation
if row[23].lower() == 'true':
active_nc_host.generic_split_activation = True
else:
active_nc_host.generic_split_activation = False
# 24 - "[multi VM support (True|False)]" - multi_vmssupport
if row[24].lower() == 'true':
active_nc_host.multi_vmssupport = True
else:
active_nc_host.multi_vmssupport = False
# 25 - "[DHCP relay server (IP)]" - dhcp_relay_server
if row[25] and ip_address_is_valid(row[25]):
active_nc_host.dhcp_relay_server = row[25]
# 26 - "[flow eviction threshold]" - flow_eviction_threshold
if row[26]:
active_nc_host.flow_eviction_threshold = row[26]
# 27 - "[datapath sync timeout]" - datapath_sync_timeout
if row[27]:
active_nc_host.datapath_sync_timeout = row[27]
# 28 - "[network uplink interface]" - network_uplink_interface
if row[28]:
active_nc_host.network_uplink_interface = row[28]
# 29 - "[network uplink IP]" - network_uplink_interface_ip
if row[29] and ip_address_is_valid(row[29]):
active_nc_host.network_uplink_interface_ip = row[29]
# 30 - "[network uplink netmask (octet structure)]" - network_uplink_interface_netmask
if row[30]:
active_nc_host.network_uplink_interface_netmask = row[30]
# 31 - "[network uplink gateway]" - network_uplink_interface_gateway
if row[31] and ip_address_is_valid(row[31]):
active_nc_host.network_uplink_interface_gateway = row[31]
# 32 - "[script URL]" - customized_script_url
if row[32]:
active_nc_host.customized_script_url = row[32]
# 33 - "[personality]" - personality
if row[33].lower() == 'vrs' or row[33].lower() == 'vrs-g':
active_nc_host.personality = row[33]
# 34 - "[site ID]" - site_id
if row[34]:
active_nc_host.site_id = row[34]
# 35 - "[NFS server address (IP)]" - nfs_log_server
if row[35] and ip_address_is_valid(row[35]):
active_nc_host.nfs_log_server = row[35]
# 36 - "[NFS mount path]" - nfs_mount_path
if row[36]:
active_nc_host.nfs_mount_path = row[36]
# 37 - "[primary Nuage controller (IP)]" - primary_nuage_controller
if row[37] and ip_address_is_valid(row[37]):
active_nc_host.primary_nuage_controller = row[37]
# 38 - "[secondary Nuage controller (IP)]" - secondary_nuage_controller
if row[38] and ip_address_is_valid(row[38]):
active_nc_host.secondary_nuage_controller = row[38]
# 39 - "[primary NTP server (IP)]" - ntp_server1
if row[39] and ip_address_is_valid(row[39]):
active_nc_host.ntp_server1 = row[39]
# 40 - "[secondary NTP server (IP)]" - ntp_server2
if row[40] and ip_address_is_valid(row[40]):
active_nc_host.ntp_server2 = row[40]
# 41 - "[static route target IP]" - static_route
if row[41] and ip_address_is_valid(row[41]):
active_nc_host.static_route = row[41]
# 42 - "[static route target IP]" - static_route_netmask
if row[42]:
active_nc_host.static_route_netmask = row[42]
# 43 - "[static route next-hop gateway]" - static_route_gateway
if row[43] and ip_address_is_valid(row[43]):
active_nc_host.static_route_gateway = row[43]
# 44 - "[multicast send interface]" - multicast_send_interface
if row[44]:
active_nc_host.multicast_send_interface = row[44]
# 45 - "[multicast send IP]" - multicast_send_interface_ip
if row[45] and ip_address_is_valid(row[45]):
active_nc_host.multicast_send_interface_ip = row[45]
# 46 - "[multicast send netmask (octet structure)]" - multicast_send_interface_netmask
if row[46]:
active_nc_host.multicast_send_interface_netmask = row[46]
# 47 - "[multicast receive IP]" - multicast_receive_interface_ip
if row[47] and ip_address_is_valid(row[47]):
active_nc_host.multicast_receive_interface_ip = row[47]
# 48 - "[multicast receive netmask (octet structure)]" - multicast_receive_interface_netmask
if row[48]:
active_nc_host.multicast_receive_interface_netmask = row[48]
# 49 - "[Host Agent VM Port Group]"
if row[49]:
agent_portgroup_name = row[49]
# 50 - "[Host Agent VM Datastore]"
if row[50]:
agent_datastore_name = row[50]
else:
logger.warning('Host %s with IP %s from the vCenter Cluster %s is not in the hosts file, it will not be updated.' % (vc_host.name, vc_host_ip, vc_cl.name))
else:
logger.debug('No hosts file specified, only updating host %s with IP %s from the vCenter Cluster %s with the latest network information.' % (vc_host.name, vc_host_ip, vc_cl.name))
# Saving active host
active_nc_host.save()
logger.info('Updated Host %s with IP %s from the vCenter Cluster %s in the Nuage vCenter Deployment Tool' % (vc_host.name, vc_host_ip, vc_cl.name))
# Updating the vCenter host if the flag is set.
if host_configure_agent:
update_host_vm_agent_configuration(logger=logger, vc_cl=vc_cl, vc_host=vc_host, vc_host_ip=vc_host_ip, agent_portgroup_name=agent_portgroup_name, agent_datastore_name=agent_datastore_name)
def update_host_vm_agent_configuration(logger, vc_cl, vc_host, vc_host_ip, agent_portgroup_name, agent_datastore_name):
logger.debug('Configuring the Agent VM settings for Host %s with IP %s from the vCenter Cluster %s' % (vc_host.name, vc_host_ip, vc_cl.name))
# Setting base variables
agent_portgroup = None
agent_datastore = None
# Find the correct Port group
    logger.debug('Searching for Port group %s for Host %s with IP %s from the vCenter Cluster %s' % (agent_portgroup_name, vc_host.name, vc_host_ip, vc_cl.name))
for network in vc_host.network:
logger.debug('Checking Port group %s on Host %s with IP %s from the vCenter Cluster %s' % (network.name, vc_host.name, vc_host_ip, vc_cl.name))
if network.name == agent_portgroup_name:
logger.debug('Found Port group %s for Host %s with IP %s from the vCenter Cluster %s' % (network.name, vc_host.name, vc_host_ip, vc_cl.name))
agent_portgroup = network
break
# If no port group found, stop and do not configure the Agent VM settings of the host
if not agent_portgroup:
logger.error('No Port group named %s found for Host %s with IP %s from the vCenter Cluster %s. Skipping Host Agent VM Settings configuration' % (agent_portgroup_name, vc_host.name, vc_host_ip, vc_cl.name))
return -1
# Finding the first local datastore name if there hasn't been one specified
if not agent_datastore_name:
logger.debug('Searching for first local VMFS datastore on Host %s with IP %s from the vCenter Cluster %s' % (vc_host.name, vc_host_ip, vc_cl.name))
for fs in vc_host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
logger.debug('Checking Datastore %s for Host %s with IP %s from the vCenter Cluster %s' % (fs.volume.name, vc_host.name, vc_host_ip, vc_cl.name))
if fs.volume.type == "VMFS" and fs.volume.local:
logger.debug('Found local Datastore %s for Host %s with IP %s from the vCenter Cluster %s' % (fs.volume.name, vc_host.name, vc_host_ip, vc_cl.name))
agent_datastore_name = fs.volume.name
break
# Find the correct Datastore
logger.debug('Searching for Datastore %s for Host %s with IP %s from the vCenter Cluster %s' % (agent_datastore_name, vc_host.name, vc_host_ip, vc_cl.name))
for datastore in vc_host.datastore:
logger.debug('Checking Datastore %s on Host %s with IP %s from the vCenter Cluster %s' % (datastore.name, vc_host.name, vc_host_ip, vc_cl.name))
if datastore.name == agent_datastore_name:
logger.debug('Found Datastore %s for Host %s with IP %s from the vCenter Cluster %s' % (datastore.name, vc_host.name, vc_host_ip, vc_cl.name))
agent_datastore = datastore
break
# If no datastore found, stop and do not configure the Agent VM settings of the host
if not agent_datastore:
logger.error('No datastore named %s found for Host %s with IP %s from the vCenter Cluster %s. Skipping Host Agent VM Settings configuration' % (agent_datastore_name, vc_host.name, vc_host_ip, vc_cl.name))
return -1
# Setting actual Agent VM settings
logger.debug('Setting the Agent VM settings for Host %s with IP %s from the vCenter Cluster %s to: datastore %s and port group %s' % (vc_host.name, vc_host_ip, vc_cl.name, agent_datastore.name, agent_portgroup.name))
try:
agent_config = vim.host.EsxAgentHostManager.ConfigInfo(agentVmDatastore=agent_datastore, agentVmNetwork=agent_portgroup)
vc_host.configManager.esxAgentHostManager.EsxAgentHostManagerUpdateConfig(configInfo=agent_config)
except vim.fault.HostConfigFault as e:
logger.error('FAILED to configure the Agent VM settings for Host %s with IP %s from the vCenter Cluster %s to: datastore %s and port group %s. Exception: %s' % (vc_host.name, vc_host_ip, vc_cl.name, agent_datastore.name, agent_portgroup.name, e.msg))
return -1
    logger.debug('Successfully configured the Agent VM settings for Host %s with IP %s from the vCenter Cluster %s to: datastore %s and port group %s' % (vc_host.name, vc_host_ip, vc_cl.name, agent_datastore.name, agent_portgroup.name))
return 0
def ip_address_is_valid(address):
try:
socket.inet_aton(address)
except socket.error:
return False
else:
return True
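
# Illustrative note (an addition): socket.inet_aton also accepts shorthand
# forms such as '10.1' or '127.1', so the check above is permissive. A
# stricter dotted-quad check could use the Python 3 standard ipaddress
# module, as sketched here; this is an alternative, not the tool's actual
# behaviour.
def ip_address_is_valid_strict(address):
    import ipaddress
    try:
        ipaddress.IPv4Address(address)
    except ValueError:
        return False
    return True
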
def main():
"""
Manage the vCenter Integration Node configuration
"""
# Handling arguments
args = get_args()
all_clusters = args.all_clusters
all_datacenters = args.all_datacenters
all_hosts = args.all_hosts
clusters = []
if args.clusters:
clusters = args.clusters
debug = args.debug
allow_fqdn = args.allow_fqdn
datacenters = []
if args.datacenters:
datacenters = args.datacenters
hosts = []
if args.hosts:
hosts = args.hosts
host_configure_agent = args.host_configure_agent
hosts_file = None
if args.hosts_file:
hosts_file = args.hosts_file
hv_username = None
if args.hv_username:
hv_username = args.hv_username
hv_password = None
if args.hv_password:
hv_password = args.hv_password
hv_management_network = None
if args.hv_management_network:
hv_management_network = args.hv_management_network
hv_data_network = None
if args.hv_data_network:
hv_data_network = args.hv_data_network
hv_vm_network = None
if args.hv_vm_network:
hv_vm_network = args.hv_vm_network
hv_mc_network = None
if args.hv_mc_network:
hv_mc_network = args.hv_mc_network
log_file = None
if args.logfile:
bordery[sl] = hy
# "left"
sl = np.s_[2 * nptx + npty:-1]
borderx[sl] = lx
bordery[sl] = ys[::-1]
# close polygon:
borderx[-1] = borderx[0]
bordery[-1] = bordery[0]
ra, dec = self.det_to_world(borderx, bordery)
# TODO: for strange reasons, occasionally ra[0] != ra[-1] and/or
# dec[0] != dec[-1] (even though we close the polygon in the
# previous two lines). Then SphericalPolygon fails because
# points are not closed. Therefore we force it to be closed:
ra[-1] = ra[0]
dec[-1] = dec[0]
self.img_bounding_ra = ra
self.img_bounding_dec = dec
self._bb_radec = (ra, dec)
self._polygon = SphericalPolygon.from_radec(ra, dec)
def _calc_cat_convex_hull(self):
"""
Compute convex hull that bounds the sources in the catalog.
"""
if self.tpwcs is None or self.catalog is None:
return
x = self.catalog['x']
y = self.catalog['y']
if len(x) == 0:
# no points
raise RuntimeError( # pragma: no cover
"Unexpected error: Contact software developer"
)
elif len(x) > 2:
ra, dec = convex_hull(x, y, wcs=self.det_to_world)
# else, for len(x) in [1, 2], use entire image footprint.
# TODO: a more robust algorithm should be implemented to deal with
# len(x) in [1, 2] cases.
# TODO: for strange reasons, occasionally ra[0] != ra[-1] and/or
# dec[0] != dec[-1] (even though we close the polygon in the
# previous two lines). Then SphericalPolygon fails because
            # points are not closed. Therefore we force it to be closed:
ra[-1] = ra[0]
dec[-1] = dec[0]
self._bb_radec = (ra, dec)
self._polygon = SphericalPolygon.from_radec(ra, dec)
self._poly_area = np.fabs(self._polygon.area())
def calc_bounding_polygon(self):
"""
Calculate bounding polygon of the image or of the sources in the
catalog (if catalog was set).
"""
# we need image's footprint for later:
self._calc_chip_bounding_polygon()
# create smallest convex spherical polygon bounding all sources:
if self.catalog:
self._calc_cat_convex_hull()
@property
def bb_radec(self):
"""
Get a 2xN `numpy.ndarray` of RA and DEC of the vertices of the
bounding polygon.
"""
return self._bb_radec
class WCSGroupCatalog(object):
"""
A class that holds together `WCSImageCatalog` image catalog objects
whose relative positions are fixed and whose source catalogs should be
fitted together to a reference catalog.
"""
def __init__(self, images, name=None):
"""
Parameters
----------
images: list of WCSImageCatalog
A list of `WCSImageCatalog` image catalogs.
name: str, None, optional
Name of the group.
"""
self._catalog = None
if isinstance(images, WCSImageCatalog):
self._images = [images]
if images.catalog is None:
raise ValueError("Each input WCS image catalog must have a "
"valid catalog.")
elif hasattr(images, '__iter__'):
if not images:
raise ValueError("List of images cannot be empty.")
self._images = []
for im in images:
if not isinstance(im, WCSImageCatalog):
raise TypeError("Each element of the 'images' parameter "
"must be an 'WCSImageCatalog' object.")
if im.catalog is None:
raise ValueError("Each input WCS image catalog must have "
"a valid catalog.")
self._images.append(im)
else:
raise TypeError("Parameter 'images' must be either a single "
"'WCSImageCatalog' object or a list of "
"'WCSImageCatalog' objects")
self._name = name
self._catalog = self.create_group_catalog()
self.update_bounding_polygon()
@property
def name(self):
""" Get/set :py:class:`WCSImageCatalog` object's name.
"""
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def polygon(self):
""" Get image's (or catalog's) bounding spherical polygon.
"""
return self._polygon
def intersection(self, wcsim):
"""
Compute intersection of this `WCSGroupCatalog` object and another
`WCSImageCatalog`, `WCSGroupCatalog`, or
:py:class:`~spherical_geometry.polygon.SphericalPolygon`
object.
Parameters
----------
wcsim: WCSImageCatalog, WCSGroupCatalog, SphericalPolygon
Another object that should be intersected with this
`WCSGroupCatalog`.
Returns
-------
polygon: SphericalPolygon
A :py:class:`~spherical_geometry.polygon.SphericalPolygon` that is
the intersection of this `WCSGroupCatalog` and `wcsim`.
"""
if isinstance(wcsim, (WCSGroupCatalog, WCSImageCatalog)):
return self._polygon.intersection(wcsim.polygon)
else:
return self._polygon.intersection(wcsim)
# TODO: due to a bug in the sphere package, see
# https://github.com/spacetelescope/sphere/issues/74
    # intersections with polygons formed as unions do not work.
# For this reason I re-implement 'intersection_area' below with
# a workaround for the bug.
# The original implementation should be uncommented once the bug
# is fixed.
#
# def intersection_area(self, wcsim):
# """ Calculate the area of the intersection polygon.
# """
# return np.fabs(self.intersection(wcsim).area())
def intersection_area(self, wcsim):
""" Calculate the area of the intersection polygon.
"""
return sum(im.intersection_area(wcsim) for im in self._images)
def _guarded_intersection_area(self, wcsim):
"""
Calculate the area of the intersection polygon. If some
intersections fail due to a bug/limitation of ``spherical_geometry``
then the area of the valid intersections will be returned.
If images do not intersect or intersection fails, 0 will be returned.
"""
return sum(im._guarded_intersection_area(wcsim) for im in self._images)
def update_bounding_polygon(self):
""" Recompute bounding polygons of the member images.
"""
polygons = [im.polygon for im in self._images]
if polygons:
try:
self._polygon = SphericalPolygon.multi_union(polygons)
except MalformedPolygonError:
log.warning(
"MalformedPolygonError in spherical_geometry. Using "
"convex hull instead of multi_union. Alignment order "
"may be sub-optimal."
)
refcat = RefCatalog(self._catalog)
self._polygon = refcat.polygon
else:
self._polygon = SphericalPolygon([])
def __len__(self):
return len(self._images)
def __getitem__(self, idx):
return self._images[idx]
def __iter__(self):
for image in self._images:
yield image
@property
def catalog(self):
""" Get/set image's catalog.
"""
return self._catalog
def create_group_catalog(self):
"""
Combine member's image catalogs into a single group's catalog.
Returns
-------
group_catalog: astropy.table.Table
Combined group catalog.
"""
catalogs = []
catno = 0
has_weights = None
cat_names = []
for image in self._images:
catlen = len(image.catalog)
if catlen == 0:
continue
cat_name = image.catalog.meta.get(
'name',
image.name if image.name else 'Unnamed'
)
cat_names.append(cat_name)
if has_weights is None:
has_weights = 'weight' in image.catalog.colnames
elif has_weights != ('weight' in image.catalog.colnames):
raise KeyError("Non-empty catalogs in a group must all "
"either have or not have 'weight' column.")
if image.name is None:
catname = 'Catalog #{:d}'.format(catno)
else:
catname = image.name
col_catname = table.MaskedColumn(catlen * [catname],
name='cat_name')
col_imcatidx = table.MaskedColumn(catlen * [catno],
name='_imcat_idx')
col_id = table.MaskedColumn(image.catalog['id'])
col_x = table.MaskedColumn(image.catalog['x'], dtype=np.double)
col_y = table.MaskedColumn(image.catalog['y'], dtype=np.double)
ra, dec = image.det_to_world(
image.catalog['x'], image.catalog['y']
)
col_ra = table.MaskedColumn(ra, dtype=np.double, name='RA')
col_dec = table.MaskedColumn(dec, dtype=np.double, name='DEC')
if has_weights:
col_wght = table.MaskedColumn(image.catalog['weight'],
dtype=np.double)
cat = table.Table(
[col_imcatidx, col_catname, col_id, col_x,
col_y, col_ra, col_dec, col_wght],
masked=True
)
else:
cat = table.Table(
[col_imcatidx, col_catname, col_id, col_x,
col_y, col_ra, col_dec],
masked=True
)
catalogs.append(cat)
catno += 1
catname = os.path.commonprefix(cat_names) if cat_names else None
if catno:
cat = table.vstack(catalogs, join_type='exact')
else:
# no catalogs with sources. Create an empty table with required
# columns and types:
image = self._images[0]
if image.name is None:
catname = 'Catalog #{:d}'.format(catno)
else:
catname = image.name
col_catname = table.MaskedColumn([catname], name='cat_name')
col_catname = col_catname[[False]]
col_imcatidx = table.MaskedColumn([], dtype=int,
name='_imcat_idx')
col_id = table.MaskedColumn(image.catalog['id'])
col_x = table.MaskedColumn([], name='x', dtype=np.double)
col_y = table.MaskedColumn([], name='y', dtype=np.double)
col_ra = table.MaskedColumn([], name='RA', dtype=np.double)
col_dec = table.MaskedColumn([], name='DEC', dtype=np.double)
cat = table.Table(
[col_imcatidx, col_catname, col_id, col_x,
col_y, col_ra, col_dec],
masked=True
)
if catname:
cat.meta['name'] = catname
return cat
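
    # Illustrative sketch (an addition, not part of the original class): the
    # vstack above uses join_type='exact', so all member catalogs must share
    # exactly the same set of columns -- hence the all-or-none rule for the
    # 'weight' column. A minimal standalone demonstration with hypothetical
    # two-column tables:
    @staticmethod
    def _example_vstack():
        t1 = table.Table({'id': [1, 2], 'x': [0.0, 1.0]}, masked=True)
        t2 = table.Table({'id': [3], 'x': [2.0]}, masked=True)
        # join_type='exact' raises an error if the column sets differ
        return table.vstack([t1, t2], join_type='exact')
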
def get_unmatched_cat(self):
"""
Retrieve only those sources from the catalog that have **not** been
matched to the sources in the reference catalog.
"""
mask = self._catalog['matched_ref_id'].mask
return self._catalog[mask]
def get_matched_cat(self):
"""
Retrieve only those sources from the catalog that **have been**
matched to the sources in the reference catalog.
"""
mask = np.logical_not(self._catalog['matched_ref_id'].mask)
return self._catalog[mask]
def recalc_catalog_radec(self):
""" Recalculate RA and DEC of the sources in the catalog.
"""
for k, image in enumerate(self._images):
idx = (self._catalog['_imcat_idx'] == k)
if not np.any(idx):
continue
ra, dec = image.det_to_world(
self._catalog['x'][idx], self._catalog['y'][idx]
)
self._catalog['RA'][idx] = ra
self._catalog['DEC'][idx] = dec
def calc_tanp_xy(self, tanplane_wcs):
"""
Compute x- and y-positions of the sources from the image catalog
in the tangent plane. This creates the following
columns in the catalog's table: ``'TPx'`` and ``'TPy'``.
Parameters
----------
tanplane_wcs: TPWCS
A `TPWCS` object that will provide transformations to
            the tangent plane to which sources of this catalog should be
"projected".
"""
if 'RA' not in self._catalog.colnames or \
'DEC' not in self._catalog.colnames:
raise RuntimeError("'recalc_catalog_radec()' should have been run "
"prior to calc_tanp_xy()")
# compute x & y in the reference WCS:
xtp, ytp = tanplane_wcs.world_to_tanp(self.catalog['RA'],
self.catalog['DEC'])
self._catalog['TPx'] = table.MaskedColumn(
xtp, name='TPx', dtype=np.double, mask=False
)
self._catalog['TPy'] = table.MaskedColumn(
ytp, name='TPy', dtype=np.double, mask=False
)
def match2ref(self, refcat, match=None):
""" Uses ``xyxymatch`` to cross-match sources between this catalog and
a reference catalog.
Parameters
----------
refcat: RefCatalog
A `RefCatalog` object that contains a catalog of reference sources
            as well
1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1],
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1],
[0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1,
# The _mango_open_core module is a shared-object library
# which exports python functions and classes.
import mango.mpi
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_open_ndarray_converters as _mango_open_ndarray_converters_so
from . import _mango_open_core as _mango_open_core_so
sys.setdlopenflags(_flags)
else:
from . import _mango_open_ndarray_converters as _mango_open_ndarray_converters_so
from . import _mango_open_core as _mango_open_core_so
from ._mango_open_core import *
import scipy as sp
import numpy as np
_mango_open_core_so._setNumpyDTypeType(type(sp.dtype("int32")))
import re
import sys
from mango.utils import ModuleObjectFactory as _ModuleObjectFactory
logger,rootLogger = mango.mpi.getLoggers(__name__)
if (mango.mpi.haveMpi4py):
_mango_open_core_so._setMpi4pyPyMPICommTypeObject(type(mango.mpi.Comm()))
_mango_open_core_so._setMpi4pyPyMPICartcommTypeObject(type(mango.mpi.Cartcomm()))
# Class definition for (sphinx) documentation only.
class mtype(_mango_open_core_so.mtype):
"""
Image type for mango :obj:`Dds` distributed array objects.
"""
def __init__(self, name):
_mango_open_core_so.mtype.__init__(self, name)
def __str__(self):
return self.name()
def __eq__(self, other):
return (
issubclass(other.__class__, _mango_open_core_so.mtype)
and
(self.name() == other.name())
)
# Class definition for (sphinx) documentation only.
class gtype(_mango_open_core_so.gtype):
"""
Type for mango non-array data types.
"""
def __init__(self, name):
_mango_open_core_so.gtype.__init__(self, name)
def __str__(self):
return self.name()
def __eq__(self, other):
return (
issubclass(other.__class__, _mango_open_core_so.gtype)
and
(self.name() == other.name())
)
# To distinguish between 'mtype' parameter name and 'mtype' class.
_mtypeCls = _mango_open_core_so.mtype
def _ddsSetItem(self, *args):
"""
Set value at specified voxel index. May be called as dds[zIdx,yIdx,xIdx]=v
or as dds[(zIdx,yIdx,xIdx)]=v or as dds[scalarIdx]=v.
"""
sfx = self.__class__.__name__
if (len(args) == 4):
return getattr(_mango_open_core_so, "_setitemzyxidx" + sfx)(self, *args)
elif (len(args) == 2):
idx = args[0]
if (hasattr(idx, "__getitem__")):
if (len(idx) == 3):
return getattr(_mango_open_core_so, "_setitemseqidx" + sfx)(self, *args)
else:
raise ValueError("Expected 3D index, got len(idx)=%s" % len(idx))
else:
return getattr(_mango_open_core_so, "_setitemscalaridx" + sfx)(self, *args)
raise RuntimeError("Expected 4 or 2 arguments, got %s arguments: %s" % (len(args), str(args)))
def _ddsGetItem(self, *args):
"""
Return value at specified voxel index. May be called as dds[zIdx,yIdx,xIdx]
or as dds[(zIdx,yIdx,xIdx)] or as dds[scalarIdx].
"""
sfx = self.__class__.__name__
if (len(args) == 3):
return getattr(_mango_open_core_so, "_getitemzyxidx" + sfx)(self, *args)
elif (len(args) == 1):
idx = args[0]
if (hasattr(idx, "__getitem__")):
if (len(idx) == 3):
return getattr(_mango_open_core_so, "_getitemseqidx" + sfx)(self, *args)
else:
raise ValueError("Expected 3D index, got len(idx)=%s" % len(idx))
else:
return getattr(_mango_open_core_so, "_getitemscalaridx" + sfx)(self, *args)
raise RuntimeError("Expected 3 or 1 arguments, got %s arguments: %s" % (len(args), str(args)))
def _ddsCopy(self, *args, **kwargs):
"""
Creates a (deep) copy of this :obj:`Dds` object (halo elements not copied or updated).
:type self: :obj:`Dds`
:param self: Copies data elements from dds into newly created :obj:`Dds` object.
:type dtype: :samp:`scipy.dtype`
:param dtype: Specifies the type of elements in the newly created :obj:`Dds` object,
e.g. "uint16", "float32", etc.
:type mtype: :obj:`mango.mtype`
:param mtype: The mango image mtype of the Dds, one of:
"Tomographic_Data", "Segmented_Data", etc.
:type mpidims: sequence
:param mpidims: The cartesian MPI process layout for the new created :obj:`Dds`.
:rtype: :obj:`Dds`
:return: New :obj:`Dds` instance of same dimensions and layout as :samp:`self`
with non-halo elements copied from :samp:`self`.
"""
return mango.copy(*args, **kwargs)
_ddsMtypeList = [_mtypeCls(n) for n in _mango_open_core_so._getMtypeNameList()]
_ddsMtypeNameList = []
for mt in _ddsMtypeList:
_ddsMtypeNameList.append(mt.name())
_ddsMtypeNameList.append(mt.shortName())
if (mt.fileNameBase() != mt.shortName()):
_ddsMtypeNameList.append(mt.fileNameBase())
_ddsDtypeList = []
_ddsTypeList = []
def getDdsMtypeList():
"""
Returns the list of possible :obj:`mango.mtype` objects for
the :obj:`mango.Dds` :samp:`mtype` attribute.
:rtype: :obj:`list` of :obj:`mango.mtype`
:return: List of possible :obj:`mango.mtype` objects.
"""
return _ddsMtypeList
def __addClassAttributes():
"""
Adds some attributes to the _mango_open_core_so._Dds_(.*) classes, in particular
__getitem__ method, __setitem__ method, and the dtype attribute.
"""
thingStrList = dir(_mango_open_core_so)
ddsClsRegEx = re.compile("_Dds_(.*)")
for thingStr in thingStrList:
mtch = ddsClsRegEx.match(thingStr)
if (mtch != None):
thing = getattr(_mango_open_core_so, thingStr)
thing.dtype = sp.dtype(mtch.group(1))
_ddsDtypeList.append(thing.dtype)
thing.__getitem__ = _ddsGetItem
thing.__setitem__ = _ddsSetItem
thing.copy = _ddsCopy
_ddsTypeList.append(thing)
__addClassAttributes()
def isDds(obj):
"""
Returns whether specified :samp:`obj` is an instance of a :obj:`mango.Dds`.
:type obj: :obj:`object`
:param obj: Object to be tested.
:rtype: :obj:`bool`
:return: :samp:`True` is an instance of a :obj:`mango.Dds` distributed array.
"""
for ddsType in _ddsTypeList:
if (isinstance(obj, ddsType)):
return True
return False
def empty(shape, dtype=None, mtype=None, halo=(0,0,0), mpidims=(0,0,0), origin=(0,0,0), subdshape=None, subdorigin=None):
if (isinstance(halo, int) or ((sys.version_info.major < 3) and isinstance(halo, long))):
# Make halo a 3-sequence
halo = [halo,]*3
if (mtype != None):
mtype = _mtypeCls(str(mtype))
if ((dtype != None) and (mtype.dtype != sp.dtype(dtype))):
raise \
ValueError(
"dtype=%s argument does not agree with mtype.dtype=%s argument (mtype=%s)"
%
(dtype, mtype.dtype, mtype)
)
dtype = mtype.dtype
if ((mtype is None) and (dtype is None)):
dtype = "float64"
if (len(shape) != 3):
raise \
ValueError(
(
"Can only create 3D shaped Dds: %dD Dds shape=%s requested."
)
%
(len(shape), shape)
)
if (len(origin) != 3):
raise \
ValueError(
(
"Need 3D origin index, got origin=%s."
)
%
(origin, )
)
if (len(mpidims) != 3):
raise \
ValueError(
(
"Need 3D mpidims layout, got mpidims=%s."
)
%
(mpidims, )
)
createFunc = getattr(_mango_open_core_so, "_create" + "_Dds_" + str(sp.dtype(dtype)))
try:
newDds = \
createFunc(
gshape=shape,
gorigin=origin,
haloshape=halo,
mpidims=mpidims,
lshape=subdshape,
lorigin=subdorigin
)
except RuntimeError as e:
logger.error(
"Error creating Dds: %s(gshape=%s, gorigin=%s, haloshape=%s, mpidims=%s, lshape=%s, lorigin=%s)"
%
            (createFunc, shape, origin, halo, mpidims, subdshape, subdorigin)
)
raise
newDds.mtype = mtype
return newDds
empty.__doc__=\
"""
Creates an uninitialised :obj:`Dds` object of specified global size.
:type shape: 3-sequence
:param shape: Three element sequence indicating the global shape
of the array e.g. :samp:`(zSz, ySz, xSz)`.
:type dtype: :samp:`scipy.dtype`
:param dtype: The data-type of the :obj:`Dds` elements, one of:
%s.
:type mtype: :obj:`mango.mtype`
:param mtype: The mango image mtype of the :obj:`Dds`, one of:
%s.
:type halo: 3-sequence or int
:param halo: The number of voxels added as a layer around each MPI-subdomain.
:type mpidims: 3-sequence
:param mpidims: The shape of the cartesian MPI process layout.
:type origin: 3-sequence
:param origin: The global indexing origin for the :obj:`Dds` array.
:type subdshape: 3-sequence
:param subdshape: The sub-domain shape for the :obj:`Dds` on this MPI process.
:type subdorigin: 3-sequence
:param subdorigin: The global indexing origin for the :obj:`Dds` sub-domain on this MPI process.
:rtype: :obj:`Dds`
:return: :obj:`Dds` with uninitialised (arbitrary) array elements.
""" % \
(
", ".join(["'" + e + "'" for e in map(str,_ddsDtypeList)]),
", ".join(["'" + e + "'" for e in map(str,_ddsMtypeNameList)])
)
def empty_like(dds, shape=None, dtype=None, mtype=None, halo=None, mpidims=None, origin=None, subdshape=None, subdorigin=None):
"""
Create a new :obj:`Dds` object of same type/shape/MPI-layout as a specified :obj:`Dds` object.
:type dds: :obj:`Dds`
:param dds: The type/shape/MPI-layout determine the same attributes of the returned :obj:`Dds`.
:type shape: 3-sequence
:param shape: Overrides :samp:`dds.shape`.
:type dtype: scipy.dtype
:param dtype: Overrides :samp:`dds.dtype`.
:type mtype: mango.mtype
:param mtype: Overrides :samp:`dds.mtype`.
:type halo: 3-sequence
:param halo: Overrides :samp:`dds.halo`.
:type mpidims: 3-sequence
:param mpidims: Overrides :samp:`dds.mpi.shape`.
:type origin: 3-sequence
:param origin: Overrides :samp:`dds.origin`.
:type subdshape: 3-sequence
:param subdshape: Overrides :samp:`dds.subd.shape`.
:type subdorigin: 3-sequence
:param subdorigin: Overrides :samp:`dds.subd.origin`.
:rtype: :obj:`Dds`
:return: :obj:`Dds` of uninitialised (arbitrary) data with the same type,
shape and MPI-layout as :samp:`dds`.
"""
if ((shape is None) and (origin is None) and (subdorigin is None) and (mpidims is None)):
subdorigin = dds.subd.origin
if ((shape is None) and (origin is None) and (subdshape is None) and (mpidims is None)):
subdshape = dds.subd.shape
if (shape is None):
shape = dds.shape
if (mtype is None and dtype is None):
if (hasattr(dds, "mtype")):
mtype = dds.mtype
dtype = dds.dtype
if (halo is None):
halo = dds.halo
if (mpidims is None):
mpidims = dds.mpi.shape
if (origin is None):
origin = dds.origin
newDds = \
empty(
shape=shape,
dtype=dtype,
mtype=mtype,
halo=halo,
mpidims=mpidims,
origin=origin,
subdshape=subdshape,
subdorigin=subdorigin
)
newDds.copyMetaData(dds)
return newDds
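# For example (a sketch; the source array name is hypothetical), a mask array
# with the same shape and MPI layout as an existing image could be created as:
#
#   msk = empty_like(img, dtype="uint8")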
def zeros(shape, dtype=None, mtype=None, halo=(0,0,0), mpidims=(0,0,0), origin=(0,0,0)):
dds = empty(shape=shape, dtype=dtype, mtype=mtype, halo=halo, mpidims=mpidims, origin=origin)
dds.setAllToValue(dds.dtype.type(0))
return dds
zeros.__doc__=\
"""
Creates a zero-initialised :obj:`Dds` object of specified global size.
:type shape: 3-sequence
:param shape: Three element sequence indicating the global shape
of the array e.g. :samp:`(zSz, ySz, xSz)`.
:type dtype: :samp:`scipy.dtype`
:param dtype: The data-type of the :obj:`Dds` elements, one of:
%s.
:type mtype: :obj:`mango.mtype`
:param mtype: The mango image mtype of the :obj:`Dds`, one of:
%s.
:type halo: 3-sequence or int
:param halo: The number of voxels added as a layer around each MPI-subdomain.
:type mpidims: 3-sequence
:param mpidims: The shape of the cartesian MPI process layout.
:type origin: 3-sequence
:param origin: The global indexing origin for the :obj:`Dds` array.
:rtype: :obj:`Dds`
:return: :obj:`Dds` with array elements initialised to zero.
""" % \
(
    ", ".join(["'" + e + "'" for e in map(str,_ddsDtypeList)]),
    ", ".join(["'" + e + "'" for e in map(str,_ddsMtypeNameList)])
)
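# For example (sketch):
#
#   z = zeros((8, 8, 8), dtype="float32")   # all elements start at 0.0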
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from collections import defaultdict
from dateutil import rrule
from django.conf import settings
from django.db import connection
from ralph_scrooge.models import (
CostDateStatus,
DailyCost,
DynamicExtraCostType,
ExtraCostType,
PricingService,
ServiceEnvironment,
Team,
UsageType,
)
from ralph_scrooge.plugins import plugin_runner as plugin_runner
from ralph_scrooge.plugins.cost.base import (
NoPriceCostError,
MultiplePriceCostError,
)
from ralph_scrooge.plugins.validations import DataForReportValidator
from ralph_scrooge.utils.common import memoize, AttributeDict
logger = logging.getLogger(__name__)
class VerifiedDailyCostsExistsError(Exception):
pass
class Collector(object):
"""
Costs collector
Collects all costs (of usage types, teams, extra costs and pricing
services) and process them (save in database as tree structure).
All plugins used by collector should return data in following format:
{
<service_environment_id>: [
{
'type': <type or type_id>,
'cost': <cost>,
'_children': [
{
'type': <type or type_id>,
'cost': <cost>,
'_children': [...],
**kwargs
}
],
**kwargs
}
],
<service_environment_id>: [...],
...
}
Notice that:
* kwargs are additional params, that should match DailyCost fields, ex.
pricing_object(_id), value etc.
* _children is optional field, and could be nested infinitely
Example:
{
service_environment1.id: [
{'type': <BaseUsage 1>, 'costs': 100,},
{'type': 2, 'costs': 200, 'value': 40, 'pricing_object_id': 33,},
],
service_environment2.id: [
{
'type': <BaseUsage 1>,
'costs': 100,
'_children': [
{'type': <BaseUsage 2>, 'costs': 500,},
...
]
            },
        ],
    }
"""
def process_period(
self,
start,
end,
forecast,
force_recalculation=False,
**kwargs
):
# calculate costs only if they were not calculated for some date, unless
# force_recalculation is True
dates = self._get_dates(start, end, forecast, force_recalculation)
for day in dates:
try:
self.process(
day,
forecast=forecast,
**kwargs
)
yield day, True
except Exception as e:
logger.exception(e)
yield day, False
def _get_dates(self, start, end, forecast, force_recalculation):
"""
Return dates between start and end for which costs were not previously
calculated.
"""
days = [d.date() for d in rrule.rrule(
rrule.DAILY,
dtstart=start,
until=end
)]
if force_recalculation:
return days
else:
return sorted(set(days) - set(CostDateStatus.objects.filter(
date__gte=start,
date__lte=end,
**{'forecast_calculated' if forecast else 'calculated': True}
).values_list('date', flat=True)))
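    # For example (hypothetical dates): if costs are already calculated for
    # 2016-01-02, then _get_dates(2016-01-01, 2016-01-03, False, False)
    # returns [2016-01-01, 2016-01-03].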
def process(
self,
date,
forecast=False,
delete_verified=False,
plugins=None,
perform_validation=False,
):
"""
Process costs for single date.
Process parts:
0) (optionally) validate costs/prices/usages etc. that will be taken
into account here
1) collect costs from all plugins
2) create DailyCost instances
3) delete previously saved costs (if they were not verified, except the
   situation where delete_verified=True was passed explicitly)
4) save costs in database in tree format
"""
logger.info('Calculating costs (forecast: {}) for date {}'.format(
forecast,
date,
))
self._verify_accepted_costs(date, forecast, delete_verified)
if settings.ENABLE_DATA_FOR_REPORT_VALIDATION and perform_validation:
logger.info('Performing validation of data for costs calculation.')
DataForReportValidator(date, forecast=forecast).validate()
costs = self._collect_costs(
date=date,
forecast=forecast,
plugins=plugins,
)
logger.info('Costs calculated for date {}'.format(date))
return costs
def save_period_costs(self, start, end, forecast, costs):
"""
Save costs for period of time.
:param costs: list of DailyCost instances
"""
self._delete_daily_period_costs(start, end, forecast)
self._save_costs(costs)
self._update_status_period(start, end, forecast)
logger.info('Costs saved for dates {}-{}'.format(start, end))
def _delete_daily_period_costs(self, start, end, forecast):
"""
Delete previously saved costs between start and end (including forecast
flag).
"""
logger.info('Deleting previously saved costs between {} and {}'.format(
start,
end,
))
cursor = connection.cursor()
cursor.execute(
"""
DELETE FROM {}
WHERE date>=%s and date<=%s and forecast=%s
""".format(DailyCost._meta.db_table),
[start, end, forecast]
)
def _verify_accepted_costs(self, date, forecast, delete_verified):
"""
Verify if costs were already accepted for passed day. If yes and
recalculation isn't forced, VerifiedDailyCostsExistsError is raised.
"""
if not delete_verified and CostDateStatus.objects.filter(
date=date,
**{'forecast_accepted' if forecast else 'accepted': True}
):
raise VerifiedDailyCostsExistsError()
def _create_daily_costs(self, date, costs, forecast):
"""
For every service environment in costs create DailyCost instance to
save it in database.
"""
logger.info('Creating daily costs instances for {}'.format(date))
daily_costs = []
for service_environment, se_costs in costs.iteritems():
# use _build_tree directly, to collect DailyCosts for all services
# and save all at the end
daily_costs.extend(DailyCost._build_tree(
tree=se_costs,
date=date,
service_environment_id=service_environment,
forecast=forecast,
))
return daily_costs
def _save_costs(self, daily_costs):
"""
Save daily_costs in database.
:param daily_costs: list instances of DailyCost
"""
logger.info('Saving {} costs'.format(len(daily_costs)))
DailyCost.objects.bulk_create(
daily_costs,
batch_size=settings.DAILY_COST_CREATE_BATCH_SIZE,
)
def _update_status(self, date, forecast):
"""
Update status for given date that costs were calculated (including
forecast flag).
"""
# update status to created
status, created = CostDateStatus.objects.get_or_create(date=date)
if forecast:
status.forecast_calculated = True
else:
status.calculated = True
status.save()
def _update_status_period(self, start, end, forecast):
"""
Update status between start and end that costs were calculated
(including forecast flag).
"""
for day in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
self._update_status(day, forecast)
def _collect_costs(
self,
date,
forecast=False,
plugins=None
):
"""
Collects costs from all plugins and stores them per service environment
"""
logger.debug("Getting report data")
old_queries_count = len(connection.queries)
data = defaultdict(list)
for i, plugin in enumerate(plugins or self.get_plugins()):
try:
plugin_old_queries_count = len(connection.queries)
plugin_report = plugin_runner.run_plugin(
'scrooge_costs',
plugin.plugin_name,
date=date,
forecast=forecast,
type='costs',
**{str(k): v for (k, v) in plugin['plugin_kwargs'].items()}
)
for service_id, service_usage in plugin_report.iteritems():
data[service_id].extend(service_usage)
plugin_queries_count = (
len(connection.queries) - plugin_old_queries_count
)
if settings.DEBUG:
logger.debug('Plugin SQL queries: {0}\n'.format(
plugin_queries_count
))
except KeyError:
logger.warning(
"Usage '{0}' has no usage plugin\n".format(plugin.name)
)
except NoPriceCostError:
logger.warning('No costs defined\n')
except MultiplePriceCostError:
logger.warning('Multiple costs defined\n')
except Exception as e:
logger.exception(
"Error while generating the report: {0}\n".format(e)
)
raise
queries_count = len(connection.queries) - old_queries_count
if settings.DEBUG:
logger.debug('Total SQL queries: {0}'.format(queries_count))
return data
    def calculate_daily_costs_for_day(self, day, forecast, plugins):
        """
A convenience wrapper around three other methods.
"""
processed_costs = self.process(day, forecast, plugins=plugins)
daily_costs = self._create_daily_costs(day, processed_costs, forecast)
start = end = day
self.save_period_costs(start, end, forecast, daily_costs)
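    # Typical driver code (a sketch; the date is hypothetical):
    #
    #   collector = Collector()
    #   collector.calculate_daily_costs_for_day(
    #       datetime.date(2016, 1, 1), forecast=False,
    #       plugins=Collector.get_plugins())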
@classmethod
    def _get_services_environments(cls):
        """
        Returns all service environments for which the report will be
        generated.
        :returns: list of service environments
        :rtype: list
        """
logger.debug("Getting services environments")
services = ServiceEnvironment.objects.all()
logger.debug("Got {0} services".format(services.count()))
return services
@classmethod
@memoize
def get_plugins(cls):
"""
        Returns a list of plugins to call, with information about each,
        such as name and arguments
"""
extra_cost_types_plugins = cls._get_extra_cost_types_plugins()
support_plugins = cls._get_support_plugins()
dynamic_extra_cost_types_plugins = (
cls._get_dynamic_extra_cost_types_plugins()
)
base_usage_types_plugins = cls._get_base_usage_types_plugins()
regular_usage_types_plugins = cls._get_regular_usage_types_plugins()
services_plugins = cls._get_pricing_services_plugins()
teams_plugins = cls._get_teams_plugins()
plugins = (base_usage_types_plugins + regular_usage_types_plugins +
teams_plugins + support_plugins + extra_cost_types_plugins +
dynamic_extra_cost_types_plugins + services_plugins)
return plugins
@classmethod
def _get_base_usage_types(cls, filter_=None):
"""
Returns base usage types which should be visible on report
"""
logger.debug("Getting usage types")
query = UsageType.objects.filter(
usage_type='BU',
)
if filter_:
query = query.filter(**filter_)
return query.order_by('-order', 'name')
@classmethod
def _get_base_usage_types_plugins(cls, filter_=None):
"""
Returns plugins information (name and arguments) for base usage types
"""
base_usage_types = cls._get_base_usage_types(filter_)
result = []
for but in base_usage_types:
but_info = AttributeDict(
name=but.name,
plugin_name=but.get_plugin_name(),
plugin_kwargs={
'usage_type': but,
}
)
result.append(but_info)
return result
@classmethod
def _get_regular_usage_types(cls, filter_=None):
"""
Returns regular usage types which should be visible on report
"""
query = UsageType.objects.filter(
usage_type='RU',
)
if filter_:
query = query.filter(**filter_)
return query.order_by('-order', 'name')
@classmethod
def _get_regular_usage_types_plugins(cls, filter_=None):
"""
Returns plugins information (name and arguments) for regular usage
types
"""
regular_usage_types = cls._get_regular_usage_types(filter_)
result = []
for rut in regular_usage_types:
rut_info = AttributeDict(
name=rut.name,
plugin_name=rut.get_plugin_name(),
plugin_kwargs={
'usage_type': rut,
}
)
result.append(rut_info)
return result
@classmethod
def _get_pricing_services(cls):
"""
Returns services which should be visible on report
"""
return PricingService.objects.order_by('id')
@classmethod
def _get_pricing_services_plugins(cls):
"""
Returns plugins information (name and arguments) for services
"""
pricing_services = cls._get_pricing_services()
result = []
for pricing_service in pricing_services:
pricing_service_info = AttributeDict(
name=pricing_service.name,
plugin_name=pricing_service.get_plugin_name(),
plugin_kwargs={
'pricing_service': pricing_service
}
)
result.append(pricing_service_info)
return result
@classmethod
def _get_teams(cls):
"""
Returns teams which should be visible on report
"""
return Team.objects.order_by('name')
@classmethod
def _get_teams_plugins(cls):
"""
Returns information about team plugins for each team
"""
teams = cls._get_teams()
result = []
for team in teams:
team_info = AttributeDict(
name=team.name,
plugin_name='team_plugin',
plugin_kwargs={
'team': team
}
)
result.append(team_info)
return result
@classmethod
def _get_extra_cost_types(cls):
"""
Returns all extra costs (excluding supports)
"""
# exclude supports (from fixture)
return ExtraCostType.objects.exclude(
pk=2
).order_by('name')
@classmethod
def _get_extra_cost_types_plugins(cls):
"""
Returns information about extra cost plugins for each extra cost
"""
extra_costs = cls._get_extra_cost_types()
result = []
for extra_cost in extra_costs:
extra_cost_info = AttributeDict(
name=extra_cost.name,
plugin_name='extra_cost_plugin',
plugin_kwargs={
'extra_cost_type': extra_cost,
}
)
result.append(extra_cost_info)
return result
@classmethod
def _get_support_plugins(cls):
return [
AttributeDict(
name='support',
plugin_name='support_plugin',
plugin_kwargs={},
)
]
@classmethod
def _get_dynamic_extra_cost_types(cls):
"""
Returns all extra costs
"""
return DynamicExtraCostType.objects.order_by(
'name'
)
@classmethod
def _get_dynamic_extra_cost_types_plugins(cls):
"""
Returns information about extra cost plugins for each extra cost
"""
dynamic_extra_costs = cls._get_dynamic_extra_cost_types()
result = []
for dynamic_extra_cost in dynamic_extra_costs:
cwidth = renwidth
if self.style.yfill:
cheight = renheight
width = cwidth * cols + padding * (cols - 1)
height = cheight * rows + padding * (rows - 1)
rv = renpy.display.render.Render(width, height)
offsets = [ ]
for y in range(0, rows):
for x in range(0, cols):
child = children[ x + y * cols ]
surf = renders[x + y * cols]
xpos = x * (cwidth + padding)
ypos = y * (cheight + padding)
offset = child.place(rv, xpos, ypos, cwidth, cheight, surf)
offsets.append(offset)
if self.transpose:
self.offsets = [ ]
for x in range(cols):
for y in range(rows):
self.offsets.append(offsets[y * cols + x])
else:
self.offsets = offsets
return rv
class IgnoreLayers(Exception):
"""
Raise this to have the event ignored by layers, but reach the
underlay.
"""
pass
class MultiBox(Container):
layer_name = None
first = True
order_reverse = False
def __init__(self, spacing=None, layout=None, style='default', **properties):
if spacing is not None:
properties['spacing'] = spacing
super(MultiBox, self).__init__(style=style, **properties)
self.default_layout = layout
# The start and animation times for children of this
# box.
self.start_times = [ ]
self.anim_times = [ ]
# A map from layer name to the widget corresponding to
# that layer.
self.layers = None
# The scene list for this widget.
self.scene_list = None
def _clear(self):
super(MultiBox, self)._clear()
self.start_times = [ ]
self.anim_times = [ ]
self.layers = None
self.scene_list = None
def _in_old_scene(self):
if self.layer_name is not None:
if self.scene_list is None:
return self
scene_list = [ ]
changed = False
for old_sle in self.scene_list:
new_sle = old_sle.copy()
d = new_sle.displayable._in_old_scene()
if d is not new_sle.displayable:
new_sle.displayable = d
changed = True
scene_list.append(new_sle)
if not changed:
return self
rv = MultiBox(layout=self.default_layout)
rv.layer_name = self.layer_name
rv.append_scene_list(scene_list)
elif self.layers:
rv = MultiBox(layout=self.default_layout)
rv.layers = { }
changed = False
for layer in renpy.config.layers:
old_d = self.layers[layer]
new_d = old_d._in_old_scene()
if new_d is not old_d:
changed = True
rv.add(new_d)
rv.layers[layer] = new_d
if not changed:
return self
else:
return self
if self.offsets:
rv.offsets = list(self.offsets)
if self.start_times:
rv.start_times = list(self.start_times)
if self.anim_times:
rv.anim_times = list(self.anim_times)
return rv
def __unicode__(self):
layout = self.style.box_layout
if layout is None:
layout = self.default_layout
if layout == "fixed":
return "Fixed"
elif layout == "horizontal":
return "HBox"
elif layout == "vertical":
return "VBox"
else:
return "MultiBox"
def add(self, widget, start_time=None, anim_time=None): # W0221
super(MultiBox, self).add(widget)
self.start_times.append(start_time)
self.anim_times.append(anim_time)
def append_scene_list(self, l):
for sle in l:
self.add(sle.displayable, sle.show_time, sle.animation_time)
if self.scene_list is None:
self.scene_list = [ ]
self.scene_list.extend(l)
def render(self, width, height, st, at):
# Do we need to adjust the child times due to our being a layer?
if self.layer_name or (self.layers is not None):
adjust_times = True
else:
adjust_times = False
xminimum = self.style.xminimum
if xminimum is not None:
width = max(width, scale(xminimum, width))
yminimum = self.style.yminimum
if yminimum is not None:
height = max(height, scale(yminimum, height))
if self.first:
self.first = False
if adjust_times:
it = renpy.game.interface.interact_time
self.start_times = [ i or it for i in self.start_times ]
self.anim_times = [ i or it for i in self.anim_times ]
layout = self.style.box_layout
if layout is None:
layout = self.default_layout
self.layout = layout # W0201
else:
layout = self.layout
# Handle time adjustment, store the results in csts and cats.
if adjust_times:
t = renpy.game.interface.frame_time
csts = [ t - start for start in self.start_times ]
cats = [ t - anim for anim in self.anim_times ]
else:
csts = [ st ] * len(self.children)
cats = [ at ] * len(self.children)
offsets = [ ]
if layout == "fixed":
rv = None
if self.style.order_reverse:
iterator = zip(reversed(self.children), reversed(csts), reversed(cats))
else:
iterator = zip(self.children, csts, cats)
for child, cst, cat in iterator:
surf = render(child, width, height, cst, cat)
if rv is None:
if self.style.fit_first:
width, height = surf.get_size()
rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)
if surf:
offset = child.place(rv, 0, 0, width, height, surf)
offsets.append(offset)
else:
offsets.append((0, 0))
if rv is None:
rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)
if self.style.order_reverse:
offsets.reverse()
self.offsets = offsets
return rv
# If we're here, we have a box, either horizontal or vertical. Which is good,
# as we can share some code between boxes.
spacing = self.style.spacing
first_spacing = self.style.first_spacing
if first_spacing is None:
first_spacing = spacing
spacings = [ first_spacing ] + [ spacing ] * (len(self.children) - 1)
box_wrap = self.style.box_wrap
xfill = self.style.xfill
yfill = self.style.yfill
# The shared height and width of the current line. The line_height must
# be 0 for a vertical box, and the line_width must be 0 for a horizontal
# box.
line_width = 0
line_height = 0
# The children to layout.
children = list(self.children)
if self.style.box_reverse:
children.reverse()
spacings.reverse()
# a list of (child, x, y, w, h, surf) tuples that are turned into
# calls to child.place().
placements = [ ]
# The maximum x and y.
maxx = 0
maxy = 0
def layout_line(line, xfill, yfill):
"""
Lays out a single line.
`line` a list of (child, x, y, surf) tuples.
`xfill` the amount of space to add in the x direction.
`yfill` the amount of space to add in the y direction.
"""
xfill = max(0, xfill)
yfill = max(0, yfill)
if line:
xperchild = xfill / len(line)
yperchild = yfill / len(line)
else:
xperchild = 0
yperchild = 0
maxxout = maxx
maxyout = maxy
for i, (child, x, y, surf) in enumerate(line):
sw, sh = surf.get_size()
sw = max(line_width, sw)
sh = max(line_height, sh)
x += i * xperchild
y += i * yperchild
sw += xperchild
sh += yperchild
placements.append((child, x, y, sw, sh, surf))
maxxout = max(maxxout, x + sw)
maxyout = max(maxyout, y + sh)
return maxxout, maxyout
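        # For example, with xfill=30 and a line of three children, each child
        # is widened by xperchild=10 and the i-th child is shifted right by
        # i * 10, so the extra space is distributed evenly along the line.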
x = 0
y = 0
full_width = False
full_height = False
if layout == "horizontal":
full_height = yfill
line_height = 0
line = [ ]
remwidth = width
for d, padding, cst, cat in zip(children, spacings, csts, cats):
if box_wrap:
rw = width
else:
rw = remwidth
surf = render(d, rw, height - y, cst, cat)
sw, sh = surf.get_size()
if box_wrap and remwidth - sw - padding <= 0 and line:
maxx, maxy = layout_line(line, remwidth if xfill else 0, 0)
y += line_height
x = 0
line_height = 0
remwidth = width
line = [ ]
line.append((d, x, y, surf))
line_height = max(line_height, sh)
x += sw + padding
remwidth -= (sw + padding)
maxx, maxy = layout_line(line, remwidth if xfill else 0, 0)
elif layout == "vertical":
full_width = xfill
line_width = 0
line = [ ]
remheight = height
for d, padding, cst, cat in zip(children, spacings, csts, cats):
if box_wrap:
rh = height
else:
rh = remheight
surf = render(d, width - x, rh, cst, cat)
sw, sh = surf.get_size()
if box_wrap and remheight - sh - padding <= 0:
maxx, maxy = layout_line(line, 0, remheight if yfill else 0)
x += line_width
y = 0
line_width = 0
remheight = height
line = [ ]
line.append((d, x, y, surf))
line_width = max(line_width, sw)
y += sh + padding
remheight -= (sh + padding)
maxx, maxy = layout_line(line, 0, remheight if yfill else 0)
else:
raise Exception("Unknown box layout: %r" % layout)
# Back to the common for vertical and horizontal.
if not xfill:
width = maxx
if not yfill:
height = maxy
rv = renpy.display.render.Render(width, height)
if self.style.box_reverse ^ self.style.order_reverse:
placements.reverse()
for child, x, y, w, h, surf in placements:
if full_width:
w = width
if full_height:
h = height
offset = child.place(rv, x, y, w, h, surf)
offsets.append(offset)
if self.style.order_reverse:
offsets.reverse()
self.offsets = offsets
return rv
def event(self, ev, x, y, st):
children_offsets = zip(self.children, self.offsets, self.start_times)
if not self.style.order_reverse:
children_offsets.reverse()
try:
for i, (xo, yo), t in children_offsets:
if t is None:
cst = st
else:
cst = renpy.game.interface.event_time - t
rv = i.event(ev, x - xo, y - yo, cst)
#!/usr/bin/python
import sys
import re
residueNameMap_3to1 = {'ALA':'A','ARG':'R','ASN':'N','ASP':'D','ASX':'B',\
'CYS':'C','SCY':'C','GLU':'E','GLN':'Q','GLX':'Z','GLY':'G',\
'HIS':'H','ILE':'I','LEU':'L','LYS':'K',\
'MET':'M','PHE':'F','PRO':'P','SER':'S',\
'THR':'T','TRP':'W','TYR':'Y','VAL':'V',\
'UNK':'X'}
# residueNameMap_3to1 : ASX : ASparagine or aspartic acid; GLX: Glutamine or Glutamic acid
# 'SCY': S-Acetyl-Cysteine 'C', charge 0
def usage():
print """
    structureParser classes can parse PDB and DSSP
    files important for Contact String Analysis. This is a
    part of the ProLego_CS_v3 suite.
start date: 12th July, 2016
author : taushif
1. class PDB:
get instance of the class
read pdbFile (readPDB)
(parserPDB) parse pdb file to get chain information of the first model (in case of NMR)
(chainParser) get atomlines and residue information from a chain
(getatom) coordinate information of each residue
2. class DSSP:
runDssp
parseChain
"""
class PDB():
"""PDB structure parser for the code
start the instance with file path PDB(<file path>)
call following with instances:
1. readPDB() --> return status of file read
2. parserPDB() --> Parse into models [populates chain details in dictionary chainDetail]
3. chainParse(Chain) --> populates self.resDict = {} self.atomLines = [] self.resCord = {}
Functions:
1. getatom(self, resId) : doc in respective def
"""
def __init__(self,fileName):
"""
Parsing a PDB formatted file call PDB(<filename>)
call readPDB and parserPDB
"""
self.fileName = fileName
self.pdb_file = []
self.chainDetail = {}
def readPDB(self):
"""
read pdb file given as input
"""
try:
with open(self.fileName,'r') as f:
self.pdb_file = [i.strip() for i in f.readlines()]
print ("PDB file has been read successfully [Okay]")
return 1
except:
print ("Error in File Read: Check Uploaded PDB")
return 0
def parserPDB(self):
"""
parse PDB FIle
"""
print "Follows a strict PDB format for Chain and Model Recognition.\n"
tmp = []
if self.readPDB():
for li in range(len(self.pdb_file)):
l = self.pdb_file[li]
if l[0:6].strip() == 'ATOM':
tmp.append(l.strip())
if l[0:6].strip() == 'TER':
chain = self.pdb_file[li-1][21]
self.chainDetail[chain] = tmp
tmp = []
if l[0:6].strip() == 'END' or l[0:6].strip() == 'ENDMDL':
break
print "Total Chain = %d "%(len(self.chainDetail.keys()))
return 1
else:
print ("Could not parse: PDB file read failed")
return 0
def chainParser(self,chain):
"""Chain Parser : details of a particular chain
Args:
chain (str): chain of a pdb chain can be found from chainDetails
Populate the attributes
"""
self.resDict = {}
self.atomLines = []
self.resCord = {}
if chain in self.chainDetail.keys():
for atom_line in self.chainDetail[chain]:
resId = int(atom_line[22:26].strip())
atmId = atom_line[12:16].strip()
self.atomLines.append(atom_line)
self.resDict[resId] = atom_line[17:20].strip()
if resId in self.resCord.keys():
self.resCord[resId][atmId] = (float(atom_line[30:38]),float(atom_line[38:46]),float(atom_line[46:54]))
else:
self.resCord[resId] = {}
self.resCord[resId][atmId] = (float(atom_line[30:38]),float(atom_line[38:46]),float(atom_line[46:54]))
return self.resDict
else:
print "Error 1 !! Chain Not found."
return 0
def getatom(self,resId):
""" getatom : given a residue Id returns (x,y,z) coordinates
@Args:
resId (int) --> residue number in the PDB
@return:
atomCoord (dict) --> atom wise x,y,z coordinates
"""
atomCoord = {}
for i in self.atomLines:
if int(i[22:26].strip()) == resId:
atomCoord[i[12:16].strip()] = (float(i[30:38]),float(i[38:46]),float(i[46:54]))
if atomCoord.keys():
return atomCoord
else:
print resId," is not a valid residue Number [Error 1]"
return 0
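    # Example usage (a sketch; the file name and residue number are
    # hypothetical):
    #
    #   p = PDB('1abc.pdb')
    #   p.parserPDB()
    #   p.chainParser('A')
    #   coords = p.getatom(10)   # {'N': (x,y,z), 'CA': (x,y,z), ...}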
def write_PDBChain(self,chain):
"""writing coordinates of a chain
@Args:
chain (str) --> chain of a PDB file
@return:
status (0/1) --> fail/Success
pdbCh_name (str) --> detail file name of the chain written
"""
pdbCh_name = '%s_%s.pdb'%(self.fileName,chain)
pFl = open(pdbCh_name,'w')
if chain in self.chainDetail.keys():
for l in self.chainDetail[chain]:
                if l[12:16].strip().startswith('H'):
                    # skip hydrogen atoms
                    pass
else:
pFl.write("%s\n"%l)
pFl.close()
print ("Chain Written in %s"%pdbCh_name)
return 1,pdbCh_name
else:
print "Error 1: Chain Not Present, could not write"
return 0,'0'
def get_PDBFasta(self,pdbId='prot_name',chain='chain_',comment='PDB Fasta',outType=1):
"""sequence in fasta format from PDB information
        Note: writes the fasta sequence from the residues of a PDB file.
@Args:
pdbId (str)-> Id of the pdb File 4 letter (default: prot_name)
chain (str) -> chain Id (default: chain_)
comment (str) -> comment about the structure
@return:
fastaString : string character '>pdbId:Chain|residues\nSEQUENCE'
"""
self.fastaSeq = ''
fastaHeader = ">%s_%s|%d|%s"%(pdbId,chain,len(self.resDict.keys()),comment)
if self.resDict.keys():
for k in self.resDict.keys():
try:
self.fastaSeq += residueNameMap_3to1[self.resDict[k]]
except KeyError:
print k,self.resDict[k],"Not found in List, \n Aborting Updatelist"
else:
print "residue dictionary is empty. see the PDB docstring"
print "Got sequence of length %s"%len(self.fastaSeq)
if outType:
fastaString = "%s\n%s"%(fastaHeader,self.fastaSeq)
return fastaString
else:
return self.fastaSeq
    def _getSSE_(self):
        """get secondary structure information from the structure file
        Note: parse sse information from HELIX/SHEET/TURN records
"""
self.sse = {'HELIX':[],'SHEET':[],'TURN':[]}
x = 0
for l in self.pdb_file:
            if l[0:6].strip() in self.sse.keys():
                self.sse[l[0:6].strip()].append(l.strip())
x = 1
else:
continue
if x:
return self.sse
else:
print ("HELIX/SHEET/TURN records seem to be absent in the PDB file")
return 0
def __sseHelixGroup__(self,hlx):
hlxGroup = {1:'rAlp',2:'rOmg',3:'rPi',4:'rGam',5:'r3t',6:'lAlp',7:'lOmg',8:'lGam',9:'ribb',10:'ppII'}
hlxChainWise = {}
for h in hlx:
chain = h[19]
helId = 'H'+h[7:10].strip()
n_res = int(h[21:25].strip())
c_res = int(h[33:37].strip())
hgrp = int(h[38:40].strip())
if chain in hlxChainWise.keys():
hlxChainWise[chain].update({n_res:[helId,n_res,c_res,hlxGroup[hgrp]]})
else:
hlxChainWise[chain] = {}
hlxChainWise[chain].update({n_res:[helId,n_res,c_res,hlxGroup[hgrp]]})
# hlxChainWise[chain].append([])
return hlxChainWise
def __sseSheetGroup__(self,sheet):
shtChainWise = {}
for strnd in sheet:
chain = strnd[21]
strnId = 'E'+strnd[7:10].strip()
n_res = int(strnd[22:26].strip())
c_res = int(strnd[34:37].strip())
sheetName = strnd[11:14].strip()
if chain in shtChainWise.keys():
shtChainWise[chain].update({n_res:[strnId,n_res,c_res,sheetName]})
else:
shtChainWise[chain] = {}
shtChainWise[chain].update({n_res:[strnId,n_res,c_res,sheetName]})
return shtChainWise
def pdbSSE(self):
"""
P = PDB(pfile)
sseChain = P.pdbSSE()
        @param: path of pdb file from RCSB [all details]
        @return: Secondary structure information chain wise
"""
self.readPDB()
try:
sse = self._getSSE_()
hlxChain = self.__sseHelixGroup__(sse['HELIX'])
shtChain = self.__sseSheetGroup__(sse['SHEET'])
chains = list(set(hlxChain.keys()+shtChain.keys()))
chainSSE = {}
for c in chains:
if c in hlxChain.keys():
t = hlxChain[c].copy()
if c in shtChain.keys():
t.update(shtChain[c])
else:
pass
else:
t = shtChain[c].copy()
chainSSE[c] = t
return chainSSE
except:
print ("Error in PDB File ")
return 0
class DSSP:
def __init__(self):
self.pdbChain = ''
self.dsspLines = []
self.alInfo = []
self.majSSE= [] # list of SSE H/E in the sequenctial order
self.pdbid = ''
self.sse_index = {'H':[],'E':[],'I':[],'G':[],'B':[],'S':[],'T':[],'C':[]}
self.resCalpha = {}
self.dsspToPdbMap = {}
self.dihedrals = {}
def runDssp(self, pdbChain):
self.pdbChain = pdbChain
import subprocess
outFile = '%s.dssp'%(self.pdbChain)
dssp = 'dssp -i %s -o %s'%(self.pdbChain,outFile)
try:
print dssp
subprocess.Popen(dssp, shell= True).wait()
print "DSSP successfully created"
return 1, outFile
except:
print ("Error 1.2: in running dssp")
return 0, 0
def readDSSP(self, dsspFl):
"""
reading dssp file, returns 1 or 0
"""
try:
with open(dsspFl,'r') as f:
self.dsspLines = [i for i in f.readlines()]
print ("DSSP file has been read successfully [Okay]")
return 1
except:
print ("Error 2.1: DSSP Error parsing unsuccesful\n")
return 0
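    # Example pipeline (a sketch; assumes the 'dssp' executable is on PATH
    # and the chain file name is hypothetical):
    #
    #   d = DSSP()
    #   ok, out = d.runDssp('1abc_A.pdb')
    #   if ok and d.readDSSP(out):
    #       d.dsspParseChain()
    #       d.getMajorSSE()   # populates d.majSSE with H/E/C segments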
def dsspParseChain(self):
"""
@param
f: filehandler of file in a list
"""
start =0
for l in self.dsspLines:
chk=l[10:12].strip()
if re.search('HEADER',l):
self.pdbid=l[62:66].strip()
continue
if re.search('#',l):
start=1
continue
if start: #and chk == chain:
tmpres = {}
try:
#pdb residue number
residue = int(l[5:10].strip())
self.dihedrals[residue] = {'kappa':0.0,'alpha':0.0,'phi':0.0,'psi':0.0}
#building empty dictionary for that residue
tmpres[residue] = {'aa': '', 'sse': '', 'acc': '', 'catom': ()}
# amino acid name
tmpres[residue]['aa'] = l[12:14].strip()
# sse type
sse_tmp = l[14:17].strip()
if re.match('[GHITEBS]', sse_tmp):
tmpres[residue]['sse'] = sse_tmp
self.sse_index[sse_tmp].append(residue)
else:
tmpres[residue]['sse'] = 'C'
self.sse_index['C'].append(residue)
#accessible solvent area
tmpres[residue]['acc'] = int(l[34:38].strip())
# calpha x, y and z
x = float(l[117:122].strip())
y = float(l[124:129].strip())
z = float(l[131:].strip())
tmpres[residue]['catom'] = (x, y, z)
self.dihedrals[residue]['kappa'] = float(l[91:97].strip())
self.dihedrals[residue]['alpha'] = float(l[97:103].strip())
self.dihedrals[residue]['phi'] = float(l[103:109].strip())
self.dihedrals[residue]['psi'] = float(l[109:115].strip())
# putting all in resdie information
self.resCalpha[int(l[5:10].strip())] = (x,y,z)
self.alInfo.append(tmpres)
self.dsspToPdbMap[int(l[5:10].strip())] = int(l[:6].strip())
except:
continue
            elif not chk:
                # a blank chain field ends the chain's records
                start = 0
def get_cacoord(self,resid_list):
calpha_residues = []
for res in resid_list:
calpha_residues.append(self.resCalpha[res])
return calpha_residues
def getOnlyAlpBeta(self,sse,res,c):
tmp = {}
if re.match(r'[H]',sse): # helix definition restricted to alpha helix (H) only, not G/I
c += 1
strI = 'H'+str(c)
tmp[strI]=res
self.majSSE.append(tmp)
return (c)
elif re.match(r'E',sse):
c += 1
strI = 'E'+str(c)
tmp[strI]=res
self.majSSE.append(tmp)
return (c)
else:
tmp['C']=res
self.majSSE.append(tmp)
return(c)
def getMajorSSE(self):
        '''This method stores different SSE information in separate lists
        keyed by SSE type. Input to this method is the SSE list and output
        will be a dictionary.
        li is the list of sse information and res is the residue info'''
# declaring has and list
sseInfo={'H':[],'G':[],'I':[],'T':[],'E':[],'B':[],'S':[],'C':[]}
listOfsse = []
res = []
li = [] # sse information
for r in self.alInfo:
resNo = r.keys()[0]
res.append(resNo)
li.append(r[resNo]['sse'])
hd=[]
counter = 0
# import ipdb; ipdb.set_trace();
for i in range(len(li)-1): # iterating over the first len-1 elements of the list
if li[i] != li[i+1]:
hd.append(res[i])
# print hd
sseInfo[li[i]].append(hd)
counter = self.getOnlyAlpBeta(li[i],hd,counter)
hd=[]
if i+1 == len(li)-1:
hd.append(res[i+1])
sseInfo[li[i+1]].append(hd)
counter = self.getOnlyAlpBeta(li[i+1],hd,counter)
else:
if i+1 == len(li)-1:
hd.append(res[i+1])
sseInfo[li[i]].append(hd)
counter = self.getOnlyAlpBeta(li[i], hd, counter)
space point was\n" + str(PS_point))
del new_PS_point[R]
return new_PS_point, vars
@classmethod
def map_to_higher_multiplicity(
cls, PS_point, singular_structure, momenta_dict, kinematic_variables,
compute_jacobian=False ):
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
needed_variables = set(
cls.get_kinematic_variables_names(singular_structure, momenta_dict) )
assert needed_variables.issubset(kinematic_variables.keys())
# Build reduced PS point, singular structure & masses to call FinalMassesMapping
reduced_PS_point = PS_point.get_copy()
reduced_singular_structure = sub.SingularStructure()
reduced_kinematic_variables = dict()
parents = []
Q = LorentzVector()
# First build pseudo-particles for the collinear sets
for substructure in singular_structure.substructures:
parent, _, _ = get_structure_numbers(substructure, momenta_dict)
parent_leg = sub.SubtractionLeg(parent, 0, sub.SubtractionLeg.FINAL)
reduced_singular_structure.substructures.append(sub.CollStructure(parent_leg))
s_i = 's' + str(parent)
reduced_kinematic_variables[s_i] = kinematic_variables[s_i]
parents.append(parent)
Q += PS_point[parent]
# Then treat recoilers collectively if any
recoilers = [leg.n for leg in singular_structure.legs]
mass_sum = sum(s ** 0.5 for s in reduced_kinematic_variables.values())
R = None
qR = None
s_R = 0.
if recoilers:
if len(recoilers) == 1:
R = recoilers[0]
else:
R = max(momenta_dict.keys()) + 1
qR = sum(PS_point[r] for r in recoilers)
reduced_PS_point[R] = LorentzVector(qR)
recoiler_leg = sub.SubtractionLeg(R, 0, sub.SubtractionLeg.FINAL)
reduced_singular_structure.substructures.append(
sub.CollStructure(recoiler_leg) )
Q += reduced_PS_point[R]
s_R = reduced_PS_point[R].square()
reduced_kinematic_variables['s'+str(R)] = s_R
# If the mapping is not valid, undo all changes and raise
Q2 = Q.square()
if (s_R ** 0.5) > (Q2 ** 0.5 - mass_sum):
raise FailedMapping
# Perform mapping of parent momenta to target masses
new_PS_point, vars = FinalMassesMapping.map_to_higher_multiplicity(
reduced_PS_point, reduced_singular_structure, momenta_dict,
reduced_kinematic_variables, compute_jacobian )
# Set the kinematic_variables
for parent in parents:
children = momenta_dict[parent]
na, nb = FinalCollinearVariables.collinear_and_reference(PS_point[parent])
FinalCollinearVariables.set(
new_PS_point, parent, children, na, nb, kinematic_variables)
# Boost recoilers
if len(recoilers) > 1:
pR = new_PS_point[R]
for recoiler in recoilers:
# TODO Move this try/except to higher level
try:
new_PS_point[recoiler].rotoboost(qR, pR)
# new_PS_point[recoiler].boost_from_to(qR, pR)
except:
logger.critical(
"Problem encountered for %s" % str(singular_structure))
logger.critical("The full phase space point was\n%s" % str(PS_point))
del new_PS_point[R]
return new_PS_point, vars
@classmethod
def can_map_to_higher_multiplicity(
cls, PS_point, singular_structure, momenta_dict, kinematic_variables, ):
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
needed_variables = set(
cls.get_kinematic_variables_names(singular_structure, momenta_dict) )
assert needed_variables.issubset(kinematic_variables.keys())
Q = LorentzVector()
mass_sum = 0.
for substructure in singular_structure.substructures:
parent, _, _ = get_structure_numbers(substructure, momenta_dict)
Q += PS_point[parent]
sqr = kinematic_variables['s'+str(parent)]
mass_sum += sqr ** 0.5
# Then treat recoilers collectively if any
recoilers = [leg.n for leg in singular_structure.legs]
qR = LorentzVector()
if recoilers:
qR = sum(PS_point[r] for r in recoilers)
Q += qR
qR2 = qR.square()
Q2 = Q.square()
return (qR2 ** 0.5) <= (Q2 ** 0.5 - mass_sum)
@classmethod
def can_map_to_lower_multiplicity(
cls, PS_point, singular_structure, momenta_dict, squared_masses=None ):
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
Q = LorentzVector()
mass_sum = 0.
for substructure in singular_structure.substructures:
parent, _, _ = get_structure_numbers(substructure, momenta_dict)
Q += PS_point[parent]
if squared_masses is None:
sqr = 0
else:
sqr = squared_masses['m2' + str(parent)]
mass_sum += sqr ** 0.5
# Then treat recoilers collectively if any
recoilers = [leg.n for leg in singular_structure.legs]
qR = LorentzVector()
if recoilers:
qR = sum(PS_point[r] for r in recoilers)
Q += qR
qR2 = qR.square()
Q2 = Q.square()
return (qR2 ** 0.5) <= (Q2 ** 0.5 - mass_sum)
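    # Both "can_map" checks above implement the same kinematic condition:
    # sqrt(qR^2) <= sqrt(Q^2) - sum_i sqrt(s_i), i.e. the invariant mass of
    # the recoiler system must fit within the total invariant mass left
    # after assigning the target masses to the collinear parents.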
#=========================================================================================
# Initial-collinear mappings
#=========================================================================================
# Generic initial-collinear mapping parent class
#=========================================================================================
class InitialCollinearMapping(VirtualMapping):
"""Common functions for initial-collinear elementary mappings."""
@classmethod
def is_valid_structure(cls, singular_structure):
assert isinstance(singular_structure, sub.SingularStructure)
# Valid only for sets of initial-state particles going collinear,
# with no recursive substructure
for substructure in singular_structure.substructures:
if not substructure.name() == "C":
return False
if substructure.substructures:
return False
if not substructure.get_all_legs().has_initial_state_leg():
return False
# There cannot be more than two collinear sets with initial-state particles
if len(singular_structure.substructures) > 2:
return False
return True
@classmethod
def get_kinematic_variables_names(cls, singular_structure, momenta_dict):
"""Get the names of variables describing particles going unresolved."""
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
names = []
for substructure in singular_structure.substructures:
parent, fs_children, is_child = get_structure_numbers(
substructure, momenta_dict )
names += InitialCollinearVariables.names(parent, fs_children, is_child)
return names
@classmethod
def rescale_kinematic_variables(
cls, singular_structure, momenta_dict, kinematic_variables, scaling_parameter):
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
needed_variables = set(
cls.get_kinematic_variables_names(singular_structure, momenta_dict))
assert needed_variables.issubset(kinematic_variables.keys())
# Precompute sets and numbers
substructure = singular_structure.substructures[0]
_, fs_children, is_child = get_structure_numbers(substructure, momenta_dict)
# Determine the correct scaling for the divergence to go like 1/parameter
base = scaling_parameter ** (0.5 / len(fs_children))
kinematic_variables['s' + str(is_child)] *= base ** 2
kinematic_variables['kt' + str(is_child)] *= base
for child in fs_children:
kinematic_variables['kt' + str(child)] *= base
return kinematic_variables
# Initial-collinear Lorentz mapping, one set
#=========================================================================================
class InitialLorentzOneMapping(InitialCollinearMapping):
"""Implementation of the Lorentz transformation mapping
for one set of collinear particles (with massless parent)
and an arbitrary number of (eventually massive) recoilers.
"""
@classmethod
def is_valid_structure(cls, singular_structure):
# Valid only for one set of particles going collinear to an initial-state parton,
# with no recursive substructure
if len(singular_structure.substructures) == 1:
return super(InitialLorentzOneMapping, cls).is_valid_structure(
singular_structure )
return False
@classmethod
def map_to_lower_multiplicity(
cls, PS_point, singular_structure, momenta_dict, squared_masses=None,
kinematic_variables=None, compute_jacobian=False ):
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
# Precompute sets and numbers
substructure = singular_structure.substructures[0]
parent, fs_children, is_child = get_structure_numbers(substructure, momenta_dict)
assert (squared_masses is None) or (squared_masses['m2' + str(parent)] == 0)
recoilers = tuple(leg.n for leg in singular_structure.legs)
# Build collective momenta
pCa = LorentzVector()
for j in fs_children:
pCa += PS_point[j]
pR = LorentzVector()
for recoiler in recoilers:
pR += PS_point[recoiler]
pa = PS_point[is_child]
pA = pa - pCa
pAmpR = pA - pR
# Compute parameters
xia = (pAmpR.square() - pR.square())/(2*pa.dot(pAmpR))
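        # This xia follows from momentum conservation: with qA = xia * pa and
        # qR = qA - pAmpR, requiring the recoilers' invariant mass to be
        # preserved, (xia * pa - pAmpR)^2 = pR^2, together with pa^2 = 0,
        # gives xia = (pAmpR^2 - pR^2) / (2 pa.pAmpR).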
# Create new PS point
new_PS_point = PS_point.get_copy()
# Map the set's momentum
qA = xia * pa
new_PS_point[parent] = qA
# Map all recoilers' momenta
qR = qA - pAmpR
for recoiler in singular_structure.legs:
# TODO Move this try/except to higher level
try:
new_PS_point[recoiler.n].rotoboost(pR, qR)
# new_PS_point[recoiler.n].boost_from_to(pR, qR)
except:
logger.critical("Problem encountered for " + str(singular_structure))
logger.critical("The full phase space point was\n" + str(PS_point))
# Eliminate children momenta from the mapped phase-space point
for j in fs_children:
del new_PS_point[j]
if is_child != parent: # Bypass degenerate case of 1->1 splitting
del new_PS_point[is_child]
# If needed, update the kinematic_variables dictionary
if kinematic_variables is not None:
na, nb = InitialCollinearVariables.collinear_and_reference(qA)
InitialCollinearVariables.get(
PS_point, fs_children, is_child, na, nb, kinematic_variables )
# TODO Check if the jacobian for this mapping is really 1
jacobian = 1.
# WARNING: for ISR counterterms, the quantity Q below may *not* match with
# PS_point[1] + PS_point[2]. So it should be used only for what it actually is!
Q = pAmpR
mapping_variables = {'jacobian': jacobian, 'Q': Q}
# Return characteristic variables
return new_PS_point, mapping_variables
@classmethod
def map_to_higher_multiplicity(
cls, PS_point, singular_structure, momenta_dict, kinematic_variables,
compute_jacobian=False):
#misc.sprint("Mapping up the PS point:\n", str(PS_point))
#misc.sprint("with momenta dict: %s",momenta_dict)
#misc.sprint("with variables:\n", kinematic_variables)
#misc.sprint("and structure", singular_structure)
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
needed_variables = set(
cls.get_kinematic_variables_names(singular_structure, momenta_dict))
assert needed_variables.issubset(kinematic_variables.keys())
# Precompute sets and numbers
substructure = singular_structure.substructures[0]
parent, fs_children, is_child = get_structure_numbers(substructure, momenta_dict)
recoilers = tuple(leg.n for leg in singular_structure.legs)
# Build collective momenta
qA = PS_point[parent]
na, nb = InitialCollinearVariables.collinear_and_reference(qA)
nanb = na.dot(nb)
qR = LorentzVector()
for recoiler in recoilers:
qR += PS_point[recoiler]
qRmqA = qR - qA
zA = kinematic_variables['z' + str(is_child)]
ktA = kinematic_variables['kt' + str(is_child)]
pA2 = kinematic_variables['s' + str(is_child)]
ptA = ktA
# ptA = zA * ktA
quad_a = na.dot(qRmqA)
quad_b = 2*ptA.dot(qRmqA) + pA2 + qRmqA.square() - qR.square()
quad_c = (pA2 - ptA.square()) * 2*nb.dot(qRmqA) / nanb
sqrt_delta = math.sqrt(quad_b**2 - 4*quad_a*quad_c)
# WARNING
# The plus sign in front of sqrt_delta is the correct one
        # for > 90% of physical phase-space points, but not all the time.
        # It seems like
Type="Constant_Zone", File="N1Y.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="12", Type="Constant_Zone", File="N1Z.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="13", Type="Constant_Zone", File="N2X.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="14", Type="Constant_Zone", File="N2Y.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="15", Type="Constant_Zone", File="N2Z.bin", Format=fmt))
root.append(mat)
# add a material for top and bottom layers
if add_grips:
grips = etree.Element('Material', numM=str(grip_id + 1),
Lib=os.path.join(elasaniso_path, 'libUmatAmitex.so'),
Law='elasiso')
grips.append(etree.Element(_tag='Coeff', Index='1', Type='Constant', Value=str(grip_constants[0])))
grips.append(etree.Element(_tag='Coeff', Index='2', Type='Constant', Value=str(grip_constants[1])))
root.append(grips)
# add a material for external buffer
if add_exterior or use_mask:
exterior = etree.Element('Material', numM=str(ext_id + 1),
Lib=os.path.join(elasaniso_path, 'libUmatAmitex.so'),
Law='elasiso')
exterior.append(etree.Element(_tag='Coeff', Index='1', Type='Constant', Value='0.'))
exterior.append(etree.Element(_tag='Coeff', Index='2', Type='Constant', Value='0.'))
root.append(exterior)
tree = etree.ElementTree(root)
tree.write('mat.xml', xml_declaration=True, pretty_print=True,
encoding='UTF-8')
print('material file written in mat.xml')
# if possible, write the vtk file to run the computation
if self.__contains__('grain_map'):
# convert the grain map to vtk file
from vtk.util import numpy_support
#TODO adapt to 2D grain maps
#TODO build a continuous grain map for amitex
# grain_ids = self.get_grain_map(as_numpy=True)
grain_ids = self.renumber_grains(only_grain_map=True)
if not self._is_empty('phase_map'):
# use the phase map for the material ids
material_ids = self.get_phase_map(as_numpy=True)
elif use_mask:
material_ids = self.get_mask(as_numpy=True).astype(
grain_ids.dtype)
else:
material_ids = np.ones_like(grain_ids)
if add_grips:
# add a layer of new_id (the value must actually be the first
# grain id) above and below the sample.
grain_ids = np.pad(grain_ids, ((0, 0),
(0, 0),
(grip_size, grip_size)),
mode='constant', constant_values=1)
if use_mask:
# create top and bottom mask extrusions
mask_top = material_ids[:,:,[-1]]
mask_bot = material_ids[:,:,[0]]
top_grip = np.tile(mask_top, (1,1,grip_size))
bot_grip = np.tile(mask_bot, (1,1,grip_size))
# add grip layers to unit cell matID
material_ids = np.concatenate(
((grip_id+1)*bot_grip, material_ids,
(grip_id+1)*top_grip), axis=2)
else:
material_ids = np.pad(
material_ids, ((0, 0), (0, 0), (grip_size, grip_size)),
mode='constant', constant_values=grip_id+1)
if add_exterior and not use_mask:
# add a layer of new_id around the first two dimensions
grain_ids = np.pad(grain_ids, ((exterior_size, exterior_size),
(exterior_size, exterior_size),
(0, 0)),
mode='constant', constant_values=1)
material_ids = np.pad(material_ids,
((exterior_size, exterior_size),
(exterior_size, exterior_size),
(0, 0)),
mode='constant', constant_values=ext_id+1)
if use_mask:
grain_ids[np.where(grain_ids == 0)] = 1
material_ids[np.where(material_ids == 0)] = ext_id + 1
# write both arrays as VTK files for amitex
voxel_size = self.get_voxel_size()
for array, array_name in zip([grain_ids, material_ids],
['grain_ids', 'material_ids']):
print('array name:', array_name, 'array type:', array.dtype)
vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array,
order='F'),
deep=1)
vtk_data_array.SetName(array_name)
grid = vtk.vtkImageData()
size = array.shape
grid.SetExtent(0, size[0], 0, size[1], 0, size[2])
grid.GetCellData().SetScalars(vtk_data_array)
grid.SetSpacing(voxel_size, voxel_size, voxel_size)
writer = vtk.vtkStructuredPointsWriter()
writer.SetFileName('%s_%s.vtk' % (self.get_sample_name(),
array_name))
if binary:
writer.SetFileTypeToBinary()
writer.SetInputData(grid)
writer.Write()
print('%s array written in legacy vtk form for AMITEX_FFTP' %
array_name)
def from_amitex_fftp(self, results_basename, grip_size=0, ext_size=0,
grip_dim=2, sim_prefix='Amitex', int_var_names=dict(),
finite_strain=False, load_fields=True,
compression_options=dict()):
"""Read output of a Amitex_fftp simulation and stores in dataset.
Read a Amitex_fftp result directory containing a mechanical simulation
of the microstructure. See method 'to_amitex_fftp' to generate input
files for such simulation of Microstructure instances.
The results are stored as fields of the CellData group by default.
If generated by the simulation, the strain and stress tensor fields
are stored, as well as the internal variables fields.
Mechanical fields and macroscopical curves are stored. The latter is
stored in the data group '/Mechanical_simulation' as a structured
array.
.. Warning 1::
For now, this methods can store the results of several
snapshots but without writing them as a xdmf time serie. This
feature will be implemented in the future.
.. Warning 2::
For now, results are only stored on CellData group. Method will
be modified in the future to allow to specify a new image data
group to store the results (created if needed).
:param results_basename: Basename of Amitex .std, .vtk output files to
load in dataset.
:type results_basename: str
:param grip_size: Thickness of the grips added to simulation unit cell
by the method 'to_amitex_fftp' of this class, defaults to 0. This
value corresponds to a number of voxels on both ends of the cell.
:type grip_size: int, optional
:param grip_dim: Dimension along which the tension test has been
simulated (0:x, 1:y, 2:z)
:type grip_dim: int, optional
:param ext_size: Thickness of the exterior region added to simulation
unit cell by the method 'to_amitex_fftp' of this class,
defaults to 0. This value corresponds to a number of voxels on
both ends of the cell.
:type ext_size: int, optional
:param sim_prefix: Prefix of the name of the fields that will be
stored on the CellData group from simulation results.
:type sim_prefix: str, optional
:param int_var_names: Dictionary whose keys are the names of
internal variables stored in Amitex output files
(varInt1, varInt2...) and values are corresponding names for
these variables in the dataset.
:type int_var_names: dict, optional
"""
# TODO: add grain map to all time steps
from pymicro.core.utils.SDAmitexUtils import SDAmitexIO
# Get std file result
p_mstd = Path(str(results_basename)).absolute().with_suffix('.mstd')
# safety check
if not p_mstd.exists():
raise ValueError('results not found, "results_basename" argument'
' not associated with Amitex_fftp simulation'
' results.')
# load .mstd results --> only lines of microstructure material mean
# values
step = 1
if grip_size > 0:
step += 1
if ext_size > 0:
step += 1
elif not self._is_empty('mask'):
# if mask used as exterior in computation but exterior size = 0
# still need to add 1 to step as .mstd will have three lines per
# increment
if np.any(self['mask'] == 0):
step += 1
std_res = SDAmitexIO.load_std(p_mstd, step=step)
# load .zstd results
p_zstd = Path(str(results_basename)+'_1').with_suffix('.zstd')
if p_zstd.exists():
zstd_res = SDAmitexIO.load_std(p_zstd, Int_var_names=int_var_names)
# Store macro data in specific group
self.add_group(groupname=f'{sim_prefix}_Results', location='/',
indexname='fft_sim', replace=True)
# std_res is a numpy structured array whose fields depend on
# the type of output (finite strain ou infinitesimal strain sim.)
# ==> we load it into the dataset as a structured table data item.
self.add_table(location='fft_sim', name='Standard_output',
indexname=f'{sim_prefix}_std',
description=std_res.dtype, data=std_res)
# idem for zstd --> Add as Mechanical Grain Data Table
if p_zstd.exists():
self.add_table(location='GrainData',
name='MechanicalGrainDataTable',
indexname='Mech_Grain_Data',
description=zstd_res.dtype, data=zstd_res)
grainIDs = self.get_grain_ids()
N_zone_times = int(zstd_res.shape[0]/ len(grainIDs))
dtype_col = np.dtype([('grain_ID', np.int)])
IDs = np.tile(grainIDs, N_zone_times).astype(dtype_col)
self.add_tablecols(tablename='MechanicalGrainDataTable',
description=IDs.dtype, data=IDs)
# End of macroscopic data loading. Check if field data must be loaded.
if not load_fields:
return
# Get vtk files results
Stress, Strain, VarInt, Incr_list = SDAmitexIO.load_amitex_output_fields(
results_basename, grip_size=grip_size, ext_size=ext_size,
grip_dim=2)
## Loop over time steps: create group to store results
self.add_group(groupname=f'{sim_prefix}_fields',
location='/CellData', indexname='fft_fields',
replace=True)
# Create CellData temporal subgrids for each time value with a vtk
# field output
time_values = std_res['time'][Incr_list].squeeze()
if len(time_values) == 0:
time_values = [0.]
self.add_grid_time('CellData', time_values)
# Add fields to CellData grid collections
for incr in Stress:
Time = std_res['time'][incr].squeeze()
fieldname = f'{sim_prefix}_stress'
self.add_field(gridname='CellData', fieldname=fieldname,
array=Stress[incr], location='fft_fields',
time=Time, compression_options=compression_options)
for incr in Strain:
Time = std_res['time'][incr].squeeze()
fieldname = f'{sim_prefix}_strain'
self.add_field(gridname='CellData', fieldname=fieldname,
array=Strain[incr], location='fft_fields',
time=Time, compression_options=compression_options)
for mat in VarInt:
for incr in VarInt[mat]:
Time = std_res['time'][incr].squeeze()
for var in VarInt[mat][incr]:
varname = var
if int_var_names.__contains__(var):
varname = int_var_names[var]
fieldname = f'{sim_prefix}_mat{mat}_{varname}'
self.add_field(gridname='CellData', fieldname=fieldname,
array=VarInt[mat][incr][var],
location='fft_fields', time=Time,
compression_options=compression_options)
return
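    # Example (a sketch; the results path is hypothetical):
    #
    #   micro.from_amitex_fftp('results/Amitex_tension', grip_size=2,
    #                          sim_prefix='Amitex')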
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
"""
Outputs the material block corresponding to this microstructure for
a finite element calculation with z-set.
:param str mat_file: The name of the file where the material behaviour
is located
:param str grain_prefix: The grain prefix used to name the elsets
corresponding to the different grains
"""
f = open('elset_list.txt', 'w')
# TODO : test
for g in self.grains:
o = Orientation.from_rodrigues(g['orientation'])
f.write(' **elset %s%d *file %s *integration '
'theta_method_a 1.0 1.e-9 150 *rotation '
'%7.3f %7.3f %7.3f\n' % (grain_prefix, g['idnumber'],
mat_file,
o.phi1(), o.Phi(), o.phi2()))
f.close()
return
def to_dream3d(self):
"""Write the microstructure as a hdf5 file compatible with DREAM3D."""
import time
f = h5py.File('%s.h5' % self.get_sample_name(), 'w')
f.attrs['FileVersion'] = np.string_('7.0')
f.attrs['DREAM3D Version'] = np.string_('6.1.77.d28a796')
f.attrs['HDF5_Version'] = h5py.version.hdf5_version
f.attrs['h5py_version'] = h5py.version.version
f.attrs['file_time'] = time.time()
# pipeline group (empty here)
pipeline = f.create_group('Pipeline')
pipeline.attrs['Number_Filters'] = np.int32(0)
# create the data container group
data_containers = f.create_group('DataContainers')
m = data_containers.create_group('DataContainer')
# ensemble data
ed = m.create_group('EnsembleData')
ed.attrs['AttributeMatrixType'] = np.uint32(11)
ed.attrs['TupleDimensions'] = np.uint64(2)
cryst_structure = ed.create_dataset('CrystalStructures',
data=np.array([[999], [1]],
dtype=np.uint32))
cryst_structure.attrs['ComponentDimensions'] = np.uint64(1)
cryst_structure.attrs['DataArrayVersion'] = np.int32(2)
cryst_structure.attrs['ObjectType'] = np.string_('DataArray<uint32_t>')
cryst_structure.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
cryst_structure.attrs['TupleDimensions'] = np.uint64(2)
mat_name = ed.create_dataset('MaterialName',
data=[a.encode('utf8')
for a in ['Invalid Phase', 'Unknown']])
mat_name.attrs['ComponentDimensions'] = np.uint64(1)
mat_name.attrs['DataArrayVersion'] = np.int32(2)
mat_name.attrs['ObjectType'] = np.string_('StringDataArray')
mat_name.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
mat_name.attrs['TupleDimensions'] =
_utils.json_to_proto(
_utils.body_to_json(response), Message.Response)
artifact = {
artifact.key: artifact for artifact in response_msg.artifacts}.get(key)
if artifact is None:
raise KeyError("no artifact found with key {}".format(key))
if artifact.path_only:
return artifact.path, artifact.path_only
else:
# download artifact from artifact store
url = self._get_url_for_artifact(key, "GET").url
response = _utils.make_request("GET", url, self._conn)
_utils.raise_for_http_error(response)
return response.content, artifact.path_only
# TODO: Fix up get dataset to handle the Dataset class when logging dataset
# version
def _get_dataset(self, key):
"""
Gets the dataset with name `key` from this Experiment Run.
If the dataset was originally logged as just a filesystem path, that path will be returned.
Otherwise, bytes representing the dataset object will be returned.
Parameters
----------
key : str
Name of the artifact.
Returns
-------
str or bytes
Filesystem path or bytes representing the artifact.
bool
True if the artifact was only logged as its filesystem path.
"""
# get key-path from ModelDB
Message = _ExperimentRunService.GetDatasets
msg = Message(id=self.id)
data = _utils.proto_to_json(msg)
response = _utils.make_request("GET",
"{}://{}/api/v1/modeldb/experiment-run/getDatasets".format(
self._conn.scheme, self._conn.socket),
self._conn, params=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(
_utils.body_to_json(response), Message.Response)
dataset = {
dataset.key: dataset for dataset in response_msg.datasets}.get(key)
if dataset is None:
# may be old artifact-based dataset
try:
dataset, path_only = self._get_artifact(key)
except KeyError:
six.raise_from(KeyError("no dataset found with key {}".format(key)),
None)
else:
return dataset, path_only, None
else:
return dataset.path, dataset.path_only, dataset.linked_artifact_id
def clone(self, experiment_id=None):
"""
Returns a newly-created copy of this experiment run.
Parameters
----------
experiment_id : str, optional
ID of experiment to clone this run into. If not provided, the new
run will be cloned into this run's experiment.
Returns
-------
:class:`~verta._tracking.experimentrun.ExperimentRun`
"""
# get info for the current run
Message = _ExperimentRunService.CloneExperimentRun
msg = Message(src_experiment_run_id=self.id,
dest_experiment_id=experiment_id)
response = self._conn.make_proto_request("POST",
"/api/v1/modeldb/experiment-run/cloneExperimentRun",
body=msg)
new_run_msg = self._conn.maybe_proto_response(
response, Message.Response).run
new_run = ExperimentRun(self._conn, self._conf, new_run_msg)
return new_run
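# Hedged usage sketch (assumes an existing ExperimentRun `run` and another
# experiment object `other_expt`; both names are hypothetical):
# copy_here = run.clone()                              # clone into the same experiment
# copy_there = run.clone(experiment_id=other_expt.id)  # clone into another experiment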
def get_date_created(self):
"""
Gets a timestamp representing the time (in UTC) this Experiment Run was created.
Returns
-------
timestamp : int
Unix timestamp in milliseconds.
"""
self._refresh_cache()
return int(self._msg.date_created)
def get_date_updated(self):
"""
Gets a timestamp representing the time (in UTC) this Experiment Run was updated.
Returns
-------
timestamp : int
Unix timestamp in milliseconds.
"""
self._refresh_cache()
return int(self._msg.date_updated)
def log_tag(self, tag):
"""
Logs a tag to this Experiment Run.
Parameters
----------
tag : str
Tag.
"""
if not isinstance(tag, six.string_types):
raise TypeError("`tag` must be a string")
Message = _ExperimentRunService.AddExperimentRunTags
msg = Message(id=self.id, tags=[tag])
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/addExperimentRunTags".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
_utils.raise_for_http_error(response)
self._clear_cache()
def log_tags(self, tags):
"""
Logs multiple tags to this Experiment Run.
Parameters
----------
tags : list of str
Tags.
"""
tags = _utils.as_list_of_str(tags)
Message = _ExperimentRunService.AddExperimentRunTags
msg = Message(id=self.id, tags=tags)
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/addExperimentRunTags".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
_utils.raise_for_http_error(response)
self._clear_cache()
def get_tags(self):
"""
Gets all tags from this Experiment Run.
Returns
-------
list of str
All tags.
"""
Message = _CommonService.GetTags
msg = Message(id=self.id)
data = _utils.proto_to_json(msg)
response = _utils.make_request("GET",
"{}://{}/api/v1/modeldb/experiment-run/getExperimentRunTags".format(
self._conn.scheme, self._conn.socket),
self._conn, params=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(
_utils.body_to_json(response), Message.Response)
return response_msg.tags
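# Hedged usage sketch for the tag API above (`run` is an assumed ExperimentRun):
# run.log_tag("baseline")
# run.log_tags(["cnn", "adam"])
# print(run.get_tags())  # e.g. ["baseline", "cnn", "adam"]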
def log_attribute(self, key, value, overwrite=False):
"""
Logs an attribute to this Experiment Run.
Parameters
----------
key : str
Name of the attribute.
value : one of {None, bool, float, int, str, list, dict}
Value of the attribute.
overwrite : bool, default False
Whether to allow overwriting an existing attribute with key `key`.
"""
_utils.validate_flat_key(key)
if isinstance(value, data_types._VertaDataType):
value = value._as_dict()
if overwrite:
self._delete_attributes([key])
attribute = _CommonCommonService.KeyValue(
key=key, value=_utils.python_to_val_proto(value, allow_collection=True))
msg = _ExperimentRunService.LogAttribute(
id=self.id, attribute=attribute)
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/logAttribute".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
if not response.ok:
if response.status_code == 409:
raise ValueError("attribute with key {} already exists;"
" consider using observations instead, or setting overwrite=True.".format(key))
else:
_utils.raise_for_http_error(response)
self._clear_cache()
def log_attributes(self, attributes, overwrite=False):
"""
Logs potentially multiple attributes to this Experiment Run.
Parameters
----------
attributes : dict of str to {None, bool, float, int, str, list, dict}
Attributes.
overwrite : bool, default False
Whether to allow overwriting existing attributes.
"""
# validate all keys first
for key in six.viewkeys(attributes):
_utils.validate_flat_key(key)
for key, value in six.viewitems(attributes):
if isinstance(value, data_types._VertaDataType):
attributes[key] = value._as_dict()
if overwrite:
keys = list(six.viewkeys(attributes))
self._delete_attributes(keys)
# build KeyValues
attribute_keyvals = []
for key, value in six.viewitems(attributes):
attribute_keyvals.append(_CommonCommonService.KeyValue(
key=key, value=_utils.python_to_val_proto(value, allow_collection=True)))
msg = _ExperimentRunService.LogAttributes(
id=self.id, attributes=attribute_keyvals)
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/logAttributes".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
if not response.ok:
if response.status_code == 409:
raise ValueError("some attribute with some input key already exists;"
" consider using observations instead, or setting overwrite=True.")
else:
_utils.raise_for_http_error(response)
self._clear_cache()
def get_attribute(self, key):
"""
Gets the attribute with name `key` from this Experiment Run.
Parameters
----------
key : str
Name of the attribute.
Returns
-------
one of {None, bool, float, int, str}
Value of the attribute.
"""
_utils.validate_flat_key(key)
Message = _CommonService.GetAttributes
msg = Message(id=self.id, attribute_keys=[key])
data = _utils.proto_to_json(msg)
response = _utils.make_request("GET",
"{}://{}/api/v1/modeldb/experiment-run/getAttributes".format(
self._conn.scheme, self._conn.socket),
self._conn, params=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(
_utils.body_to_json(response), Message.Response)
attributes = _utils.unravel_key_values(response_msg.attributes)
try:
# TODO: VR-10434 try to read into a `_VertaDataType`
return attributes[key]
except KeyError:
six.raise_from(
KeyError("no attribute found with key {}".format(key)), None)
def get_attributes(self):
"""
Gets all attributes from this Experiment Run.
Returns
-------
dict of str to {None, bool, float, int, str}
Names and values of all attributes.
"""
Message = _CommonService.GetAttributes
msg = Message(id=self.id, get_all=True)
data = _utils.proto_to_json(msg)
response = _utils.make_request("GET",
"{}://{}/api/v1/modeldb/experiment-run/getAttributes".format(
self._conn.scheme, self._conn.socket),
self._conn, params=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(
_utils.body_to_json(response), Message.Response)
# TODO: VR-10434 try to read into a `_VertaDataType`
return _utils.unravel_key_values(response_msg.attributes)
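# Hedged usage sketch of the attribute round trip (`run` is an assumed
# ExperimentRun; attributes are write-once unless overwrite=True):
# run.log_attribute("optimizer", "adam")
# run.log_attributes({"lr": 0.001, "layers": [64, 32]})
# assert run.get_attribute("optimizer") == "adam"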
def _delete_attributes(self, keys):
response = _utils.make_request("DELETE",
"{}://{}/api/v1/modeldb/experiment-run/deleteExperimentRunAttributes".format(
self._conn.scheme, self._conn.socket),
self._conn, json={'id': self.id, 'attribute_keys': keys})
_utils.raise_for_http_error(response)
def _delete_metrics(self, keys):
response = _utils.make_request("DELETE",
"{}://{}/api/v1/modeldb/experiment-run/deleteMetrics".format(
self._conn.scheme, self._conn.socket),
self._conn, json={'id': self.id, 'metric_keys': keys})
_utils.raise_for_http_error(response)
def _delete_observations(self, keys):
response = _utils.make_request("DELETE",
"{}://{}/api/v1/modeldb/experiment-run/deleteObservations".format(
self._conn.scheme, self._conn.socket),
self._conn, json={'id': self.id, 'observation_keys': keys})
_utils.raise_for_http_error(response)
def _delete_hyperparameters(self, keys):
response = _utils.make_request("DELETE",
"{}://{}/api/v1/modeldb/experiment-run/deleteHyperparameters".format(
self._conn.scheme, self._conn.socket),
self._conn, json={'id': self.id, 'hyperparameter_keys': keys})
_utils.raise_for_http_error(response)
def log_metric(self, key, value, overwrite=False):
"""
Logs a metric to this Experiment Run.
If the metadatum of interest might recur, :meth:`.log_observation` should be used instead.
Parameters
----------
key : str
Name of the metric.
value : one of {None, bool, float, int, str}
Value of the metric.
overwrite : bool, default False
Whether to allow overwriting an existing metric with key `key`.
"""
_utils.validate_flat_key(key)
metric = _CommonCommonService.KeyValue(
key=key, value=_utils.python_to_val_proto(value))
msg = _ExperimentRunService.LogMetric(id=self.id, metric=metric)
data = _utils.proto_to_json(msg)
if overwrite:
self._delete_metrics([key])
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/logMetric".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
if not response.ok:
if response.status_code == 409:
raise ValueError("metric with key {} already exists;"
" consider using observations instead".format(key))
else:
_utils.raise_for_http_error(response)
self._clear_cache()
def log_metrics(self, metrics, overwrite=False):
"""
Logs potentially multiple metrics to this Experiment Run.
Parameters
----------
metrics : dict of str to {None, bool, float, int, str}
Metrics.
overwrite : bool, default False
Whether to allow overwriting existing metrics.
"""
# validate all keys first
for key in six.viewkeys(metrics):
_utils.validate_flat_key(key)
# build KeyValues
metric_keyvals = []
keys = []
for key, value in six.viewitems(metrics):
metric_keyvals.append(_CommonCommonService.KeyValue(
key=key, value=_utils.python_to_val_proto(value)))
keys.append(key)
msg = _ExperimentRunService.LogMetrics(
id=self.id, metrics=metric_keyvals)
data = _utils.proto_to_json(msg)
if overwrite:
self._delete_metrics(keys)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/logMetrics".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
if not response.ok:
if response.status_code == 409:
raise ValueError("some metric with some input key already exists;"
" consider using observations instead")
else:
_utils.raise_for_http_error(response)
self._clear_cache()
def get_metric(self, key):
"""
Gets the metric with name `key` from this Experiment Run.
Parameters
----------
key : str
Name of the metric.
Returns
-------
one of {None, bool, float, int, str}
Value of the metric.
"""
self._refresh_cache()
if key in self._metrics:
return self._metrics[key]
else:
six.raise_from(
KeyError("no metric found with key {}".format(key)), None)
def get_metrics(self):
"""
Gets all metrics from this Experiment Run.
Returns
-------
dict of str to {None, bool, float, int, str}
Names and values of all metrics.
"""
self._refresh_cache()
return self._metrics
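# Hedged usage sketch (`run` assumed; metrics are single-valued per key,
# use observations for recurring values):
# run.log_metric("val_acc", 0.93)
# run.log_metric("val_acc", 0.95, overwrite=True)
# print(run.get_metrics())  # e.g. {"val_acc": 0.95}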
def log_hyperparameter(self, key, value, overwrite=False):
"""
Logs a hyperparameter to this Experiment Run.
Parameters
----------
key : str
Name of the hyperparameter.
value : one of {None, bool, float, int, str}
Value of the hyperparameter.
overwrite : bool, default False
Whether to allow overwriting an existing hyperparameter with key `key`.
"""
_utils.validate_flat_key(key)
# pyOCD/target/cortex_m.py
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.target.target import Target
from pyOCD.target.target import TARGET_RUNNING, TARGET_HALTED
from pyOCD.transport.cmsis_dap import DP_REG
import logging
# Debug Halting Control and Status Register
DHCSR = 0xE000EDF0
# Debug Core Register Selector Register
DCRSR = 0xE000EDF4
REGWnR = (1 << 16)
# Debug Core Register Data Register
DCRDR = 0xE000EDF8
# Debug Exception and Monitor Control Register
DEMCR = 0xE000EDFC
TRACE_ENA = (1 << 24)
VC_HARDERR = (1 << 9)
VC_BUSERR = (1 << 8)
VC_CORERESET = (1 << 0)
NVIC_AIRCR = (0xE000ED0C)
NVIC_AIRCR_VECTKEY = (0x5FA << 16)
NVIC_AIRCR_VECTRESET = (1 << 0)
NVIC_AIRCR_SYSRESETREQ = (1 << 2)
CSYSPWRUPACK = 0x80000000
CDBGPWRUPACK = 0x20000000
CSYSPWRUPREQ = 0x40000000
CDBGPWRUPREQ = 0x10000000
TRNNORMAL = 0x00000000
MASKLANE = 0x00000f00
C_DEBUGEN = (1 << 0)
C_HALT = (1 << 1)
C_STEP = (1 << 2)
C_MASKINTS = (1 << 3)
C_SNAPSTALL = (1 << 4)
DBGKEY = (0xA05F << 16)
# FPB (breakpoint)
FP_CTRL = (0xE0002000)
FP_CTRL_KEY = (1 << 1)
FP_COMP0 = (0xE0002008)
CORE_REGISTER = {'r0': 0,
'r1': 1,
'r2': 2,
'r3': 3,
'r4': 4,
'r5': 5,
'r6': 6,
'r7': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'sp': 13,
'lr': 14,
'pc': 15,
'xpsr': 16,
}
targetXML = "<?xml version=\"1.0\"?>\n" \
"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">\n" \
"<target>\n" \
"<feature name=\"org.gnu.gdb.arm.m-profile\">\n" \
"<reg name=\"r0\" bitsize=\"32\"/>\n" \
"<reg name=\"r1\" bitsize=\"32\"/>\n" \
"<reg name=\"r2\" bitsize=\"32\"/>\n" \
"<reg name=\"r3\" bitsize=\"32\"/>\n" \
"<reg name=\"r4\" bitsize=\"32\"/>\n" \
"<reg name=\"r5\" bitsize=\"32\"/>\n" \
"<reg name=\"r6\" bitsize=\"32\"/>\n" \
"<reg name=\"r7\" bitsize=\"32\"/>\n" \
"<reg name=\"r8\" bitsize=\"32\"/>\n" \
"<reg name=\"r9\" bitsize=\"32\"/>\n" \
"<reg name=\"r10\" bitsize=\"32\"/>\n" \
"<reg name=\"r11\" bitsize=\"32\"/>\n" \
"<reg name=\"r12\" bitsize=\"32\"/>\n" \
"<reg name=\"sp\" bitsize=\"32\" type=\"data_ptr\"/>\n" \
"<reg name=\"lr\" bitsize=\"32\"/>\n" \
"<reg name=\"pc\" bitsize=\"32\" type=\"code_ptr\"/>\n" \
"<reg name=\"xpsr\" bitsize=\"32\" regnum=\"16\"/>\n" \
"</feature>\n" \
"</target>\n"
"""
convert a byte array into a word array
"""
def byte2word(data):
res = []
for i in range(len(data)/4):
res.append(data[i*4 + 0] << 0 |
data[i*4 + 1] << 8 |
data[i*4 + 2] << 16 |
data[i*4 + 3] << 24)
return res
"""
convert a word array into a byte array
"""
def word2byte(data):
res = []
for x in data:
res.append((x >> 0) & 0xff)
res.append((x >> 8) & 0xff)
res.append((x >> 16) & 0xff)
res.append((x >> 24) & 0xff)
return res
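# Little-endian round-trip sanity check for the two helpers above:
# byte2word([0x78, 0x56, 0x34, 0x12]) == [0x12345678]
# word2byte([0x12345678]) == [0x78, 0x56, 0x34, 0x12]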
class Breakpoint():
def __init__(self, comp_register_addr):
self.comp_register_addr = comp_register_addr
self.enabled = False
self.addr = 0
class CortexM(Target):
"""
This class has basic functions to access a Cortex M core:
- init
- read/write memory
- read/write core registers
- set/remove hardware breakpoints
"""
def __init__(self, transport):
self.transport = transport
self.auto_increment_page_size = 0
self.idcode = 0
self.breakpoints = []
self.nb_code = 0
self.num_breakpoint_used = 0
self.nb_lit = 0
self.fpb_enabled = False
self.targetXML = targetXML
return
def init(self, setup_fpb = True):
"""
Cortex M initialization
"""
self.idcode = self.readIDCode()
# select bank 0 (to access DRW and TAR)
self.transport.writeDP(DP_REG['SELECT'], 0)
self.transport.writeDP(DP_REG['CTRL_STAT'], CSYSPWRUPREQ | CDBGPWRUPREQ)
while True:
r = self.transport.readDP(DP_REG['CTRL_STAT'])
if (r & (CDBGPWRUPACK | CSYSPWRUPACK)) == (CDBGPWRUPACK | CSYSPWRUPACK):
break
self.transport.writeDP(DP_REG['CTRL_STAT'], CSYSPWRUPREQ | CDBGPWRUPREQ | TRNNORMAL | MASKLANE)
self.transport.writeDP(DP_REG['SELECT'], 0)
if setup_fpb:
self.halt()
self.setupFPB()
return
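# Hedged usage sketch (the `transport` object is an assumption; anything
# exposing readDP/writeDP/readMem/writeMem/readBlock32/writeBlock32 in the
# pyOCD transport style would do):
# target = CortexM(transport)
# target.init()                    # powers up the debug domain, halts, sets up the FPB
# print(hex(target.readIDCode()))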
def setupFPB(self):
"""
Reads the number of hardware breakpoints available on the core
and disable the FPB (Flash Patch and Breakpoint Unit)
which will be enabled when a first breakpoint will be set
"""
# setup FPB (breakpoint)
fpcr = self.readMemory(FP_CTRL)
self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF)
logging.info("%d hardware breakpoints", self.nb_code)
for i in range(self.nb_code):
self.breakpoints.append(Breakpoint(FP_COMP0 + 4*i))
# disable FPB (will be enabled on first bp set)
self.disableFPB()
for bp in self.breakpoints:
self.writeMemory(bp.comp_register_addr, 0)
def info(self, request):
return self.transport.info(request)
def readIDCode(self):
"""
return the IDCODE of the core
"""
if self.idcode == 0:
self.idcode = self.transport.readDP(DP_REG['IDCODE'])
return self.idcode
def writeMemory(self, addr, value, transfer_size = 32):
"""
write a memory location.
By default the transfer size is a word
"""
self.transport.writeMem(addr, value, transfer_size)
return
def readMemory(self, addr, transfer_size = 32):
"""
read a memory location. By default, a word will
be read
"""
return self.transport.readMem(addr, transfer_size)
def readBlockMemoryUnaligned8(self, addr, size):
"""
read a block of unaligned bytes in memory. Returns
an array of byte values
"""
res = []
# try to read 8bits data
if (size > 0) and (addr & 0x01):
mem = self.readMemory(addr, 8)
logging.debug("get 1 byte at %s: 0x%X", hex(addr), mem)
res.append(mem)
size -= 1
addr += 1
# try to read 16bits data
if (size > 1) and (addr & 0x02):
mem = self.readMemory(addr, 16)
logging.debug("get 2 bytes at %s: 0x%X", hex(addr), mem)
res.append(mem & 0xff)
res.append((mem >> 8) & 0xff)
size -= 2
addr += 2
# try to read aligned block of 32bits
if (size >= 4):
logging.debug("read blocks aligned at 0x%X, size: 0x%X", addr, (size/4)*4)
mem = self.readBlockMemoryAligned32(addr, size/4)
res += word2byte(mem)
size -= 4*len(mem)
addr += 4*len(mem)
if (size > 1):
mem = self.readMemory(addr, 16)
logging.debug("get 2 bytes at %s: 0x%X", hex(addr), mem)
res.append(mem & 0xff)
res.append((mem >> 8) & 0xff)
size -= 2
addr += 2
if (size > 0):
mem = self.readMemory(addr, 8)
logging.debug("get 1 byte remaining at %s: 0x%X", hex(addr), mem)
res.append(mem)
size -= 1
addr += 1
return res
def writeBlockMemoryUnaligned8(self, addr, data):
"""
write a block of unaligned bytes in memory.
"""
size = len(data)
idx = 0
#try to write 8 bits data
if (size > 0) and (addr & 0x01):
logging.debug("write 1 byte at 0x%X: 0x%X", addr, data[idx])
self.writeMemory(addr, data[idx], 8)
size -= 1
addr += 1
idx += 1
# try to write 16 bits data
if (size > 1) and (addr & 0x02):
logging.debug("write 2 bytes at 0x%X: 0x%X", addr, data[idx] | (data[idx+1] << 8))
self.writeMemory(addr, data[idx] | (data[idx+1] << 8), 16)
size -= 2
addr += 2
idx += 2
# write aligned block of 32 bits
if (size >= 4):
logging.debug("write blocks aligned at 0x%X, size: 0x%X", addr, (size/4)*4)
data32 = byte2word(data[idx:idx + (size & ~0x03)])
self.writeBlockMemoryAligned32(addr, data32)
addr += size & ~0x03
idx += size & ~0x03
size -= size & ~0x03
# try to write 16 bits data
if (size > 1):
logging.debug("write 2 bytes at 0x%X: 0x%X", addr, data[idx] | (data[idx+1] << 8))
self.writeMemory(addr, data[idx] | (data[idx+1] << 8), 16)
size -= 2
addr += 2
idx += 2
#try to write 8 bits data
if (size > 0):
logging.debug("write 1 byte at 0x%X: 0x%X", addr, data[idx])
self.writeMemory(addr, data[idx], 8)
size -= 1
addr += 1
idx += 1
return
def writeBlockMemoryAligned32(self, addr, data):
"""
write a block of aligned words in memory.
"""
size = len(data)
while size > 0:
n = self.auto_increment_page_size - (addr & (self.auto_increment_page_size - 1))
if size*4 < n:
n = (size*4) & 0xfffffffc
self.transport.writeBlock32(addr, data[:n//4])
data = data[n//4:]
size -= n//4
addr += n
return
def readBlockMemoryAligned32(self, addr, size):
"""
read a block of aligned words in memory. Returns
an array of word values
"""
resp = []
while size > 0:
n = self.auto_increment_page_size - (addr & (self.auto_increment_page_size - 1))
if size*4 < n:
n = (size*4) & 0xfffffffc
resp += self.transport.readBlock32(addr, n//4)
size -= n//4
addr += n
return resp
def halt(self):
"""
halt the core
"""
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN | C_HALT)
return
def step(self):
"""
perform an instruction level step
"""
if self.getState() != TARGET_HALTED:
logging.debug('cannot step: target not halted')
return
if self.maybeSkipBreakpoint() is None:
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN | C_STEP)
return
def reset(self):
"""
"""Put all the speakers in the network in the same group, a.k.a Party
Mode.
This blog shows the initial research responsible for this:
http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html
The trick seems to be (only tested on a two-speaker setup) to tell each
speaker which to join. There's probably a bit more to it if multiple
groups have been defined.
"""
# Tell every other visible zone to join this one
# pylint: disable = expression-not-assigned
[zone.join(self) for zone in self.visible_zones if zone is not self]
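# Hedged usage sketch (soco.discover() is the usual entry point for zones):
# speakers = list(soco.discover())
# speakers[0].partymode()  # every other visible zone joins speakers[0]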
def join(self, master):
"""Join this speaker to another "master" speaker."""
self.avTransport.SetAVTransportURI(
[
("InstanceID", 0),
("CurrentURI", "x-rincon:{0}".format(master.uid)),
("CurrentURIMetaData", ""),
]
)
self._zgs_cache.clear()
self._parse_zone_group_state()
def unjoin(self):
"""Remove this speaker from a group.
Seems to work ok even if you remove what was previously the group
master from it's own group. If the speaker was not in a group also
returns ok.
"""
self.avTransport.BecomeCoordinatorOfStandaloneGroup([("InstanceID", 0)])
self._zgs_cache.clear()
self._parse_zone_group_state()
def create_stereo_pair(self, rh_slave_speaker):
"""Create a stereo pair.
This speaker becomes the master, left-hand speaker of the stereo
pair. The ``rh_slave_speaker`` becomes the right-hand speaker.
Note that this operation will succeed on dissimilar speakers, unlike
when using the official Sonos apps.
Args:
rh_slave_speaker (SoCo): The speaker that will be added as
the right-hand, slave speaker of the stereo pair.
Raises:
SoCoUPnPException: if either speaker is already part of a
stereo pair.
"""
# The pairing operation must be applied to the speaker that will
# become the master (the left-hand speaker of the pair).
# Note that if either speaker is part of a group, the call will
# succeed.
param = self.uid + ":LF,LF;" + rh_slave_speaker.uid + ":RF,RF"
self.deviceProperties.AddBondedZones([("ChannelMapSet", param)])
def separate_stereo_pair(self):
"""Separate a stereo pair.
This can be called on either the master (left-hand) speaker, or on the
slave (right-hand) speaker, to create two independent zones.
Raises:
SoCoUPnPException: if the speaker is not a member of a stereo pair.
"""
self.deviceProperties.RemoveBondedZones(
[("ChannelMapSet", ""), ("KeepGrouped", "0")]
)
def switch_to_line_in(self, source=None):
"""Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
"""
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI(
[
("InstanceID", 0),
("CurrentURI", "x-rincon-stream:{0}".format(uid)),
("CurrentURIMetaData", ""),
]
)
@property
def is_playing_radio(self):
"""bool: Is the speaker playing radio?"""
response = self.avTransport.GetPositionInfo(
[("InstanceID", 0), ("Channel", "Master")]
)
track_uri = response["TrackURI"]
return re.match(r"^x-rincon-mp3radio:", track_uri) is not None
@property
def is_playing_line_in(self):
"""bool: Is the speaker playing line-in?"""
response = self.avTransport.GetPositionInfo(
[("InstanceID", 0), ("Channel", "Master")]
)
track_uri = response["TrackURI"]
return re.match(r"^x-rincon-stream:", track_uri) is not None
@property
def is_playing_tv(self):
"""bool: Is the playbar speaker input from TV?"""
response = self.avTransport.GetPositionInfo(
[("InstanceID", 0), ("Channel", "Master")]
)
track_uri = response["TrackURI"]
return re.match(r"^x-sonos-htastream:", track_uri) is not None
def switch_to_tv(self):
"""Switch the playbar speaker's input to TV."""
self.avTransport.SetAVTransportURI(
[
("InstanceID", 0),
("CurrentURI", "x-sonos-htastream:{0}:spdif".format(self.uid)),
("CurrentURIMetaData", ""),
]
)
@property
def status_light(self):
"""bool: The white Sonos status light between the mute button and the
volume up button on the speaker.
True if on, otherwise False.
"""
result = self.deviceProperties.GetLEDState()
LEDState = result["CurrentLEDState"] # pylint: disable=invalid-name
return LEDState == "On"
@status_light.setter
def status_light(self, led_on):
"""Switch on/off the speaker's status light."""
led_state = "On" if led_on else "Off"
self.deviceProperties.SetLEDState(
[
("DesiredLEDState", led_state),
]
)
def get_current_track_info(self):
"""Get information about the currently playing track.
Returns:
dict: A dictionary containing information about the currently
playing track: playlist_position, duration, title, artist, album,
position and an album_art link.
If we're unable to return data for a field, we'll return an empty
string. This can happen for all kinds of reasons so be sure to check
values. For example, a track may not have complete metadata and be
missing an album name. In this case track['album'] will be an empty
string.
.. note:: Calling this method on a slave in a group will not
return the track the group is playing, but the last track
this speaker was playing.
"""
response = self.avTransport.GetPositionInfo(
[("InstanceID", 0), ("Channel", "Master")]
)
track = {
"title": "",
"artist": "",
"album": "",
"album_art": "",
"position": "",
}
track["playlist_position"] = response["Track"]
track["duration"] = response["TrackDuration"]
track["uri"] = response["TrackURI"]
track["position"] = response["RelTime"]
metadata = response["TrackMetaData"]
# Store the entire Metadata entry in the track, this can then be
# used if needed by the client to restart a given URI
track["metadata"] = metadata
# Duration seems to be '0:00:00' when listening to radio
if metadata != "" and track["duration"] == "0:00:00":
metadata = XML.fromstring(really_utf8(metadata))
# Try parse trackinfo
trackinfo = (
metadata.findtext(
".//{urn:schemas-rinconnetworks-com:" "metadata-1-0/}streamContent"
)
or ""
)
index = trackinfo.find(" - ")
if index > -1:
track["artist"] = trackinfo[:index]
track["title"] = trackinfo[index + 3 :]
else:
# Might find some kind of title anyway in metadata
track["title"] = metadata.findtext(
".//{http://purl.org/dc/" "elements/1.1/}title"
)
if not track["title"]:
track["title"] = trackinfo
# If the speaker is playing from the line-in source, querying for track
# metadata will return "NOT_IMPLEMENTED".
elif metadata not in ("", "NOT_IMPLEMENTED", None):
# Track metadata is returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(metadata))
md_title = metadata.findtext(".//{http://purl.org/dc/elements/1.1/}title")
md_artist = metadata.findtext(
".//{http://purl.org/dc/elements/1.1/}creator"
)
md_album = metadata.findtext(
".//{urn:schemas-upnp-org:metadata-1-0/upnp/}album"
)
track["title"] = ""
if md_title:
track["title"] = md_title
track["artist"] = ""
if md_artist:
track["artist"] = md_artist
track["album"] = ""
if md_album:
track["album"] = md_album
album_art_url = metadata.findtext(
".//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI"
)
if album_art_url is not None:
track["album_art"] = self.music_library.build_album_art_full_uri(
album_art_url
)
return track
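# Hedged usage sketch: fields default to "" when metadata is missing, so
# guard before formatting (`device` is an assumed SoCo instance):
# info = device.get_current_track_info()
# if info["title"]:
#     print("{artist} - {title} [{position}/{duration}]".format(**info))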
def get_speaker_info(self, refresh=False, timeout=None):
"""Get information about the Sonos speaker.
Arguments:
refresh(bool): Refresh the speaker info cache.
timeout: How long to wait for the server to send
data before giving up, as a float, or a
``(connect timeout, read timeout)`` tuple
e.g. (3, 5). Default is no timeout.
Returns:
dict: Information about the Sonos speaker, such as the UID,
MAC Address, and Zone Name.
"""
if self.speaker_info and refresh is False:
return self.speaker_info
else:
response = requests.get(
"http://" + self.ip_address + ":1400/xml/device_description.xml",
timeout=timeout,
)
dom = XML.fromstring(response.content)
device = dom.find("{urn:schemas-upnp-org:device-1-0}device")
if device is not None:
self.speaker_info["zone_name"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}roomName"
)
# no zone icon in device_description.xml -> player icon
self.speaker_info["player_icon"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}iconList/"
"{urn:schemas-upnp-org:device-1-0}icon/"
"{urn:schemas-upnp-org:device-1-0}url"
)
self.speaker_info["uid"] = self.uid
self.speaker_info["serial_number"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}serialNum"
)
self.speaker_info["software_version"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}softwareVersion"
)
self.speaker_info["hardware_version"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}hardwareVersion"
)
self.speaker_info["model_number"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}modelNumber"
)
self.speaker_info["model_name"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}modelName"
)
self.speaker_info["display_version"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}displayVersion"
)
# no mac address - extract from serial number
mac = self.speaker_info["serial_number"].split(":")[0]
self.speaker_info["mac_address"] = mac
return self.speaker_info
return None
def get_current_transport_info(self):
"""Get the current playback state.
Returns:
dict: The following information about the
speaker's playing state:
* current_transport_state (``PLAYING``, ``TRANSITIONING``,
``PAUSED_PLAYBACK``, ``STOPPED``)
* current_transport_status (OK, ?)
* current_speed(1, ?)
This allows us to know whether the speaker is playing or not. The
other possible values of CurrentTransportStatus and CurrentSpeed
are not known.
"""
response = self.avTransport.GetTransportInfo(
[
("InstanceID", 0),
]
)
playstate = {
"current_transport_status": "",
"current_transport_state": "",
"current_transport_speed": "",
}
playstate["current_transport_state"] = response["CurrentTransportState"]
playstate["current_transport_status"] = response["CurrentTransportStatus"]
playstate["current_transport_speed"] = response["CurrentSpeed"]
return playstate
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
"""Get information about the queue.
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A :py:class:`~.soco.data_structures.Queue` object
This method is heavily based on Sam Soffes' (aka soffes) Ruby
implementation
"""
queue = []
response = self.contentDirectory.Browse(
[
("ObjectID", "Q:0"),
("BrowseFlag", "BrowseDirectChildren"),
("Filter", "*"),
("StartingIndex", start),
("RequestedCount", max_items),
("SortCriteria", ""),
]
)
result = response["Result"]
metadata = {}
for tag in ["NumberReturned", "TotalMatches", "UpdateID"]:
metadata[camel_to_underscore(tag)] = int(response[tag])
# I'm not sure this is necessary (any more). Even with an empty queue,
# there is still a result object. This should be investigated.
if not result:
# pylint: disable=star-args
return Queue(queue, **metadata)
items = from_didl_string(result)
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self.music_library._update_album_art_to_full_uri(item)
queue.append(item)
# pylint: disable=star-args
return Queue(queue, **metadata)
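# Hedged usage sketch (`device` assumed): page through the queue 100 items
# at a time; the returned Queue also carries number_returned/total_matches/
# update_id from the Browse response, and its items are DIDL objects which
# typically expose a .title attribute:
# queue = device.get_queue(start=0, max_items=100)
# for item in queue:
#     print(item.title)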
@property
def queue_size(self):
"""int: Size of the queue."""
response = self.contentDirectory.Browse(
[
("ObjectID", "Q:0"),
("BrowseFlag", "BrowseMetadata"),
("Filter", "*"),
("StartingIndex", 0),
("RequestedCount", | |
is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result");
def get_slice(self, key, column_parent, predicate, consistency_level):
"""
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
self.send_get_slice(key, column_parent, predicate, consistency_level)
return self.recv_get_slice()
def send_get_slice(self, key, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('get_slice', TMessageType.CALL, self._seqid)
args = get_slice_args()
args.key = key
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_slice(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_slice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_slice failed: unknown result");
def get_count(self, key, column_parent, predicate, consistency_level):
"""
returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
<code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
self.send_get_count(key, column_parent, predicate, consistency_level)
return self.recv_get_count()
def send_get_count(self, key, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('get_count', TMessageType.CALL, self._seqid)
args = get_count_args()
args.key = key
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_count(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_count_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_count failed: unknown result");
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
"""
Performs a get_slice for column_parent and predicate for the given keys in parallel.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
return self.recv_multiget_slice()
def send_multiget_slice(self, keys, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('multiget_slice', TMessageType.CALL, self._seqid)
args = multiget_slice_args()
args.keys = keys
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_multiget_slice(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = multiget_slice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result");
def multiget_count(self, keys, column_parent, predicate, consistency_level):
"""
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
self.send_multiget_count(keys, column_parent, predicate, consistency_level)
return self.recv_multiget_count()
def send_multiget_count(self, keys, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('multiget_count', TMessageType.CALL, self._seqid)
args = multiget_count_args()
args.keys = keys
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_multiget_count(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = multiget_count_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result");
def get_range_slices(self, column_parent, predicate, range, consistency_level):
"""
returns a subset of columns for a contiguous range of keys.
Parameters:
- column_parent
- predicate
- range
- consistency_level
"""
self.send_get_range_slices(column_parent, predicate, range, consistency_level)
return self.recv_get_range_slices()
def send_get_range_slices(self, column_parent, predicate, range, consistency_level):
self._oprot.writeMessageBegin('get_range_slices', TMessageType.CALL, self._seqid)
args = get_range_slices_args()
args.column_parent = column_parent
args.predicate = predicate
args.range = range
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_range_slices(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_range_slices_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result");
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
"""
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
Parameters:
- column_parent
- index_clause
- column_predicate
- consistency_level
"""
self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
return self.recv_get_indexed_slices()
def send_get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
self._oprot.writeMessageBegin('get_indexed_slices', TMessageType.CALL, self._seqid)
args = get_indexed_slices_args()
args.column_parent = column_parent
args.index_clause = index_clause
args.column_predicate = column_predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_indexed_slices(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_indexed_slices_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result");
def insert(self, key, column_parent, column, consistency_level):
"""
Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
self.send_insert(key, column_parent, column, consistency_level)
self.recv_insert()
def send_insert(self, key, column_parent, column, consistency_level):
self._oprot.writeMessageBegin('insert', TMessageType.CALL, self._seqid)
args = insert_args()
args.key = key
args.column_parent = column_parent
args.column = column
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_insert(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = insert_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def add(self, key, column_parent, column, consistency_level):
"""
Increment or decrement a counter.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
self.send_add(key, column_parent, column, consistency_level)
self.recv_add()
def send_add(self, key, column_parent, column, consistency_level):
self._oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)
args = add_args()
args.key = key
args.column_parent = column_parent
args.column = column
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_add(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = add_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def remove(self, key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
"""
self.send_remove(key, column_path, timestamp, consistency_level)
self.recv_remove()
def send_remove(self, key, column_path, timestamp, consistency_level):
self._oprot.writeMessageBegin('remove', TMessageType.CALL, self._seqid)
args = remove_args()
args.key = key
args.column_path = column_path
args.timestamp = timestamp
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = remove_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def remove_counter(self, key, path, consistency_level):
"""
Remove a counter at the specified location.
Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
until the delete has reached all the nodes and all of them have been fully compacted.
Parameters:
- key
- path
- consistency_level
"""
self.send_remove_counter(key, path, consistency_level)
self.recv_remove_counter()
def send_remove_counter(self, key, path, consistency_level):
self._oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
args = remove_counter_args()
args.key = key
args.path = path
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_counter(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = remove_counter_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def batch_mutate(self, mutation_map, consistency_level):
"""
Mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation
return new_dict
def set_list(group, name, list_):
"""
Saves list as a h5py group inside 'group'
Args:
group (h5py.Group):
name (str): Name of list
list_ (list):
Returns:
(None):
"""
lg = group.require_group(name)
lg.attrs['description'] = 'list'
for i, v in enumerate(list_):
if isinstance(v, (np.ndarray, h5py.Dataset)):
set_data(lg, str(i), v)
else:
set_attr(lg, str(i), v)
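# Hedged round-trip sketch for set_list/get_list below (assumes an open
# h5py.File `f` and the set_data/set_attr/get_attr helpers from this module):
# set_list(f, 'my_list', [1, 'two', np.arange(3)])
# restored = get_list(f, 'my_list')  # -> [1, 'two', array([0, 1, 2])]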
def get_list(group, name):
"""
Inverse of set_list
Args:
group (h5py.Group):
name (str): name of list in group
Returns:
(list):
"""
lg = group.get(name)
assert isinstance(lg, h5py.Group)
assert lg.attrs.get('description') == 'list'
all_keys = set(lg.keys()).union(lg.attrs.keys()) - {'description'}
vals = dict()
for k in all_keys:  # iterate attrs and datasets alike, not just lg.keys()
vals[k] = get_attr(lg, k)
if vals[k] is None:  # For getting datasets, but will default to None if it doesn't exist
v = lg.get(k, None)
vals[k] = v if v is None else v[:]
return [vals[k] for k in sorted(vals, key=int)]  # keys are stringified indices, so sort numerically
def save_dict_to_hdf_group(group: h5py.Group, dictionary: dict):
"""
Saves dictionary to a group (each entry can contain more dictionaries etc)
@param group:
@type group:
@param dictionary:
@type dictionary:
@return:
@rtype:
"""
group.attrs['description'] = 'simple dictionary'
for k, v in dictionary.items():
set_attr(group, k, v)
def load_dict_from_hdf_group(group: h5py.Group):
"""Inverse of save_simple_dict_to_hdf returning to same form"""
d = {}
for k, v in group.attrs.items():
if k != 'description':
d[k] = get_attr(group, k, None)
for k, g in group.items():
if isinstance(g, h5py.Group) and g.attrs.get('description') == 'simple dictionary':
d[k] = load_dict_from_hdf_group(g)
return d
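# Hedged round-trip sketch (assumes an open h5py.Group `g`):
# save_dict_to_hdf_group(g, {'a': 1, 'nested': {'b': 'two'}})
# assert load_dict_from_hdf_group(g) == {'a': 1, 'nested': {'b': 'two'}}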
def save_namedtuple_to_group(ntuple: NamedTuple, group: h5py.Group):
"""Saves named tuple inside group given"""
group.attrs['description'] = 'NamedTuple'
group.attrs['NT_name'] = ntuple.__class__.__name__
for key, val in ntuple._asdict().items():
set_attr(group, key, val) # Store as attrs of group in HDF
def save_dataclass_to_group(dataclass, group: h5py.Group):
"""Saves dataclass inside group given"""
assert is_dataclass(dataclass)
group.attrs['description'] = 'dataclass'
dc_name = dataclass.__class__.__name__
if 'DC_name' in group.attrs.keys() and (n := group.attrs['DC_name']) != dc_name:
raise TypeError(f'Trying to store dataclass with name {dc_name} where a dataclass with name {n} '
f'already exists')
elif 'DC_name' not in group.attrs.keys():
group.attrs['DC_name'] = dc_name
group.attrs['DC_class'] = getsource(dataclass.__class__)
# for key, val in asdict(dataclass).items(): # This tries to be too clever and turn everything into dicts, which does not work for lm.Parameters and I don't know what else
for k in dataclass.__annotations__:
v = getattr(dataclass, k)
set_attr(group, k, v)
def load_group_to_namedtuple(group: h5py.Group):
"""Returns namedtuple with name of group and key: values of group attrs
e.g. srs1 group which has gpib: 1... will be returned as an srs1 namedtuple with .gpib etc
"""
# Check it was stored as a namedTuple
if group.attrs.get('description', None) != 'NamedTuple':
raise ValueError(
f'Trying to load_group_to_named_tuple which has description: {group.attrs.get("description", None)}')
# Get the name of the NamedTuple either through the stored name or the group name
name = group.attrs.get('NT_name', None)
if name is None:
logger.warning('Did not find "name" attribute for NamedTuple, using folder name instead')
name = group.name.split('/')[-1]
# d = {key: val for key, val in group.attrs.items()}
d = {key: get_attr(group, key) for key in group.attrs.keys()}
# Remove HDF only descriptors
for k in ['description', 'NT_name']:
if k in d.keys():
del d[k]
# Make the NamedTuple
ntuple = namedtuple(name, d.keys())
filled_tuple = ntuple(**d) # Put values into tuple
return filled_tuple
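# Hedged round-trip sketch (assumes an open h5py.Group `g`):
# Point = namedtuple('Point', ['x', 'y'])
# save_namedtuple_to_group(Point(x=1, y=2), g)
# p = load_group_to_namedtuple(g)  # -> Point(x=1, y=2)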
# TODO: Move to better place
def _isnamedtupleinstance(x):
"""https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple"""
t = type(x)
b = t.__bases__
if len(b) != 1 or b[0] != tuple: return False
f = getattr(t, '_fields', None)
if not isinstance(f, tuple): return False
return all(type(n) == str for n in f)
def check_group_attr_overlap(group: h5py.Group, make_unique=False, exceptions=None):
"""Checks if there are any keys in a group which are the same as keys of attrs in that group"""
group_keys = group.keys()
attr_keys = group.attrs.keys()
exception_keys = set(exceptions) if exceptions else set()
if keys := (set(group_keys) & set(attr_keys)) - exception_keys:
if make_unique is True:
logger.info(f'In group [{group.name}], changing {keys} in attrs to make unique from group keys')
for key in keys:
group.attrs[f'{key}_attr'] = group.attrs[key]  # setattr on .attrs would not store an HDF5 attribute
del group.attrs[key]
else:
logger.warning(f'Keys: {keys} are keys in both the group and group attrs of {group.name}. No changes made.')
def match_name_in_group(names, data_group: Union[h5py.File, h5py.Group]):
"""
Returns the first name from names which is a dataset in data_group
Args:
names (): list of potential names in data_group
data_group (): The group (or hdf) to look for datasets in
Returns:
First name which is a dataset or None if not found
"""
names = CU.ensure_list(names)
for i, name in enumerate(names):
if name in data_group.keys() and isinstance(data_group[name], h5py.Dataset):
return name, i
logger.warning(f'[{names}] not found in [{data_group.name}]')
return None, None
class ThreadID:
def __init__(self, target_mode: str):
self.id = threading.get_ident()
self.target_mode = target_mode
self.current_status = None
class ThreadQueue:
def __init__(self):
"""Need to make sure this is only called once per process (i.e. threadlock wherever this is being created, and
check if it already exists first)"""
self._lock = threading.Lock()
self.queue = []
def put(self, entry: ThreadID):
"""
Put new thread into waiting queue
Args:
entry (ThreadID): ThreadID object to add to queue
Returns:
"""
with self._lock:
self.queue.append(entry)
def get_next(self):
with self._lock:
return self.queue.pop(0)
READ = ('r',)
WRITE = ('r+', 'w', 'w+', 'a')
_NOT_SET = object()
class ThreadManager:
def __init__(self):
self.waiting_write_threads = 0
self.waiting_read_threads = 0
self.active_write_thread: Optional[Tuple[int, datetime.datetime]] = None # Only ever 1 write thread
self.active_read_threads: Dict[int, datetime.datetime] = dict() # Dict of {thread_id: datetime}
self.stashed_read_threads: Dict[int, datetime.datetime] = dict() # Dict of {thread_id: datetime}
self.lock = threading.RLock()
self.read_condition = threading.Condition(self.lock) # Intended to release all at once
self.write_condition = threading.Condition(self.lock) # Intended to only release 1 at a time
self.manager_condition = threading.Condition(self.lock) # Manages the read and write condition
self.manager_event = threading.Event() # Triggers manager to look for something
self.manager_thread = None
self.start_scheduler()
def get_condition(self, requested_mode: str):
if requested_mode == 'r':
condition = self.read_condition
elif requested_mode == 'w':
condition = self.write_condition
else:
raise NotImplementedError(f'{requested_mode} not recognized')
return condition
def add_to_queue(self, requested_mode: str):
"""Add thread into either 'r' or 'w' queue and then get manager to figure out if anything should start"""
with self.lock:
self.start_scheduler()
# thread_id = threading.get_ident()
#
# # TODO: Do I need to do this check here?
# if thread_id in self.active_read_threads:
# self.stash_active_read()
if requested_mode == 'r':
self.waiting_read_threads += 1
elif requested_mode == 'w':
self.waiting_write_threads += 1
self.manager_event.set()
return self.waiting_read_threads, self.waiting_write_threads
def stash_active_read(self):
"""Stashes current active read thread so that it can become a waiting write thread without waiting for itself"""
with self.lock:
thread_id = threading.get_ident()
if thread_id in self.active_read_threads:
if thread_id in self.stashed_read_threads:
raise RuntimeError(f'{thread_id} has already been stashed before. Should not be trying to stash '
f'again')
self.stashed_read_threads[thread_id] = self.active_read_threads.pop(thread_id)
elif self.active_write_thread and thread_id == self.active_write_thread[0]:
raise RuntimeError(f'{thread_id} is attempting to be stashed as a write thread. This should '
f'not happen')
else:
raise KeyError(f'{thread_id} is not an active thread')
def set_active(self, mode: str):
"""Adds current thread to active list"""
with self.lock:
thread_id = threading.get_ident()
if mode == 'r':
self.active_read_threads[thread_id] = datetime.datetime.now()
elif mode == 'w':
self.active_write_thread = (thread_id, datetime.datetime.now())
else:
raise NotImplementedError
# No updates to do upon adding threads (no self.manager_condition.notify())
def remove_active(self):
"""Removes current thread from active lists, and restores stashed read status if present
(i.e. if finishing the initial call to write part and returning back to read only)"""
with self.lock:
self.start_scheduler()
thread_id = threading.get_ident()
if thread_id in self.active_read_threads:
self.active_read_threads.pop(thread_id)
elif self.active_write_thread and self.active_write_thread[0] == thread_id:
self.active_write_thread = None
if thread_id in self.stashed_read_threads:
self.active_read_threads[thread_id] = self.stashed_read_threads.pop(thread_id)
else:
raise RuntimeError(f'{thread_id} not recognized')
self.manager_event.set()
return True
def get_active_threads(self) -> Dict[int, datetime.datetime]:
"""Returns a dict of all current running threads"""
with self.lock:
active = copy.copy(self.active_read_threads)
if self.active_write_thread:
id_, date = self.active_write_thread
active[id_] = date
return active
def get_stashed_read_threads(self) -> Dict[int, datetime.datetime]:
"""Returns a dict of all current stashed read threads (i.e. threads that were in read but wanted to go to write mode"""
with self.lock:
stashed = copy.copy(self.stashed_read_threads)
return stashed
def start_scheduler(self):
"""Starts the condition watcher thread which will let threads know when they can have access to the HDF"""
def watcher():
while True:
triggered = self.manager_event.wait(timeout=10) # Only check when updates to active_threads
                with self.manager_condition:  # Only acquire the manager lock here (waiting for event blocks otherwise)
if self.manager_event.is_set():
self.manager_event.clear()
self.notify_relevant_threads()
else:
# logger.error(f'{threading.get_ident()} manager thread timed out')
| |
from netapp.netapp_object import NetAppObject
class SnapmirrorInfo(NetAppObject):
"""
Information about the SnapMirror Relationship.
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_relationship_progress = None
@property
def relationship_progress(self):
"""
The total number of bytes that have been processed so far
for the current activity of the relationship as returned
in the relationship-status. This is set only when the
relationship-status indicates that an activity is in
progress.
Attributes: non-creatable, non-modifiable
"""
return self._relationship_progress
@relationship_progress.setter
def relationship_progress(self, val):
        if val is not None:
self.validate('relationship_progress', val)
self._relationship_progress = val
_newest_snapshot = None
@property
def newest_snapshot(self):
"""
The name of the newest Snapshot copy on the destination
volume.
Attributes: non-creatable, non-modifiable
"""
return self._newest_snapshot
@newest_snapshot.setter
def newest_snapshot(self, val):
        if val is not None:
self.validate('newest_snapshot', val)
self._newest_snapshot = val
_relationship_status = None
@property
def relationship_status(self):
"""
Specifies the status of the SnapMirror relationship.
Possible values are:
<ul>
<li>'idle' - Transfers are enabled, and no transfer is
in progress, <li>'transferring' - SnapMirror transfers
are enabled and a transfer is in progress,
<li>'checking'- The destination volume is undergoing a
diagnostic check and no transfer is in progress. Only
applicable for SnapMirror relationships with the
relationship-control-plane field set to 'v1',
<li>'quiescing' - A SnapMirror transfer is in progress,
additional transfers are disabled,
<li>'quiesced' - SnapMirror transfers are disabled, no
transfer is in progress,
<li>'queued' - SnapMirror transfers are enabled, and a
transfer operation has been accepted and queued in the
system,
<li>'preparing' - SnapMirror transfers are enabled,
currently in the pre-transfer phase for Vault incremental
transfers,
<li>'finalizing' - SnapMirror transfers are enabled,
currently in the post-transfer phase for vault
incremental transfers,
<li>'aborting' - SnapMirror transfers are enabled,
however a transfer abort operation that may include
removal of the checkpoint is underway.
</ul>
Attributes: non-creatable, non-modifiable
"""
return self._relationship_status
@relationship_status.setter
def relationship_status(self, val):
        if val is not None:
self.validate('relationship_status', val)
self._relationship_status = val
_last_transfer_type = None
@property
def last_transfer_type(self):
"""
The type of the previous transfer for the relationship.
This parameter is only available on Data ONTAP 8.2 or
later operating in Cluster-Mode if the relationship
control plane is 'v2'.
Attributes: non-creatable, non-modifiable
Possible values:
<ul>
<li> "initialize" ,
<li> "update" ,
<li> "resync" ,
<li> "restore" ,
<li> "check"
</ul>
"""
return self._last_transfer_type
@last_transfer_type.setter
def last_transfer_type(self, val):
        if val is not None:
self.validate('last_transfer_type', val)
self._last_transfer_type = val
_snapshot_checkpoint = None
@property
def snapshot_checkpoint(self):
"""
The number of bytes transferred as recorded for the
checkpoint of the current or most recent transfer
snapshot.
Attributes: non-creatable, non-modifiable
"""
return self._snapshot_checkpoint
@snapshot_checkpoint.setter
def snapshot_checkpoint(self, val):
        if val is not None:
self.validate('snapshot_checkpoint', val)
self._snapshot_checkpoint = val
_exported_snapshot_timestamp = None
@property
def exported_snapshot_timestamp(self):
"""
The timestamp of the exported Snapshot copy on the
destination volume, in seconds since Jan 1, 1970.
Attributes: non-creatable, non-modifiable
"""
return self._exported_snapshot_timestamp
@exported_snapshot_timestamp.setter
def exported_snapshot_timestamp(self, val):
        if val is not None:
self.validate('exported_snapshot_timestamp', val)
self._exported_snapshot_timestamp = val
_last_transfer_size = None
@property
def last_transfer_size(self):
"""
The total number of bytes transferred as part of the last
transfer. This parameter is available only on Data ONTAP
8.2 or later operating in Cluster-Mode if the
relationship control plane is 'v2', and if the last
transfer was successful.
Attributes: non-creatable, non-modifiable
"""
return self._last_transfer_size
@last_transfer_size.setter
def last_transfer_size(self, val):
        if val is not None:
self.validate('last_transfer_size', val)
self._last_transfer_size = val
_destination_vserver = None
@property
def destination_vserver(self):
"""
Specifies the name of the destination Vserver for the
SnapMirror relationship. If using this parameter, the
following parameters should also be specified:
<ul>
<li> The name of destination volume.
<li> The name of the destination cluster on Data ONTAP
8.1, or on Data ONTAP 8.2 or later operating in
Cluster-Mode if the relationship control plane is 'v1'.
</ul>
Attributes: key, optional-for-create, non-modifiable
"""
return self._destination_vserver
@destination_vserver.setter
def destination_vserver(self, val):
        if val is not None:
self.validate('destination_vserver', val)
self._destination_vserver = val
_destination_volume = None
@property
def destination_volume(self):
"""
Specifies the name of the destination volume for the
SnapMirror relationship. If using this parameter, the
following parameters should also be specified:
<ul>
<li> The name of the destination Vserver.
<li> The name of the destination cluster on Data ONTAP
8.1 operating in Cluster-Mode, or on Data ONTAP 8.2 or
later operating in Cluster-Mode if the relationship
control plane is 'v1'.
</ul>
Attributes: key, optional-for-create, non-modifiable
"""
return self._destination_volume
@destination_volume.setter
def destination_volume(self, val):
        if val is not None:
self.validate('destination_volume', val)
self._destination_volume = val
_mirror_state = None
@property
def mirror_state(self):
"""
Specifies the mirror state of the SnapMirror
relationship. Possible values are:
<ul>
<li>'uninitialized' - Destination volume has not been
initialized,
<li>'snapmirrored' - Destination volume has been
initialized and is ready to receive SnapMirror updates,
<li>'broken-off' - Destination volume is RW and
snapshots are present.
</ul>
Attributes: non-creatable, non-modifiable
"""
return self._mirror_state
@mirror_state.setter
def mirror_state(self, val):
        if val is not None:
self.validate('mirror_state', val)
self._mirror_state = val
_is_healthy = None
@property
def is_healthy(self):
"""
False if the last manual or scheduled update failed or
was aborted, or if the last scheduled update was delayed.
Otherwise true.
Attributes: non-creatable, non-modifiable
"""
return self._is_healthy
@is_healthy.setter
def is_healthy(self, val):
        if val is not None:
self.validate('is_healthy', val)
self._is_healthy = val
_last_transfer_duration = None
@property
def last_transfer_duration(self):
"""
The amount of time in seconds it took for the last
transfer to complete. This parameter is available only on
Data ONTAP 8.2 or later operating in Cluster-Mode if the
relationship control plane is 'v2', and if the last
transfer was successful.
Attributes: non-creatable, non-modifiable
"""
return self._last_transfer_duration
@last_transfer_duration.setter
def last_transfer_duration(self, val):
        if val is not None:
self.validate('last_transfer_duration', val)
self._last_transfer_duration = val
_last_transfer_error = None
@property
def last_transfer_error(self):
"""
A message describing the cause of the last transfer
failure. This parameter is only available on Data ONTAP
8.2 or later operating in Cluster-Mode, if the
relationship control plane is 'v2', and if the last
transfer was unsuccessful.
Attributes: non-creatable, non-modifiable
"""
return self._last_transfer_error
@last_transfer_error.setter
def last_transfer_error(self, val):
        if val is not None:
self.validate('last_transfer_error', val)
self._last_transfer_error = val
_destination_cluster = None
@property
def destination_cluster(self):
"""
Specifies the destination cluster name for the SnapMirror
relationship. The parameters for the name of the
destination Vserver, and the name of the destination
volume must also be specified if using this parameter.
This parameter is available only on:
<ul>
<li> Data ONTAP 8.1 operating in Cluster-Mode.
<li> Data ONTAP 8.2 or later operating in Cluster-Mode if
the relationship control plane is 'v1'.
</ul>
Attributes: key, optional-for-create, non-modifiable
"""
return self._destination_cluster
@destination_cluster.setter
def destination_cluster(self, val):
        if val is not None:
self.validate('destination_cluster', val)
self._destination_cluster = val
_is_constituent = None
@property
def is_constituent(self):
"""
Specifies whether or not the SnapMirror relationship is
between Infinite Volume constituent volumes.
Attributes: non-creatable, non-modifiable
"""
return self._is_constituent
@is_constituent.setter
def is_constituent(self, val):
        if val is not None:
self.validate('is_constituent', val)
self._is_constituent = val
_source_vserver = None
@property
def source_vserver(self):
"""
Specifies the name of the source Vserver for the
SnapMirror relationship. If using this parameter, the
following parameters should also be specified:
<ul>
<li> The name of source volume.
<li> The name of the source cluster on Data ONTAP 8.1, or
on Data ONTAP 8.2 or later operating in Cluster-Mode if
the relationship control plane is 'v1'.
</ul>
Attributes: key, optional-for-create, non-modifiable
"""
return self._source_vserver
@source_vserver.setter
def source_vserver(self, val):
        if val is not None:
            self.validate('source_vserver', val)
        self._source_vserver = val
| |
        test_id = self.id()
pi_names = ['%s_phy_intf_%s' % (test_id, i) for i in range(1, 5)]
pi_objs = self._create_pi_objects(pr_obj, pi_names)
pi1_fq_name, pi2_fq_name, pi3_fq_name, pi4_fq_name = [
pi_objs.get(pi_name).get_fq_name() for pi_name in pi_names]
pi1_obj, pi2_obj, pi3_obj, pi4_obj = [
pi_objs.get(pi_name) for pi_name in pi_names]
# Create VPG
vpg_objs = self._create_vpgs(fabric_obj, ['vpg-1'])
vpg_obj = vpg_objs['vpg-1']
vpg_name = vpg_obj.get_fq_name()
# Create VN
vn_names = ['vn-%s-%s' % (test_id, count) for count in range(1, 3)]
vn_objs = self._create_vns(proj_obj, vn_names)
vn1_obj, vn2_obj = [vn_objs[vn_name] for vn_name in vn_names]
# create a vmi with two phy int, tagged vlan case
vmi_infos = [
{'name': '%s-1' % test_id, 'vmi_id': '1',
'parent_obj': proj_obj, 'vn': vn1_obj, 'vpg': vpg_obj.uuid,
'fabric': fabric_name, 'pis': [pi1_fq_name, pi2_fq_name],
'vlan': vlan_1, 'is_untagged': False}]
vmi_objs = self._create_vmis(vmi_infos)
vmi1_obj = vmi_objs.get('%s-1' % test_id)
vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)
# verify that it has refs to two phy intf
phy_refs = vpg_obj.get_physical_interface_refs()
assert len(phy_refs) == 2
# Verify if Znodes are created for VMI
mock_zk = self._api_server._db_conn._zk_db
# Path for ZNode1 creation
if validation == 'serviceprovider':
tagged_validation_path = os.path.join(
_DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,
'virtual-port-group:%s' % vpg_obj.uuid,
'virtual-network:%s' % vn1_obj.uuid,
'vlan:%s' % vlan_1)
else:
tagged_vlan_validation_path = os.path.join(
_DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,
'virtual-port-group:%s' % vpg_obj.uuid,
'virtual-network:%s' % vn1_obj.uuid)
znode_vlan_1_id = mock_zk._zk_client.read_node(
tagged_vlan_validation_path)
tagged_validation_path = os.path.join(
_DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,
'virtual-port-group:%s' % vpg_obj.uuid,
'vlan:%s' % znode_vlan_1_id)
# Read Znode1
znode_vmi_1_uuid = mock_zk._zk_client.read_node(tagged_validation_path)
# Verify if correct Znode is created
assert znode_vmi_1_uuid == vmi1_obj.uuid, \
"Znode for VMI_1 (%s) doesn't exist" % vmi1_obj.uuid
# remove one phy.int from VMI and ensure annotations remains
# same
kv_pairs = self._create_kv_pairs(pi1_fq_name, fabric_name, vpg_name)
vmi1_obj.set_virtual_machine_interface_bindings(kv_pairs)
self.api.virtual_machine_interface_update(vmi1_obj)
vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)
# verify that it has refs to one phy intf
phy_refs = vpg_obj.get_physical_interface_refs()
assert len(phy_refs) == 1
# Read Znode1
znode_vmi_1_uuid = mock_zk._zk_client.read_node(tagged_validation_path)
# Verify if correct Znode is created
assert znode_vmi_1_uuid == vmi1_obj.uuid, \
"Znode for VMI_1 (%s) doesn't exist" % vmi1_obj.uuid
# create a vmi with two phy int, untagged vlan case
        # this VMI addition replaces the phy ints added by the previous
        # VMI. Per DM, this is the current implementation
vmi_infos = [
{'name': '%s-2' % test_id, 'vmi_id': '2',
'parent_obj': proj_obj, 'vn': vn2_obj, 'vpg': vpg_obj.uuid,
'fabric': fabric_name, 'pis': [pi3_fq_name, pi4_fq_name],
'vlan': vlan_2, 'is_untagged': True}]
vmi_objs = self._create_vmis(vmi_infos)
vmi2_obj = vmi_objs.get('%s-2' % test_id)
vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)
# verify that it has refs to two phy intf
phy_refs = vpg_obj.get_physical_interface_refs()
assert len(phy_refs) == 2
# remove one phy.int from VMI
# this is untagged case
kv_pairs = self._create_kv_pairs(
pi3_fq_name, fabric_name, vpg_name, tor_port_vlan_id=vlan_2)
vmi2_obj.set_virtual_machine_interface_bindings(kv_pairs)
self.api.virtual_machine_interface_update(vmi2_obj)
# verify that it has refs to one phy intf
vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)
phy_refs = vpg_obj.get_physical_interface_refs()
assert len(phy_refs) == 1
# Path for ZNode2 creation
if validation == 'serviceprovider':
untagged_validation_path = os.path.join(
_DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,
'virtual-port-group:%s' % vpg_obj.uuid,
'untagged')
else:
untagged_validation_path = os.path.join(
_DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,
'virtual-port-group:%s' % vpg_obj.uuid,
'untagged')
# Read Znode2
znode_vmi_2_uuid = mock_zk._zk_client.read_node(
untagged_validation_path)
# Verify if correct Znode is created
assert znode_vmi_2_uuid == vmi2_obj.uuid, \
"Znode for VMI_2 (%s) doesn't exist" % vmi2_obj.uuid
# Delete VMIs from VPG
self.api.virtual_machine_interface_delete(id=vmi1_obj.uuid)
self.api.virtual_machine_interface_delete(id=vmi2_obj.uuid)
self.api.virtual_port_group_delete(id=vpg_obj.uuid)
self.api.physical_interface_delete(id=pi1_obj.uuid)
self.api.physical_interface_delete(id=pi2_obj.uuid)
self.api.physical_interface_delete(id=pi3_obj.uuid)
self.api.physical_interface_delete(id=pi4_obj.uuid)
self.api.physical_router_delete(id=pr_obj.uuid)
self.api.fabric_delete(id=fabric_obj.uuid)
def test_enterprise_delete_existing_pi_from_vmi(self):
"""Verify deleting a PI from VMI."""
validation = 'enterprise'
proj_obj, fabric_obj, pr_obj = self._create_prerequisites()
self._test_delete_existing_pi_from_vmi(
proj_obj, fabric_obj, pr_obj, validation)
def test_sp_delete_existing_pi_from_vmi(self):
"""Verify deleting a PI from VMI."""
validation = 'serviceprovider'
proj_obj, fabric_obj, pr_obj = self._create_prerequisites(
enterprise_style_flag=False)
self._test_delete_existing_pi_from_vmi(
proj_obj, fabric_obj, pr_obj, validation)
def test_verify_sp_reinit_remove_annotations(self):
"""Verify if annotations are removed from the service provider."""
proj_obj, fabric_obj, pr_obj = self._create_prerequisites(
enterprise_style_flag=False)
esi_id = '00:11:22:33:44:55:66:77:88:99'
vlan_1 = 42
vlan_2 = '4094'
pi_name = self.id() + '_physical_interface1'
pi = PhysicalInterface(name=pi_name,
parent_obj=pr_obj,
ethernet_segment_identifier=esi_id)
pi_uuid = self._vnc_lib.physical_interface_create(pi)
pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid)
fabric_name = fabric_obj.get_fq_name()
pi_fq_name = pi_obj.get_fq_name()
# Create VPG
vpg_name = "vpg-1"
vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)
vpg_uuid = self.api.virtual_port_group_create(vpg)
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)
vpg_name = vpg_obj.get_fq_name()
# Create single VN
vn1 = VirtualNetwork('vn1-%s' % (self.id()), parent_obj=proj_obj)
self.api.virtual_network_create(vn1)
        # Create a VMI that's attached to vpg-1 and has a reference
        # to vn1
vmi_obj_1 = VirtualMachineInterface(self.id() + "1",
parent_obj=proj_obj)
vmi_obj_1.set_virtual_network(vn1)
# Create KV_Pairs for this VMI
kv_pairs = self._create_kv_pairs(pi_fq_name,
fabric_name,
vpg_name)
vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)
vmi_obj_1.set_virtual_machine_interface_properties(
VirtualMachineInterfacePropertiesType(
sub_interface_vlan_tag=vlan_1))
vmi_uuid_1 = self.api.virtual_machine_interface_create(vmi_obj_1)
vpg_obj.add_virtual_machine_interface(vmi_obj_1)
mock_zk = self._api_server._db_conn._zk_db
        # Verify if Znode is created for VMI1
validation_node1 = os.path.join(
_DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,
'virtual-port-group:%s' % vpg_uuid,
'virtual-network:%s' % vn1.uuid,
'vlan:%s' % vlan_1)
# Read Znode
znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)
# Verify if correct Znodes are created
assert znode_vmi_1_uuid == vmi_uuid_1, \
"Znode for VMI_1 (%s) doesn't exist" % vmi_uuid_1
# Attach Second VMI with untagged vlan
vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)
self.api.virtual_network_create(vn2)
# Create first untagged VMI and attach it to Virtual Port Group
vmi_obj_2 = VirtualMachineInterface(self.id() + "2",
parent_obj=proj_obj)
vmi_obj_2.set_virtual_network(vn2)
# Create KV_Pairs for this VMI with an untagged VLAN
        # If tor_port_vlan_id is set, then it signifies an untagged VMI
kv_pairs = self._create_kv_pairs(pi_fq_name,
fabric_name,
vpg_name,
tor_port_vlan_id=vlan_2)
vmi_obj_2.set_virtual_machine_interface_bindings(kv_pairs)
vmi_uuid_2 = self.api.virtual_machine_interface_create(vmi_obj_2)
vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)
vpg_obj.add_virtual_machine_interface(vmi_obj_2)
        # Verify if Znode is created for VMI2
validation_node2 = os.path.join(
_DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,
'virtual-port-group:%s' % vpg_uuid,
'untagged')
# Set annotations in VPG
kvp_array = []
kvp = KeyValuePair(validation_node1, vmi_uuid_1)
kvp_array.append(kvp)
kvp = KeyValuePair(validation_node2, vmi_uuid_2)
kvp_array.append(kvp)
kvps = KeyValuePairs()
kvps.set_key_value_pair(kvp_array)
vpg_obj.set_annotations(kvps)
self.api.virtual_port_group_update(vpg_obj)
# Read Znode
znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)
# Verify if correct Znodes are created
assert znode_vmi_2_uuid == vmi_uuid_2, \
"Znode for VMI_2 (%s) doesn't exist" % vmi_uuid_2
# Delete all Znodes for VMI1, VMI2
mock_zk._zk_client.delete_node(validation_node1, True)
mock_zk._zk_client.delete_node(validation_node2, True)
# Verify if annotations are added in VPG object
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)
vpg_annotations = vpg_obj.get_annotations()
assert vpg_annotations, \
"Annotations not added for VPG (%s)" % vpg_obj.uuid
# manually setting contrail_version to 21.4
# so db_resync is run as part of upgrade scenario
self._api_server._args.contrail_version = '21.4'
self._api_server._db_conn._db_resync_done.clear()
# API server DB reinit
self._api_server._db_init_entries()
self._api_server._db_conn.wait_for_resync_done()
# Verify if Znodes are added back
znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)
znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)
# Verify if correct Znodes are created
assert znode_vmi_1_uuid == vmi_uuid_1, \
"Znode for VMI_1 (%s) doesn't exist" % vmi_uuid_1
assert znode_vmi_2_uuid == vmi_uuid_2, \
"Znode for VMI_2 (%s) doesn't exist" % vmi_uuid_2
# Verify if annotations are removed from VPG object
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)
vpg_annotations = vpg_obj.get_annotations()
assert vpg_annotations is None, \
"Annotations not removed for VPG (%s)" % vpg_obj.uuid
# Delete VMIs from VPG
self.api.virtual_machine_interface_delete(id=vmi_uuid_1)
self.api.virtual_machine_interface_delete(id=vmi_uuid_2)
self.api.virtual_port_group_delete(id=vpg_obj.uuid)
self.api.physical_interface_delete(id=pi_uuid)
self.api.physical_router_delete(id=pr_obj.uuid)
self.api.fabric_delete(id=fabric_obj.uuid)
        # restore the zknode to its original version
        # so other test cases run from the beginning
mock_zk._zk_client.update_node(PATH_SYNC, '2011')
def test_verify_enterprise_reinit_remove_annotations(self):
"""Verify if annotations are removed from the enterprise."""
proj_obj, fabric_obj, pr_obj = self._create_prerequisites(
enterprise_style_flag=True)
esi_id = 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:88:99'
vlan_1 = 42
vlan_2 = '4094'
pi_name = self.id() + '_physical_interface1'
pi = PhysicalInterface(name=pi_name,
parent_obj=pr_obj,
ethernet_segment_identifier=esi_id)
pi_uuid = self._vnc_lib.physical_interface_create(pi)
pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid)
fabric_name = fabric_obj.get_fq_name()
pi_fq_name = pi_obj.get_fq_name()
# Create VPG
vpg_name = "vpg-1"
vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)
vpg_uuid = self.api.virtual_port_group_create(vpg)
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)
vpg_name = vpg_obj.get_fq_name()
# Create single VN
vn1 = VirtualNetwork('vn1-%s' % (self.id()), parent_obj=proj_obj)
self.api.virtual_network_create(vn1)
        # Create a VMI that's attached to vpg-1 and has a reference
        # to vn1
vmi_obj_1 = VirtualMachineInterface(self.id() + "1",
parent_obj=proj_obj)
vmi_obj_1.set_virtual_network(vn1)
# Create KV_Pairs for this VMI
kv_pairs = self._create_kv_pairs(pi_fq_name,
fabric_name,
vpg_name)
vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)
vmi_obj_1.set_virtual_machine_interface_properties(
VirtualMachineInterfacePropertiesType(
sub_interface_vlan_tag=vlan_1))
vmi_uuid_1 = self.api.virtual_machine_interface_create(vmi_obj_1)
vpg_obj.add_virtual_machine_interface(vmi_obj_1)
mock_zk = self._api_server._db_conn._zk_db
        # Verify if Znode is created for VMI1
tagged_validation_node1 = os.path.join(
_DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,
'virtual-port-group:%s' % vpg.uuid,
'virtual-network:%s' % vn1.uuid)
znode_vlan_1_id = mock_zk._zk_client.read_node(
tagged_validation_node1)
validation_node1 = os.path.join(
_DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,
'virtual-port-group:%s' % vpg.uuid,
'vlan:%s' % znode_vlan_1_id)
# Read Znode
znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)
# Verify if correct Znodes are created
assert znode_vmi_1_uuid == vmi_uuid_1, \
"Znode for VMI_1 (%s) doesn't exist" % vmi_uuid_1
# Attach Second VMI with untagged vlan
vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)
self.api.virtual_network_create(vn2)
# Create first untagged VMI and attach it to Virtual Port Group
vmi_obj_2 = VirtualMachineInterface(self.id() + "2",
parent_obj=proj_obj)
vmi_obj_2.set_virtual_network(vn2)
# Create KV_Pairs for this VMI with an untagged VLAN
        # If tor_port_vlan_id is set, then it signifies an untagged VMI
kv_pairs = self._create_kv_pairs(pi_fq_name,
fabric_name,
vpg_name,
tor_port_vlan_id=vlan_2)
vmi_obj_2.set_virtual_machine_interface_bindings(kv_pairs)
vmi_uuid_2 = self.api.virtual_machine_interface_create(vmi_obj_2)
vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)
vpg_obj.add_virtual_machine_interface(vmi_obj_2)
        # Verify if Znode is created for VMI2
validation_node2 = os.path.join(
_DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,
'virtual-port-group:%s' % vpg_uuid,
'untagged')
# Set annotations in VPG
kvp_array = []
kvp = KeyValuePair(validation_node1, vmi_uuid_1)
kvp_array.append(kvp)
kvp = KeyValuePair(validation_node2, vmi_uuid_2)
kvp_array.append(kvp)
kvps = KeyValuePairs()
kvps.set_key_value_pair(kvp_array)
vpg_obj.set_annotations(kvps)
self.api.virtual_port_group_update(vpg_obj)
# Read Znode
znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gevent
from gevent import monkey, queue
from gevent.pool import Pool
from greenlet import GreenletExit
import os
import imp
import json
import time
import logging
import traceback
import datetime
from .utils import merge_cookie, load_module
from .mq import build_queue
from .fetcher import Fetcher
from .config import import_config, Config
from .db import Session, ScopedSession, SpiderProject, SpiderTask, SpiderScheduler, SpiderResult
from .cron import CronJob
from .processor import EasyProcessor
OK = True
START = 1
STOP = -1
class EasyCrawler:
def __init__(self, timeout=5, workers_count=1, min_capacity=10, pipeline_size=100, loop_once=False):
self.db = ScopedSession()
# print(self.db)
self.spiders = {}
self.queues = {}
self.processors = {}
self.projects = {}
self.jobs = {}
self.pool = Pool(1000)
self.timeout = timeout
self.loop_once = loop_once
self.qin = build_queue("redis", qname="command_q")
# self.qout = build_queue("redis")
self.jobs['processors'] = {}
self.jobs['common_tasks'] = []
self.load_spiders()
# print("projects:",self.projects)
# self.jobs['loader'] = self.pool.spawn(self.do_loader)
# self.queues['common'] = build_queue("redis", qname="common_q")
# for i in range(workers_count):
# project = Config()
# project.name = 'common'
# project.queue_name = 'common'
# job = self.pool.spawn(self.do_worker, project)
# self.jobs['common_tasks'] += [job]
# for project in self.projects:
# self.jobs += [gevent.spawn(self.do_worker, project.name, self.spiders.get(project.name))]
# self.jobs += [gevent.spawn(self.do_worker) for i in range(workers_count)]
# self.jobs += [gevent.spawn(self.do_pipeline)]
# self.job_count = len(self.jobs)
# self.lock = threading.Lock()
self.fetcher = Fetcher()
def load_spiders(self):
projects = SpiderProject.load_projects() #query.filter_by(status=1).all()
names = []
for project in projects:
names.append(project.name)
for project_name in names:
self._load_project(project_name)
# logging.debug("task queues:", self.queues)
def shutdown(self):
self.pool.kill()
def start(self):
self.jobs['scheduler'] = self.pool.spawn(self.do_scheduler)
self.jobs['command'] = self.pool.spawn(self.do_command)
self.pool.join()
# self.pool.start(self.jobs['scheduler'])
# self.pool.start(self.jobs['command'])
# gevent.joinall(jobs)
def do_command(self):
while True:
command = self.qin.get()
logging.debug("get a command %s" % json.dumps(command))
if command is None:
gevent.sleep(5)
continue
if 'op' in command:
op = command.get('op')
if op == 'exit':
self.pool.kill()
else:
target_type = command.get('type') #project, task
target = command.get('target')
logging.debug("get a %s command: %s/%s" % (op, target_type, target))
if target_type == 'project':
if op == 'start':
self._load_project(target)
if op == 'stop':
if target == 'all':
for prj in self.projects:
self._unload_project(prj)
else:
self._unload_project(target)
if op == 'reload':
self._unload_project(target)
job = self._load_project(target)
if job is not None:
self.pool.start(job)
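        # Example command payloads consumed by the loop above (keys are the
        # ones read via command.get(); the project names are illustrative):
        #   {"op": "exit"}
        #   {"op": "start",  "type": "project", "target": "myproject"}
        #   {"op": "stop",   "type": "project", "target": "all"}
        #   {"op": "reload", "type": "project", "target": "myproject"}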
# def _load_module(self, filepath):
# class_name = None
# expected_class = 'Spider'
# mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])
# if file_ext.lower() == '.py':
# py_mod = imp.load_source(mod_name, filepath)
# if hasattr(py_mod, expected_class):
# class_name = getattr(py_mod, expected_class)
# return class_name
# def _load_project(self, project):
# logging.debug("load project %s" % project)
# if project in self.projects:
# return
# try:
# dbproject = SpiderProject.query.filter_by(name=project).first()
# if dbproject is None:
# logging.error("project %s not exits, can't loading." % project)
# return
# newproject = Config()
# newproject.name = project
# newproject.load_time = time.time()
# if dbproject.queue_name == 'common' or dbproject.queue_name is None:
# newproject.queue_name = 'common'
# else:
# newproject.queue_name = dbproject.queue_name
# spider_cls = self._load_module("projects/%s/spider.py" % project)# import_object("projects.%s.spider.Spider"% (project))
# if spider_cls is None:
# logging.error("import module %s error.!" % project)
# return
# logging.debug("import spider %s" % project)
# config = import_config("projects/%s/project.yaml" % (project))
# spider = spider_cls()
# spider.config = config
# spider.status = START
# new_tasks=spider._on_start()
# self.spiders[project] = spider
# self.projects[project] = newproject
# if newproject.queue_name != 'common':
# self.queues[project] = build_queue("redis", qname="q_"+project)
# job = self.pool.spawn(self.do_worker, newproject)
# self.jobs['tasks'][project] = job
# self.pool.start(job)
# if new_tasks is not None:
# queue = self.queues.get(newproject.queue_name)
# for task in new_tasks:
# queue.put_first(task)
# except Exception as e:
# logging.error("load spider!\n%s" % traceback.format_exc())
# raise e
def _load_project(self, project):
logging.debug("load project %s" % project)
if project in self.projects:
return
return self._load_processor(project)
def _load_processor(self, project):
logging.debug("load processor %s" % project)
try:
dbproject = SpiderProject.query.filter_by(name=project).first()
if dbproject is None:
logging.error("project %s not exits, can't loading." % project)
return
newproject = Config()
newproject.name = project
newproject.load_time = time.time()
if dbproject.queue_name is None:
newproject.queue_name = dbproject.name
else:
newproject.queue_name = dbproject.queue_name
spider_cls = load_module("projects/%s/spider.py" % project)# import_object("projects.%s.spider.Spider"% (project))
if spider_cls is None:
logging.error("import module %s error.!" % project)
return
logging.debug("import spider %s" % project)
config = import_config("projects/%s/project.yaml" % (project))
spider = spider_cls()
spider.config = config
spider.status = START
self.spiders[project] = spider
self.projects[project] = newproject
queue_name = 'q_'+project #+"_"+str(int(time.time()))
spider._on_start()
# if new_tasks is not None:
# queue = build_queue('redis', queue_name)# self.queues.get(newproject.queue_name)
# for task in new_tasks:
# queue.put_first(task)
# queue.close()
if newproject.queue_name != 'common':
self.queues[project] = queue_name #build_queue("redis", qname="q_"+project)
processor = EasyProcessor(project, spider, queue_name)
job = self.pool.spawn(processor.run)
self.jobs['processors'][project] = job
# self.pool.start(job)
self.processors[project] = processor
return job
except Exception as e:
logging.error("load spider!\n%s" % traceback.format_exc())
raise e
return
def _unload_project(self, project):
logging.debug("unload project %s" % (project))
old_project = self.projects.get(project)
if old_project is None:
return
if old_project.queue_name != 'common':
            processor = self.processors.get(project)
            if processor is not None:
                # inq = build_queue('redis', qname=qname)
                # inq.put_first('')
                processor.status = 0
del self.processors[project]
del self.queues[project]
# job = self.jobs['tasks'].get(project)
# if job is not None:
# self.pool.killone(job, block=False)
# del self.jobs['tasks'][project]
# if project in self.queues:
# del self.queues[project]
# spider = self.spiders.get(project)
# spider.status = STOP
del self.spiders[project]
del self.projects[project]
def do_scheduler(self):
logging.debug("scheduler thread start")
while True:
now = datetime.datetime.now()
nowts = time.time()
new_projects = []
schedulers = self.db.query(SpiderScheduler).filter(SpiderScheduler.next_time<=nowts).all()
logging.debug("load schedulers length is %d" % (len(schedulers)))
for s in schedulers:
if s.project not in self.projects:
logging.debug("project [%s] doesn't started." % (s.project))
continue
if s.process is None or s.process == "":
process = {}
else:
process = json.loads(s.process)
task = {}
task['type'] = 'scheduler'
task['id'] = s.id
task['project'] = s.project
task['task_id'] = s.task_id
task['url'] = s.url
if 'crontab' in process:
crons = process.get('crontab')
cronjob = CronJob(crons)
next_time = cronjob.next(now)
# print("do scheduler", crons, now, nowts,next_time)
s.next_time = next_time
s.last_time = nowts
self.db.add(s)
if 'callback' in process:
task['callback'] = process.get('callback')
project = self.projects.get(s.project)
if project is not None:
inqname = self.queues.get(s.project)
# print("project queue:", s.project, inqname)
inq = build_queue('redis', qname=inqname)
inq.put_first(task)
inq.close()
new_projects.append(s.project)
self.db.commit()
logging.debug("load new projects :%s"% str(new_projects))
for project in new_projects:
processor = self.processors.get(project)
if processor is None or processor.status != 1:
self._load_processor(project)
gevent.sleep(10)
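    # Example of the SpiderScheduler.process JSON handled by the loop above
    # (keys are the ones this method reads; the crontab value is whatever
    # CronJob accepts -- a standard cron expression is assumed here):
    #   {"crontab": "*/10 * * * *", "callback": "on_schedule"}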
def do_processor(self, processor):
processor.run()
# def do_loader(self):
# try:
# while True:
# now = time.time()
# for k,project in self.projects.items():
# if project.load_time > now:
# continue
# spider = self.spiders.get(project.name)
# if spider is None or spider.status != START:
# logging.warn("spider (%s) is not ready." % project.name)
# print(spider, spider.status)
# continue
# taskq = self.queues.get(project.queue_name)
# if (project.queue_name != 'common' and taskq.qsize() > 0) or (project.queue_name=='common' and taskq.qsize() >= 1000):
# continue
# new_tasks = self._load_tasks(project.name)
# tasks_size = len(new_tasks)
# if tasks_size <= 0:
# logging.info('project [%s] load no task' % project.name)
# #project.load_time = now + 10*1000
# continue
# else:
# logging.info('project [%s] load %d tasks' % (project.name, tasks_size))
# for task in new_tasks:
# # print("put in queue", json.dumps(task))
# taskq.put(task)
# else:
# gevent.sleep(2)
# except Exception as e:
# logging.error("Scheduler Error!\n%s" % traceback.format_exc())
def do_worker(self, project):
try:
# task = self.qin.get()
taskq = self.queues.get(project.name)
while True:
try:
task = taskq.get()
                    if task == StopIteration or task == '':
                        logging.info("worker [%s] got a stop task, exiting do_worker" % (project.name))
                        break
                    if task is None:
                        gevent.sleep(2)
                        logging.info("worker [%s] got no task, sleeping for 2 seconds" % (project.name))
                        continue
                    logging.info("do_worker got a task: %s" % json.dumps(task))
project_name = task.get('project')
if project_name is None:
logging.error("worker get a task from queue [%s] has no project, ignore. %s" % (project.queue_name, json.dumps(task)))
continue
spider = self.spiders.get(project_name)
if spider is None:
logging.error("worker [%s] get no task, no spider, continue" % (project_name))
continue
# print("project %s task: " % project, task)
self.do_fetch(project_name, spider, task)
except GreenletExit as ge:
# logging.info("Worker %s ")
pass
except:
logging.error("Worker error!\n%s" % traceback.format_exc())
finally:
logging.debug("Worker done, ========================== job count: %s" % project_name)
def do_fetch(self, project, spider, task):
logging.debug("do fetch task: %s" % task)
if project != task['project']:
pass
headers = spider.config.headers
url = task.get('url')
task_id = task.get('task_id')
if url.startswith('data://'):
callback = task.get('callback')
if callback is not None:
callback_func = getattr(spider, callback)
if callback_func:
callback_func()
else:
response = self.fetcher.fetch(spider, task, headers)
if 'set-cookie' in response:
new_cookie = response['set-cookie']
old_cookie = headers.get('Cookie', None)
| |
"""
This module provides the Scan Op
Scanning is a general form of recurrence, which can be used for looping.
The idea is that you *scan* a function along some input sequence, producing
an output at each time-step that can be seen (but not modified) by the
function at the next time-step. (Technically, the function can see the
previous K time-steps of your outputs and L time steps (from past and
future) of your inputs.
So for example, ``sum()`` could be computed by scanning the ``z+x_i``
function over a list, given an initial state of ``z=0``.
Special cases:
* A *reduce* operation can be performed by using only the last
output of a ``scan``.
* A *map* operation can be performed by applying a function that
ignores previous steps of the outputs.
Often a for-loop or while-loop can be expressed as a ``scan()`` operation,
and ``scan`` is the closest that theano comes to looping. The advantages
    of using ``scan`` over `for` loops in python (among others) are:
* it allows the number of iterations to be part of the symbolic graph
* it allows computing gradients through the for loop
    * there exist a number of optimizations that help rewrite your loop
    such that less memory is used and it runs faster
* it ensures that data is not copied from host to gpu and gpu to
host at each step
The Scan Op should typically be used by calling any of the following
functions: ``scan()``, ``map()``, ``reduce()``, ``foldl()``,
``foldr()``.
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("<NAME> "
"<NAME> "
"<NAME> "
"<NAME> ")
__copyright__ = "(c) 2010, Universite de Montreal"
__contact__ = "<NAME> <<EMAIL>>"
import logging
import numpy
from six.moves import xrange
from theano import gof
from theano.compat import izip
from theano.tensor import opt, TensorVariable
from theano.tensor.sharedvar import TensorSharedVariable
from theano import tensor
from theano.scalar.sharedvar import shared as scalar_shared
from theano.compile.pfunc import rebuild_collect_shared
from . import scan_op
from . import scan_utils
# Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_module.scan')
def scan(fn,
sequences=None,
outputs_info=None,
non_sequences=None,
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=None,
name=None,
options=None,
profile=False):
"""
This function constructs and applies a Scan op to the provided
arguments.
:param fn:
``fn`` is a function that describes the operations involved in one
step of ``scan``. ``fn`` should construct variables describing the
output of one iteration step. It should expect as input theano
variables representing all the slices of the input sequences
and previous values of the outputs, as well as all other arguments
given to scan as ``non_sequences``. The order in which scan passes
these variables to ``fn`` is the following :
* all time slices of the first sequence
* all time slices of the second sequence
* ...
* all time slices of the last sequence
* all past slices of the first output
    * all past slices of the second output
* ...
* all past slices of the last output
* all other arguments (the list given as `non_sequences` to
scan)
The order of the sequences is the same as the one in the list
`sequences` given to scan. The order of the outputs is the same
as the order of ``outputs_info``. For any sequence or output the
order of the time slices is the same as the one in which they have
been given as taps. For example if one writes the following :
.. code-block:: python
scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1])
, Sequence2
, dict(input = Sequence3, taps = 3) ]
, outputs_info = [ dict(initial = Output1, taps = [-3,-5])
, dict(initial = Output2, taps = None)
, Output3 ]
, non_sequences = [ Argument1, Argument 2])
``fn`` should expect the following arguments in this given order:
#. ``Sequence1[t-3]``
#. ``Sequence1[t+2]``
#. ``Sequence1[t-1]``
#. ``Sequence2[t]``
#. ``Sequence3[t+3]``
#. ``Output1[t-3]``
#. ``Output1[t-5]``
#. ``Output3[t-1]``
#. ``Argument1``
#. ``Argument2``
The list of ``non_sequences`` can also contain shared variables
used in the function, though ``scan`` is able to figure those
out on its own so they can be skipped. For the clarity of the
    code we nevertheless recommend providing them to scan. To some extent
    ``scan`` can also figure out other ``non_sequences`` (not shared)
    even if they are not passed to scan (but used by `fn`). A simple example of
this would be :
.. code-block:: python
import theano.tensor as TT
W = TT.matrix()
W_2 = W**2
def f(x):
return TT.dot(x,W_2)
The function is expected to return two things. One is a list of
outputs ordered in the same order as ``outputs_info``, with the
difference that there should be only one output variable per
output initial state (even if no tap value is used). Secondly
`fn` should return an update dictionary (that tells how to
update any shared variable after each iteration step). The
dictionary can optionally be given as a list of tuples. There is
    no constraint on the order of these two lists; ``fn`` can return
either ``(outputs_list, update_dictionary)`` or
``(update_dictionary, outputs_list)`` or just one of the two (in
case the other is empty).
    To use ``scan`` as a while loop, the user needs to change the
    function ``fn`` such that a stopping condition is also returned.
    To do so, the condition needs to be wrapped in an ``until`` class.
The condition should be returned as a third element, for example:
.. code-block:: python
...
return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50)
    Note that a number of steps (considered here as the maximum
    number of steps) is still required even though a condition is
    passed (and it is used to allocate memory if needed).
:param sequences:
``sequences`` is the list of Theano variables or dictionaries
describing the sequences ``scan`` has to iterate over. If a
sequence is given as wrapped in a dictionary, then a set of optional
information can be provided about the sequence. The dictionary
should have the following keys:
* ``input`` (*mandatory*) -- Theano variable representing the
sequence.
* ``taps`` -- Temporal taps of the sequence required by ``fn``.
They are provided as a list of integers, where a value ``k``
    implies that at iteration step ``t`` scan will pass to ``fn``
the slice ``t+k``. Default value is ``[0]``
Any Theano variable in the list ``sequences`` is automatically
wrapped into a dictionary where ``taps`` is set to ``[0]``
:param outputs_info:
``outputs_info`` is the list of Theano variables or dictionaries
describing the initial state of the outputs computed
    recurrently. When these initial states are given as dictionaries,
    optional information can be provided about the output corresponding
to these initial states. The dictionary should have the following
keys:
* ``initial`` -- Theano variable that represents the initial
state of a given output. In case the output is not computed
    recursively (think of a map) and does not require an initial
    state, this field can be skipped. Given that only the previous
time step of the output is used by ``fn`` the initial state
should have the same shape as the output. If multiple time
taps are used, the initial state should have one extra
dimension that should cover all the possible taps. For example
if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0,
``fn`` will require (by an abuse of notation) ``output[-5]``,
``output[-2]`` and ``output[-1]``. This will be given by
the initial state, which in this case should have the shape
(5,)+output.shape. If this variable containing the initial
state is called ``init_y`` then ``init_y[0]`` *corresponds to*
    ``output[-5]``. ``init_y[1]`` *corresponds to* ``output[-4]``,
    ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]``
    corresponds to ``output[-2]``, ``init_y[4]`` corresponds to
``output[-1]``. While this order might seem strange, it comes
natural from splitting an array at a given point. Assume that
    we have an array ``x``, and we choose ``k`` to be time step
``0``. Then our initial state would be ``x[:k]``, while the
output will be ``x[k:]``. Looking at this split, elements in
``x[:k]`` are ordered exactly like those in ``init_y``.
* ``taps`` -- Temporal taps of the output that will be | |
== 0:
return "0"
else:
return oct(n)[1:]
# << VBFunctions >> (31 of 57)
def RGB(r, g, b):
"""Return a Long whole number representing an RGB color value
The value for any argument to RGB that exceeds 255 is assumed to be 255.
If any argument is less than zero then this results in a ValueError.
"""
rm = min(255, Int(r))
gm = min(255, Int(g))
bm = min(255, Int(b))
#
if rm < 0 or gm < 0 or bm < 0:
raise ValueError("RGB values must be >= 0, were (%s, %s, %s)" % (r, g, b))
#
return ((bm*256)+gm)*256+rm
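# For reference (derived from the packing above): RGB(255, 0, 0) == 255,
# RGB(0, 255, 0) == 65280 and RGB(0, 0, 255) == 16711680, i.e. VB packs the
# channels in little-endian BGR order.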
# << VBFunctions >> (32 of 57)
def Replace(expression, find, replace, start=1, count=-1):
"""Returns a string in which a specified substring has been replaced with another substring a specified number of times
The return value of the Replace function is a string, with substitutions made,
    that begins at the position specified by start and concludes at the end of
the expression string. It is not a copy of the original string from start to finish.
"""
if find:
return expression[:start-1] + expression[start-1:].replace(find, replace, count)
else:
return expression
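# For reference (behaviour of the implementation above):
#   Replace("banana", "a", "o")           -> "bonono"
#   Replace("banana", "a", "o", start=3)  -> "banono"  (first two chars untouched)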
# << VBFunctions >> (33 of 57)
def Right(text, number):
"""Return the right most characters in the text"""
return text[-number:]
# << VBFunctions >> (34 of 57)
_last_rnd_number = random.random()
def Rnd(value=1):
    """Return a random number and optionally seed the current state"""
    global _last_rnd_number
    if value == 0:
        return _last_rnd_number
    elif value < 0:
        random.seed(value)
    # For value > 0 (the default) fall through and return a new random number
    r = random.random()
    _last_rnd_number = r
    return r
def Randomize(seed=None):
"""Seed the RNG
In VB this doesn't return a consistent sequence so we basically ignore the seed.
"""
random.seed()
# << VBFunctions >> (35 of 57)
def RSet(var, value):
"""Do a VB RSet
Right aligns a string within a string variable.
RSet stringvar = string
If stringvar is longer than string, RSet replaces any leftover characters
in stringvar with spaces, back to its beginning.
"""
return " "*(len(var)-len(value)) + value[:len(var)]
# << VBFunctions >> (36 of 57)
def Seek(channel):
"""Return the current 'cursor' position in the specified channel"""
return VBFiles.getFile(Int(channel)).tell()+1 # VB starts at 1
# << VBFunctions >> (37 of 57)
class _OptionsDB(config.VB2PYConfigObject):
"""A special config parser class to handle central VB options"""
def __init__(self, appname):
"""Initialize the parser"""
config.VB2PYConfigObject.__init__(self, filename=utils.relativePath("settings.ini"))
self.appname = appname
def __getitem__(self, key):
"""Get an item"""
section, name = key
section = self._getSettingName(section)
return config.VB2PYConfigObject.__getitem__(self, (section, name))
def __setitem__(self, key, value):
"""Set an item"""
section, name = key
section = self._getSettingName(section)
if not self._config.has_section(section):
self._config.add_section(section)
self._config.set(section, name, value)
self.save()
def save(self):
"""Store the options"""
f = open(utils.relativePath("settings.ini"), "w")
self._config.write(f)
f.close()
def _getSettingName(self, section):
"""Return the name for a section"""
return "%s.%s" % (self.appname, section)
def getAll(self, section):
"""Return all the items in a sections"""
thissection = self._getSettingName(section)
options = self._config.options(thissection)
ret = vbObjectInitialize(size=(len(options)-1, 1), objtype=str)
for idx in range(len(options)):
ret[idx, 0] = options[idx]
ret[idx, 1] = self[section, options[idx]]
return ret
def delete(self, section, name):
"""Delete a setting from the settings file"""
section = self._getSettingName(section)
self._config.remove_option(section, name)
self.save()
# << VBFunctions >> (38 of 57)
def GetSetting(appname, section, key, default=None):
"""Get a setting from the central setting file"""
settings = _OptionsDB(appname)
try:
return settings[section, key]
except config.ConfigParser.Error:
if default is not None:
return default
raise
# << VBFunctions >> (39 of 57)
def GetAllSettings(appname, section):
"""Get all settings from the central setting file"""
settings = _OptionsDB(appname)
return settings.getAll(section)
# << VBFunctions >> (40 of 57)
def SaveSetting(appname, section, key, value):
"""Set a setting in the central setting file"""
settings = _OptionsDB(appname)
settings[section, key] = str(value)
# << VBFunctions >> (41 of 57)
def DeleteSetting(appname, section, key):
"""Delete a setting in the central setting file"""
settings = _OptionsDB(appname)
settings.delete(section, key)
# << VBFunctions >> (42 of 57)
def Sgn(num):
"""Return the sign of a number"""
n = float(num)
if n < 0:
return -1
elif n == 0:
return 0
else:
return 1
# << VBFunctions >> (43 of 57)
def String(num=None, text=None):
"""Return a repeated number of string items"""
if num is None and text is None:
return str()
else:
return text[:1]*CInt(num)
def Space(num):
"""Return a repeated number of spaces"""
return String(num, " ")
Spc = Space
# << VBFunctions >> (44 of 57)
def Split(text, delimiter=" ", limit=-1, compare=None):
"""Split a string using the delimiter
If the optional limit is present then this defines the number
of items returned. The compare is used for different string comparison
types in VB, but this is not implemented at the moment
"""
if compare is not None:
raise VB2PYNotSupported("Compare options for Split are not currently supported")
#
if limit == 0:
return VBArray()
elif limit > 0:
return Array(*str(text).split(delimiter, limit-1))
else:
return Array(*str(text).split(delimiter))
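# For reference (behaviour of the implementation above):
#   Split("a b c")          -> Array("a", "b", "c")
#   Split("a,b,c", ",", 2)  -> Array("a", "b,c")   # limit counts returned items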
# << VBFunctions >> (45 of 57)
def Sqr(num):
"""Return the square root of a value"""
return math.sqrt(float(num))
def Sin(num):
"""Return the sin of a value"""
return math.sin(float(num))
def Cos(num):
"""Return the cosine of a value"""
return math.cos(float(num))
def Tan(num):
"""Return the tangent of a value"""
return math.tan(float(num))
def Atn(num):
"""Return the arc-tangent of a value"""
return math.atan(float(num))
# << VBFunctions >> (46 of 57)
def StrReverse(s):
    """Reverse a string"""
    return str(s)[::-1]
# << VBFunctions >> (47 of 57)
def Switch(*args):
"""Choose from a list of expression each with its own condition
The arguments are presented as a sequence of condition, expression pairs
and the first condition that returns a true causes its expression to be
returned. If no conditions are true then the function returns None
"""
arg_list = list(args)
arg_list.reverse()
#
while arg_list:
cond, expr = arg_list.pop(), arg_list.pop()
if cond:
return expr
return None
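# For reference (behaviour of the implementation above):
#   Switch(x < 0, "negative", x == 0, "zero", x > 0, "positive")
# returns the expression paired with the first condition that is true,
# or None when no condition matches.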
# << VBFunctions >> (48 of 57)
def Timer():
"""Returns a Single representing the number of seconds elapsed since midnight"""
ltime = time.localtime()
h, m, s = ltime[3:6]
return h*3600.0 + m*60.0 + s
# << VBFunctions >> (49 of 57)
def Trim(text):
"""Strip spaces from the text"""
return str(text).strip()
def LTrim(text):
"""Strip spaces from the left of the text"""
return str(text).lstrip()
def RTrim(text):
"""Strip spaces from the right of the text"""
return str(text).rstrip()
# << VBFunctions >> (50 of 57)
def UBound(obj, dimension=1):
"""Return the upper bound for the index"""
try:
return obj.__ubound__(dimension)
except AttributeError:
raise ValueError("UBound called for invalid object")
def LBound(obj, dimension=1):
"""Return the lower bound for the index"""
try:
return obj.__lbound__(dimension)
except AttributeError:
raise ValueError("LBound called for invalid object")
# << VBFunctions >> (51 of 57)
def Val(text):
"""Return the value of a string
This function finds the longest leftmost number in the string and
returns it. If there are no valid numbers then it returns 0.
The method chosen here is very poor - we just keep trying to convert the
string to a float and just use the last successful as we increase
the size of the string. A Regular expression approach is probably
quicker.
"""
best = 0
for idx in range(len(text)):
try:
best = float(text[:idx+1])
except ValueError:
pass
return best
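# For reference (behaviour of the implementation above):
#   Val("12.5kg")  -> 12.5
#   Val("abc")     -> 0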
# << VBFunctions >> (52 of 57)
def vbForRange(start, stop, step=1):
"""Mimic the range in a for statement
VB's range is inclusive and can include non-integer elements so
    we use a generator.
"""
num_repeats = (stop-start)/step
    if num_repeats < 0:
        return
current = start
while num_repeats >= 0:
yield current
current += step
num_repeats -= 1
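# For reference (behaviour of the implementation above; bounds are inclusive,
# as in VB):
#   list(vbForRange(1, 5))       -> [1, 2, 3, 4, 5]
#   list(vbForRange(10, 1, -3))  -> [10, 7, 4, 1]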
# << VBFunctions >> (53 of 57)
def vbGetEventArgs(names, arguments):
"""Return arguments passed in an event
VB Control events have parameters passed in the call, eg MouseMove(Button, Shift, X, Y).
In PythonCard the event parameters are all passed as a single event object. We
can easily unpack the attributes back to the values in the Event Handler but
we also have to account for the fact that someone might call the Handler
directly and therefore assume that they can pass parameters individually.
This function tries to unpack the params from an event object and, if
successful, returns them as a tuple. If this fails then it tries to
assume that they were already in a tuple and return them that way.
This can still fail if there are keyword arguments ... TODO!
"""
# arguments is the *args tuple
#
# Is there only | |
Ruston Beauty School"),
("<NAME>'s Beauty College","<NAME>'s Beauty College"),
("Patrick Henry Community College","Patrick Henry Community College"),
("Paul D Camp Community College","Paul D Camp Community College"),
("Paul Mitchell The School-Charleston","<NAME>chell The School-Charleston"),
("<NAME> The School-Columbia","<NAME>chell The School-Columbia"),
("<NAME>chell The School-Raleigh","<NAME>chell The School-Raleigh"),
("<NAME>chell The School-Spokane","<NAME>chell The School-Spokane"),
("<NAME> the School-Arkansas","<NAME>chell the School-Arkansas"),
("<NAME>chell the School-Arlington","<NAME>chell the School-Arlington"),
("<NAME> the School-Atlanta","<NAME>chell the School-Atlanta"),
("<NAME> the School-Austin","<NAME>chell the School-Austin"),
("<NAME> the School-Birmingham","<NAME>chell the School-Birmingham"),
("<NAME>chell the School-Boise","<NAME>chell the School-Boise"),
("<NAME> the School-Bradley","<NAME>chell the School-Bradley"),
("<NAME>chell the School-Chicago","<NAME>chell the School-Chicago"),
("<NAME>chell the School-Cincinnati","<NAME>chell the School-Cincinnati"),
("<NAME>chell the School-Colorado Springs","Paul Mitchell the School-Colorado Springs"),
("<NAME>chell the School-Costa Mesa","<NAME>chell the School-Costa Mesa"),
("<NAME>chell the School-Danbury","<NAME>chell the School-Danbury"),
("<NAME>chell the School-Delaware","<NAME>chell the School-Delaware"),
("<NAME>chell the School-Denver","Paul Mitchell the School-Denver"),
("<NAME>chell the School-East Bay","Paul Mitchell the School-East Bay"),
("Paul Mitchell the School-Esani","<NAME>chell the School-Esani"),
("<NAME> the School-Escanaba","<NAME>chell the School-Escanaba"),
("<NAME>chell the School-Fayetteville","<NAME>chell the School-Fayetteville"),
("<NAME> the School-Fort Myers","<NAME>chell the School-Fort Myers"),
("<NAME>chell the School-Fresno","<NAME>chell the School-Fresno"),
("<NAME>chell the School-Gastonia","<NAME>chell the School-Gastonia"),
("<NAME>chell the School-Great Lakes","<NAME>chell the School-Great Lakes"),
("<NAME>chell the School-Green Bay","<NAME>chell the School-Green Bay"),
("<NAME>chell the School-Honolulu","<NAME>chell the School-Honolulu"),
("<NAME>chell the School-Houston","<NAME>chell the School-Houston"),
("<NAME> the School-Huntsville","<NAME>chell the School-Huntsville"),
("<NAME> the School-Indianapolis","<NAME>chell the School-Indianapolis"),
("<NAME>chell the School-Jacksonville","<NAME>chell the School-Jacksonville"),
("<NAME>chell the School-Jersey Shore","<NAME>chell the School-Jersey Shore"),
("<NAME>chell the School-Knoxville","<NAME>chell the School-Knoxville"),
("<NAME>chell the School-Las Vegas","Paul Mitchell the School-Las Vegas"),
("<NAME> the School-Lexington","<NAME>chell the School-Lexington"),
("<NAME>chell the School-Louisville","<NAME>chell the School-Louisville"),
("<NAME>chell the School-Mclean","Paul Mitchell the School-Mclean"),
("<NAME> the School-Memphis","<NAME> the School-Memphis"),
("<NAME> the School-Miami","<NAME>chell the School-Miami"),
("<NAME> the School-Michigan","<NAME>chell the School-Michigan"),
("<NAME> the School-Monroe","<NAME>chell the School-Monroe"),
("<NAME> the School-Murfreesboro","<NAME>chell the School-Murfreesboro"),
("<NAME> the School-Nashville","<NAME>chell the School-Nashville"),
("<NAME> the School-Normal","<NAME>chell the School-Normal"),
("<NAME> the School-Ogden","<NAME> the School-Ogden"),
("<NAME> the School-Orlando","<NAME>chell the School-Orlando"),
("<NAME> the School-Overland Park","<NAME>chell the School-Overland Park"),
("<NAME> the School-Pasadena","<NAME> the School-Pasadena"),
("<NAME> the School-Phoenix","<NAME>chell the School-Phoenix"),
("<NAME>chell the School-Portland","<NAME>chell the School-Portland"),
("<NAME> the School-Portsmouth","<NAME>chell the School-Portsmouth"),
("<NAME> the School-Provo","<NAME> the School-Provo"),
("<NAME>chell the School-Reno","<NAME>chell the School-Reno"),
("<NAME> the School-Rhode Island","<NAME>chell the School-Rhode Island"),
("<NAME> the School-Sacramento","<NAME>chell the School-Sacramento"),
("<NAME>chell the School-Salt Lake City","Paul Mitchell the School-Salt Lake City"),
("<NAME> the School-San Antonio","<NAME> the School-San Antonio"),
("<NAME> the School-San Diego","<NAME>chell the School-San Diego"),
("<NAME> the School-Santa Barbara","<NAME> the School-Santa Barbara"),
("<NAME> the School-<NAME>","<NAME> the School-Sher<NAME>"),
("<NAME> the School-Springfield","<NAME> the School-Springfield"),
("<NAME> the School-St George","<NAME> the School-St George"),
("<NAME> the School-St Louis","<NAME> the School-St Louis"),
("<NAME> the School-Tampa","<NAME> the School-Tampa"),
("<NAME> the School-Temecula","<NAME> the School-Temecula"),
("<NAME> the School-Wichita","<NAME> the School-Wichita"),
("<NAME> the School-Woodbridge","<NAME> the School-Woodbridge"),
("Paul Quinn College","Paul Quinn College"),
("Paul Smiths College of Arts and Science","<NAME>s College of Arts and Science"),
("Payne Theological Seminary","Payne Theological Seminary"),
("Pearl River Community College","Pearl River Community College"),
("Peirce College","Peirce College"),
("Pellissippi State Community College","Pellissippi State Community College"),
("Peninsula College","Peninsula College"),
("Penn Commercial Business/Technical School","Penn Commercial Business/Technical School"),
("Pennco Tech-Blackwood","Pennco Tech-Blackwood"),
("Pennco Tech-Bristol","Pennco Tech-Bristol"),
("Pennsylvania Academy of Cosmetology Arts and Sciences-Du Bois","Pennsylvania Academy of Cosmetology Arts and Sciences-Du Bois"),
("Pennsylvania Academy of Cosmetology Arts and Sciences-Johnstown","Pennsylvania Academy of Cosmetology Arts and Sciences-Johnstown"),
("Pennsylvania Academy of the Fine Arts","Pennsylvania Academy of the Fine Arts"),
("Pennsylvania College of Art and Design","Pennsylvania College of Art and Design"),
("Pennsylvania College of Health Sciences","Pennsylvania College of Health Sciences"),
("Pennsylvania College of Technology","Pennsylvania College of Technology"),
("Pennsylvania Gunsmith School","Pennsylvania Gunsmith School"),
("Pennsylvania Highlands Community College","Pennsylvania Highlands Community College"),
("Pennsylvania Institute of Health and Technology","Pennsylvania Institute of Health and Technology"),
("Pennsylvania Institute of Taxidermy Inc","Pennsylvania Institute of Taxidermy Inc"),
("Pennsylvania Institute of Technology","Pennsylvania Institute of Technology"),
("Pennsylvania School of Business","Pennsylvania School of Business"),
("Pennsylvania State System of Higher Education-Central Office","Pennsylvania State System of Higher Education-Central Office"),
("Pennsylvania State University-College of Medicine","Pennsylvania State University-College of Medicine"),
("Pennsylvania State University-Main Campus","Pennsylvania State University-Main Campus"),
("Pennsylvania State University-Penn State Abington","Pennsylvania State University-Penn State Abington"),
("Pennsylvania State University-Penn State Altoona","Pennsylvania State University-Penn State Altoona"),
("Pennsylvania State University-Penn State Beaver","Pennsylvania State University-Penn State Beaver"),
("Pennsylvania State University-Penn State Berks","Pennsylvania State University-Penn State Berks"),
("Pennsylvania State University-Penn State Brandywine","Pennsylvania State University-Penn State Brandywine"),
("Pennsylvania State University-Penn State Dubois","Pennsylvania State University-Penn State Dubois"),
("Pennsylvania State University-Penn State Erie-Behrend College","Pennsylvania State University-Penn State Erie-Behrend College"),
("Pennsylvania State University-Penn State Fayette- Eberly","Pennsylvania State University-Penn State Fayette- Eberly"),
("Pennsylvania State University-Penn State Great Valley","Pennsylvania State University-Penn State Great Valley"),
("Pennsylvania State University-Penn State Greater Allegheny","Pennsylvania State University-Penn State Greater Allegheny"),
("Pennsylvania State University-Penn State Harrisburg","Pennsylvania State University-Penn State Harrisburg"),
("Pennsylvania State University-Penn State Hazleton","Pennsylvania State University-Penn State Hazleton"),
("Pennsylvania State University-Penn State Lehigh Valley","Pennsylvania State University-Penn State Lehigh Valley"),
("Pennsylvania State University-Penn State Mont Alto","Pennsylvania State University-Penn State Mont Alto"),
("Pennsylvania State University-Penn State New Kensington","Pennsylvania State University-Penn State New Kensington"),
("Pennsylvania State University-Penn State Schuylkill","Pennsylvania State University-Penn State Schuylkill"),
("Pennsylvania State University-Penn State Shenango","Pennsylvania State University-Penn State Shenango"),
("Pennsylvania State University-Penn State Wilkes-Barre","Pennsylvania State University-Penn State Wilkes-Barre"),
("Pennsylvania State University-Penn State Worthington Scranton","Pennsylvania State University-Penn State Worthington Scranton"),
("Pennsylvania State University-Penn State York","Pennsylvania State University-Penn State York"),
("Pennsylvania State University-World Campus","Pennsylvania State University-World Campus"),
("Penrose Academy","Penrose Academy"),
("Pensacola School of Massage Therapy & Health Careers","Pensacola School of Massage Therapy & Health Careers"),
("Pensacola State College","Pensacola State College"),
("Penta County Joint Vocational School","Penta County Joint Vocational School"),
("Pentecostal Theological Seminary","Pentecostal Theological Seminary"),
("Pepperdine University","Pepperdine University"),
("Peralta Community College System Office","Peralta Community College System Office"),
("Performance Training Institute","Performance Training Institute"),
("Perry Technical Institute","Perry Technical Institute"),
("Personal Fitness & Nutrition Center","Personal Fitness & Nutrition Center"),
("Peru State College","Peru State College"),
("Pfeiffer University","Pfeiffer University"),
("Phagans Beauty College","Phagans Beauty College"),
("Phagans Central Oregon Beauty College","Phagans Central Oregon Beauty College"),
("Phagans Grants Pass College of Beauty","Phagans Grants Pass College of Beauty"),
("Phagans Medford Beauty School","Phagans Medford Beauty School"),
("Phagans Newport Academy of Cosmetology Careers","Phagans Newport Academy of Cosmetology Careers"),
("Phagans School of Beauty","Phagans School of Beauty"),
("Phagans School of Hair Design","Phagans School of Hair Design"),
("Phagans School of Hair Design-Portland","Phagans School of Hair Design-Portland"),
("Phagans Tigard Beauty School","Phagans Tigard Beauty School"),
("Philadelphia College of Osteopathic Medicine","Philadelphia College of Osteopathic Medicine"),
("Philadelphia University","Philadelphia University"),
("Philander Smith College","Philander Smith College"),
("Phillips Beth Israel School of Nursing","Phillips Beth Israel School of Nursing"),
("Phillips Community College of the University of Arkansas","Phillips Community College of the University of Arkansas"),
("Phillips Graduate Institute","Phillips Graduate Institute"),
("Phillips Theological Seminary","Phillips Theological Seminary"),
("Phoenix College","Phoenix College"),
("Phoenix Institute of Herbal Medicine & Acupuncture","Phoenix Institute of Herbal Medicine & Acupuncture"),
("Phoenix Seminary","Phoenix Seminary"),
("Photographic Center Northwest","Photographic Center Northwest"),
("Pickaway Ross Joint Vocational School District","Pickaway Ross Joint Vocational School District"),
("Pickens Technical College","Pickens Technical College"),
("Piedmont College","Piedmont College"),
("Piedmont Community College","Piedmont Community College"),
("Piedmont International University","Piedmont International University"),
("Piedmont Technical College","Piedmont Technical College"),
("Piedmont Virginia Community College","Piedmont Virginia Community College"),
("Pierce College at Fort Steilacoom","Pierce College at Fort Steilacoom"),
("Pierce College at Puyallup","Pierce College at Puyallup"),
("Pierpont Community and Technical College","Pierpont Community and Technical College"),
("Pike County Joint Vocational School District","Pike County Joint Vocational School District"),
("Pike-Lincoln Technical Center","Pike-Lincoln Technical Center"),
("Pikes Peak Community College","Pikes Peak Community College"),
("Pillar College","Pillar College"),
("Pima Community College","Pima Community College"),
("Pima Medical Institute-Albuquerque West","Pima Medical Institute-Albuquerque West"),
("Pima Medical Institute-Albuquerque","Pima Medical Institute-Albuquerque"),
("Pima Medical Institute-Chula Vista","Pima Medical Institute-Chula Vista"),
("Pima Medical Institute-Colorado Springs","Pima Medical Institute-Colorado Springs"),
("Pima Medical Institute-Denver","Pima Medical Institute-Denver"),
("Pima Medical Institute-East Valley","Pima Medical Institute-East Valley"),
("Pima Medical Institute-Houston","Pima Medical Institute-Houston"),
("Pima Medical Institute-Las Vegas","Pima Medical Institute-Las Vegas"),
("Pima Medical Institute-Mesa","Pima Medical Institute-Mesa"),
("Pima Medical Institute-Renton","Pima Medical Institute-Renton"),
("Pima Medical Institute-Seattle","Pima Medical Institute-Seattle"),
("Pima Medical Institute-South Denver","Pima Medical Institute-South Denver"),
("Pima Medical Institute-Tucson","Pima Medical Institute-Tucson"),
("Pine Manor College","Pine Manor College"),
("Pine Technical College","Pine Technical College"),
("Pinellas Technical Education Center-Clearwater","Pinellas Technical Education Center-Clearwater"),
("Pinellas Technical Education Center-St Petersburg","Pinellas Technical Education Center-St Petersburg"),
("Pineville Beauty School","Pineville Beauty School"),
("Pinnacle Career Institute-Lawrence","Pinnacle Career Institute-Lawrence"),
("Pinnacle Career Institute-North Kansas City","Pinnacle Career Institute-North Kansas City"),
("Pinnacle Career Institute-South Kansas City","Pinnacle Career Institute-South Kansas City"),
("Pinnacle College","Pinnacle College"),
("Pinnacle Institute of Cosmetology","Pinnacle Institute of Cosmetology"),
("Pioneer Career and Technology Center","Pioneer Career and Technology Center"),
("Pioneer Pacific College","Pioneer Pacific College"),
("Pioneer Technology Center","Pioneer Technology Center"),
("Pipo Academy of Hair Design","Pipo Academy of Hair Design"),
("Pitt Community College","Pitt Community College"),
("Pittsburg State University","Pittsburg State University"),
("Pittsburgh Institute of Aeronautics","Pittsburgh Institute of Aeronautics"),
("Pittsburgh Institute of Mortuary Science Inc","Pittsburgh Institute of Mortuary Science Inc"),
("Pittsburgh Technical Institute","Pittsburgh Technical Institute"),
("Pittsburgh Theological Seminary","Pittsburgh Theological Seminary"),
("Pittsburgh's Ohio Valley Hospital School of Nursing","Pittsburgh's Ohio Valley Hospital School of Nursing"),
("Pitzer College","Pitzer College"),
("Pivot Point Academy-Bloomingdale","Pivot Point Academy-Bloomingdale"),
("Pivot Point Academy-Chicago","Pivot Point Academy-Chicago"),
("Pivot Point Academy-Evanston","Pivot Point Academy-Evanston"),
("Platt College-Aurora","Platt College-Aurora"),
("Platt College-Central OKC","Platt College-Central OKC"),
("Platt College-Dallas","Platt College-Dallas"),
("Platt College-Lawton","Platt College-Lawton"),
("Platt College-Los Angeles","Platt College-Los Angeles"),
("Platt College-Moore","Platt College-Moore"),
("Platt College-North OKC","Platt College-North OKC"),
("Platt College-Ontario","Platt College-Ontario"),
("Platt College-Riverside","Platt College-Riverside"),
("Platt College-San Diego","Platt College-San Diego"),
("Platt College-Tulsa","Platt College-Tulsa"),
("Platt Technical High School","Platt Technical High School"),
("Plaza Beauty School","Plaza Beauty School"),
("Plaza College","Plaza College"),
("Plymouth State University","Plymouth State University"),
("Point Loma Nazarene University","Point Loma Nazarene University"),
("Point Park University","Point Park University"),
("Point University","Point University"),
("Polaris Career Center","Polaris Career Center"),
("Polk State College","Polk State College"),
("Polytechnic Institute of New York University","Polytechnic Institute of New York University"),
("Polytechnic University of Puerto Rico-Miami","Polytechnic University of Puerto Rico-Miami"),
("Polytechnic University of Puerto Rico-Orlando","Polytechnic University of Puerto Rico-Orlando"),
("Pomona College","Pomona College"),
("Pomona Unified School District Adult and Career Education","Pomona Unified School District Adult and Career Education"),
("Ponca City Beauty College","Ponca City Beauty College"),
("Ponce | |
optional): If True, crop images.
norm_im (bool, optional): If True, normalize images.
chunks (int, optional): Size of chunks when using chunked mp4.
mono (bool, optional): If True, use grayscale image.
predict_flag (bool, optional): If True, use imageio for reading videos rather than OpenCV.
"""
DataGenerator.__init__(
self,
list_IDs,
labels,
clusterIDs,
batch_size,
dim_in,
n_channels_in,
n_channels_out,
out_scale,
shuffle,
camnames,
crop_width,
crop_height,
samples_per_cluster,
vidreaders,
chunks,
mono,
mirror,
predict_flag,
)
self.vmin = vmin
self.vmax = vmax
self.nvox = nvox
self.vsize = (vmax - vmin) / nvox
self.dim_out_3d = (nvox, nvox, nvox)
self.labels_3d = labels_3d
self.camera_params = camera_params
self.interp = interp
self.depth = depth
self.channel_combo = channel_combo
print(self.channel_combo)
self.mode = mode
self.immode = immode
self.tifdirs = tifdirs
self.com3d = com3d
self.rotation = rotation
self.distort = distort
self.expval = expval
self.multicam = multicam
self.var_reg = var_reg
self.COM_aug = COM_aug
self.crop_im = crop_im
# If saving npy as uint8 rather than training directly, don't normalize
self.norm_im = norm_im
self.gpu_id = gpu_id
def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray]:
"""Generate one batch of data.
Args:
index (int): Frame index
Returns:
Tuple[np.ndarray, np.ndarray]: One batch of data
X (np.ndarray): Input volume
y (np.ndarray): Target
"""
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def rot90(self, X: np.ndarray) -> np.ndarray:
"""Rotate X by 90 degrees CCW.
Args:
X (np.ndarray): Input volume.
Returns:
np.ndarray: Rotated volume
"""
X = np.transpose(X, [1, 0, 2, 3])
X = X[:, ::-1, :, :]
return X
def rot180(self, X):
"""Rotate X by 180 degrees.
Args:
X (np.ndarray): Input volume.
Returns:
np.ndarray: Rotated volume
"""
X = X[::-1, ::-1, :, :]
return X
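# Note: self.random_rotate, used in __data_generation below, is not
# defined in this excerpt. A plausible sketch built from rot90/rot180
# above (an assumption, not necessarily the original implementation):
#
#     def random_rotate(self, X, y_3d):
#         rot = np.random.choice(4)  # 0, 90, 180 or 270 degrees CCW
#         for i in range(X.shape[0]):
#             if rot == 1:
#                 X[i], y_3d[i] = self.rot90(X[i]), self.rot90(y_3d[i])
#             elif rot == 2:
#                 X[i], y_3d[i] = self.rot180(X[i]), self.rot180(y_3d[i])
#             elif rot == 3:
#                 X[i] = self.rot90(self.rot180(X[i]))
#                 y_3d[i] = self.rot90(self.rot180(y_3d[i]))
#         return X, y_3d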
def __data_generation(self, list_IDs_temp: List) -> Tuple:
"""Generate data containing batch_size samples.
X : (n_samples, *dim, n_channels)
Args:
list_IDs_temp (List): List of experiment Ids
Returns:
Tuple: Batch_size training samples
X: Input volumes
y_3d: Targets
rotangle: Rotation angle
Raises:
Exception: Invalid generator mode specified.
"""
# Initialization
first_exp = int(self.list_IDs[0].split("_")[0])
X = np.zeros(
(
self.batch_size * len(self.camnames[first_exp]),
*self.dim_out_3d,
self.n_channels_in + self.depth,
),
dtype="float32",
)
if self.mode == "3dprob":
y_3d = np.zeros(
(self.batch_size, self.n_channels_out, *self.dim_out_3d),
dtype="float32",
)
elif self.mode == "coordinates":
y_3d = np.zeros((self.batch_size, 3, self.n_channels_out), dtype="float32")
else:
raise Exception("not a valid generator mode")
if self.expval:
sz = self.dim_out_3d[0] * self.dim_out_3d[1] * self.dim_out_3d[2]
X_grid = np.zeros((self.batch_size, sz, 3), dtype="float32")
# Generate data
cnt = 0
for i, ID in enumerate(list_IDs_temp):
sampleID = int(ID.split("_")[1])
experimentID = int(ID.split("_")[0])
# For 3D ground truth
this_y_3d = self.labels_3d[ID]
this_COM_3d = self.com3d[ID]
if self.COM_aug is not None:
this_COM_3d = this_COM_3d.copy().ravel()
this_COM_3d = (
this_COM_3d
+ self.COM_aug * 2 * np.random.rand(len(this_COM_3d))
- self.COM_aug
)
# Create and project the grid here.
xgrid = np.arange(
self.vmin + this_COM_3d[0] + self.vsize / 2,
this_COM_3d[0] + self.vmax,
self.vsize,
)
ygrid = np.arange(
self.vmin + this_COM_3d[1] + self.vsize / 2,
this_COM_3d[1] + self.vmax,
self.vsize,
)
zgrid = np.arange(
self.vmin + this_COM_3d[2] + self.vsize / 2,
this_COM_3d[2] + self.vmax,
self.vsize,
)
(x_coord_3d, y_coord_3d, z_coord_3d) = np.meshgrid(xgrid, ygrid, zgrid)
if self.mode == "3dprob":
for j in range(self.n_channels_out):
y_3d[i, j] = np.exp(
-(
(y_coord_3d - this_y_3d[1, j]) ** 2
+ (x_coord_3d - this_y_3d[0, j]) ** 2
+ (z_coord_3d - this_y_3d[2, j]) ** 2
)
/ (2 * self.out_scale ** 2)
)
# When the voxel grid is coarse, we will likely miss
# the peak of the probability distribution, as it
# will lie somewhere in the middle of a large voxel.
# So here we renormalize to [~, 1]
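# The renormalization itself is not present in this excerpt; a minimal
# sketch consistent with the comment, placed inside the loop over j
# above (an assumption, not necessarily the original code):
#
#     peak = np.max(y_3d[i, j])
#     if peak > 0:
#         y_3d[i, j] /= peak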
if self.mode == "coordinates":
if this_y_3d.shape == y_3d[i].shape:
y_3d[i] = this_y_3d
else:
msg = "Note: ignoring dimension mismatch in 3D labels"
warnings.warn(msg)
if self.expval:
X_grid[i] = np.stack(
(
x_coord_3d.ravel(),
y_coord_3d.ravel(),
z_coord_3d.ravel(),
),
axis=1,
)
for _ci, camname in enumerate(self.camnames[experimentID]):
ts = time.time()
# Need this copy so that this_y does not change
this_y = np.round(self.labels[ID]["data"][camname]).copy()
if np.all(np.isnan(this_y)):
com_precrop = np.zeros_like(this_y[:, 0]) * np.nan
else:
# For projecting points, we should not use this offset
com_precrop = np.nanmean(this_y, axis=1)
# Store sample
if not self.mirror or _ci == 0:
# for pre-cropped tifs
if self.immode == "tif":
thisim = imageio.imread(
os.path.join(
self.tifdirs[experimentID],
camname,
"{}.tif".format(sampleID),
)
)
# From raw video, need to crop
elif self.immode == "vid":
thisim = self.load_frame.load_vid_frame(
self.labels[ID]["frames"][camname],
camname,
extension=self.extension,
)[
self.crop_height[0] : self.crop_height[1],
self.crop_width[0] : self.crop_width[1],
]
# print("Decode frame took {} sec".format(time.time() - ts))
tss = time.time()
# Load in the image file at the specified path
elif self.immode == "arb_ims":
thisim = imageio.imread(
self.tifdirs[experimentID]
+ self.labels[ID]["frames"][camname][0]
+ ".jpg"
)
if self.mirror:
# Save copy of the first image loaded in, so that it can be flipped accordingly.
self.raw_im = thisim.copy()
if self.mirror and self.camera_params[experimentID][camname]["m"] == 1:
thisim = self.raw_im.copy()
thisim = thisim[-1::-1]
elif self.mirror and self.camera_params[experimentID][camname]["m"] == 0:
thisim = self.raw_im
elif self.mirror:
raise Exception("Invalid mirror parameter, m, must be 0 or 1")
if self.immode == "vid" or self.immode == "arb_ims":
this_y[0, :] = this_y[0, :] - self.crop_width[0]
this_y[1, :] = this_y[1, :] - self.crop_height[0]
com = np.nanmean(this_y, axis=1)
if self.crop_im:
if np.all(np.isnan(com)):
thisim = np.zeros(
(
self.dim_in[1],
self.dim_in[0],
self.n_channels_in,
)
)
else:
thisim = processing.cropcom(
thisim, com, size=self.dim_in[0]
)
# Project de novo or load in approximate (faster)
# TODO(break up): This is hard to read, consider breaking up
ts = time.time()
proj_grid = ops.project_to2d(
np.stack(
(
x_coord_3d.ravel(),
y_coord_3d.ravel(),
z_coord_3d.ravel(),
),
axis=1,
),
self.camera_params[experimentID][camname]["K"],
self.camera_params[experimentID][camname]["R"],
self.camera_params[experimentID][camname]["t"],
)
if self.depth:
d = proj_grid[:, 2]
# print("2D Proj took {} sec".format(time.time() - ts))
ts = time.time()
if self.distort:
"""
Distort points using lens distortion parameters
"""
proj_grid = ops.distortPoints(
proj_grid[:, :2],
self.camera_params[experimentID][camname]["K"],
np.squeeze(
self.camera_params[experimentID][camname]["RDistort"]
),
np.squeeze(
self.camera_params[experimentID][camname]["TDistort"]
),
).T
# print("Distort took {} sec".format(time.time() - ts))
# ts = time.time()
if self.crop_im:
proj_grid = proj_grid[:, :2] - com_precrop + self.dim_in[0] // 2
# Now all coordinates should map properly to the image
# cropped around the COM
else:
# Then the only thing we need to correct for is
# crops at the borders
proj_grid = proj_grid[:, :2]
proj_grid[:, 0] = proj_grid[:, 0] - self.crop_width[0]
proj_grid[:, 1] = proj_grid[:, 1] - self.crop_height[0]
(r, g, b) = ops.sample_grid(thisim, proj_grid, method=self.interp)
# print("Sample grid took {} sec".format(time.time() - ts))
if (
~np.any(np.isnan(com_precrop))
or (self.channel_combo == "avg")
or not self.crop_im
):
X[cnt, :, :, :, 0] = np.reshape(
r, (self.nvox, self.nvox, self.nvox)
)
X[cnt, :, :, :, 1] = np.reshape(
g, (self.nvox, self.nvox, self.nvox)
)
X[cnt, :, :, :, 2] = np.reshape(
b, (self.nvox, self.nvox, self.nvox)
)
if self.depth:
X[cnt, :, :, :, 3] = np.reshape(
d, (self.nvox, self.nvox, self.nvox)
)
cnt = cnt + 1
# print("Projection grid took {} sec".format(time.time() - tss))
if self.multicam:
X = np.reshape(
X,
(
self.batch_size,
len(self.camnames[first_exp]),
X.shape[1],
X.shape[2],
X.shape[3],
X.shape[4],
),
)
X = np.transpose(X, [0, 2, 3, 4, 5, 1])
if self.channel_combo == "avg":
X = np.nanmean(X, axis=-1)
# Randomly reorder the cameras fed into the first layer
elif self.channel_combo == "random":
X = X[:, :, :, :, :, np.random.permutation(X.shape[-1])]
X = np.reshape(
X,
(
X.shape[0],
X.shape[1],
X.shape[2],
X.shape[3],
X.shape[4] * X.shape[5],
),
order="F",
)
else:
X = np.reshape(
X,
(
X.shape[0],
X.shape[1],
X.shape[2],
X.shape[3],
X.shape[4] * X.shape[5],
),
order="F",
)
else:
# Then leave the batch_size and num_cams combined
y_3d = np.tile(y_3d, [len(self.camnames[experimentID]), 1, 1, 1, 1])
if self.mode == "3dprob":
y_3d = np.transpose(y_3d, [0, 2, 3, 4, 1])
if self.rotation:
if self.expval:
# First make X_grid 3d
X_grid = np.reshape(
X_grid,
(self.batch_size, self.nvox, self.nvox, self.nvox, 3),
)
X, X_grid = self.random_rotate(X, X_grid)
# Need to reshape back to raveled version
X_grid = np.reshape(X_grid, (self.batch_size, -1, 3))
else:
X, y_3d = self.random_rotate(X, y_3d)
if self.mono and self.n_channels_in == 3:
# Convert from RGB to mono using the skimage formula. Drop the duplicated frames.
# Reshape so RGB can be processed easily.
X = np.reshape(
X,
(
X.shape[0],
X.shape[1],
X.shape[2],
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Python interface to BioCyc REST API with request caching
Implementation of the BioCyc REST API through the same interface as in Pathway Tools.
Functions are implemented as remote requests backed by a local cache.
<NAME>
May 2014
"""
import os
import errno
import pickle
import requests
import time
import csv
import logging
import re
from datetime import datetime, timedelta
from collections import defaultdict, OrderedDict
try:
import xml.etree.cElementTree as et
except ImportError:
import xml.etree.ElementTree as et
try:
# Python 2.x
import HTMLParser
html = HTMLParser.HTMLParser()
except:
# Python 3+
try:
import html.parser
html = html.parser.HTMLParser()
except:
# Python 3.5
import html
strip_tags_re = re.compile(r'<[^>]*?>')
strip_entities_re = re.compile(r'[&;]*')
type_converter = {
'string':str,
'float':float,
'integer':int,
}
from .exceptions import BioCycObjectNotFound, BioCycInvalidExpiry, BioCycInvalidDetailLevel
from .singleton import Singleton
DETAIL_NONE = 'none'
DETAIL_LOW = 'low'
DETAIL_FULL = 'full'
DEFAULT_RECORD_EXPIRY = timedelta(weeks=6*4) # Expire after ~6 months (6 x 4 weeks)
DBLINK_URLS = {
'BIOPATH': "http://www.molecular-networks.com/biopath3/biopath/mols/%s",
'CAS': "http://www.commonchemistry.org/ChemicalDetail.aspx?ref=%s",
'CHEBI': "http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:%s",
'CHEMSPIDER': "http://www.chemspider.com/%s",
'HMDB': "http://www.hmdb.ca/compounds/%s",
'KEGG': "http://www.genome.ad.jp/dbget-bin/www_bget?%s",
'KNAPSACK': "http://kanaya.naist.jp/knapsack_jsp/information.jsp?sname=C_ID&word=%s",
'LIGAND-CPD': "http://www.genome.ad.jp/dbget-bin/www_bget?%s",
'NCBI-TAXONOMY-DB': "http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Info&id=%s",
'PUBCHEM': "http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi?cid=%s",
'UNIPROT': "http://www.uniprot.org/uniprot/%s",
}
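# Each entry is a printf-style template, e.g.:
# DBLINK_URLS['CHEBI'] % '15377'
#   -> "http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:15377"
# (the CHEBI id here is only illustrative)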
REACTION_DIRECTIONS = {
'LEFT-TO-RIGHT': 'forward',
'RIGHT-TO-LEFT': 'back',
'REVERSIBLE': 'both',
'IRREVERSIBLE-LEFT-TO-RIGHT': 'forward',
'IRREVERSIBLE-RIGHT-TO-LEFT': 'back',
'PHYSIOL-LEFT-TO-RIGHT': 'forward',
'PHYSIOL-RIGHT-TO-LEFT': 'back'
}
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def to_plain_text(str):
'''
Return a plain-text version of a given string
This is a dumb approach that strips tags and then removes entity markers,
but this is fine for the content from BioCyc, where entities are things like '&beta;'.
Stripping in this way turns these into the plaintext 'beta', which is preferable
to raw unicode.
'''
str = strip_tags_re.sub('', str)
str = strip_entities_re.sub('', str)
return str
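# e.g. to_plain_text('<i>&beta;</i>-D-glucose') -> 'beta-D-glucose'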
clean = lambda l: [i for i in l if i]
class BioCyc(object):
"""
Basic tools for querying a specific organism via Pathway Tools/BioCyc web API
"""
__metaclass__ = Singleton
_hammer_lock = None
_hammer_delay = timedelta(seconds=1)
def __init__(self):
self.secondary_cache_paths = [] # Not yet implemented
self.cache_path = os.path.join( os.path.expanduser('~'), '.biocyc' )
self.memory_cache = defaultdict( OrderedDict )
self.max_memory_cache = 50000
self.set_detail(DETAIL_FULL)
self.set_organism('HUMAN')
self.expire_records_after = DEFAULT_RECORD_EXPIRY
def _get_locals(self, table):
if table in self._locals:
return self._locals[table]
else:
lt = []
for cache_path in [self.cache_path] + self.secondary_cache_paths:
try:
with open( os.path.join( cache_path, self.org_id, table), 'rU') as f:
reader = csv.reader(f)
for row in reader:
lt.append( self.get(row[0]) )
except:
continue
self._locals[table] = lt
return lt
@property
def known_pathways(self):
return self._get_locals('pathways')
@property
def known_genes(self):
return self._get_locals('genes')
@property
def known_compounds(self):
return self._get_locals('compounds')
@property
def known_proteins(self):
return self._get_locals('proteins')
@property
def known_reactions(self):
return self._get_locals('reactions')
def _get_by_name(self, table, n):
if table not in self._synonyms:
nt = {}
for cache_path in [self.cache_path] + self.secondary_cache_paths:
try:
with open( os.path.join( cache_path, self.org_id, table + '-synonyms'), 'rU') as f:
reader = csv.reader(f)
for row in reader:
nt[ row[1] ] = self.get(row[0])
except:
continue
self._synonyms[table] = nt
if n in self._synonyms[table]:
return self._synonyms[table][n]
else:
return None
def find_pathway_by_name(self, n):
return self._get_by_name('pathways', n)
def find_gene_by_name(self, n):
return self._get_by_name('genes', n)
def find_compound_by_name(self, n):
return self._get_by_name('compounds', n)
def find_protein_by_name(self, n):
return self._get_by_name('proteins', n)
def find_reaction_by_name(self, n):
return self._get_by_name('reactions', n)
def find_by_name(self,n ):
for t in ['pathways', 'genes', 'reactions', 'compounds', 'proteins']:
o = self._get_by_name(t, n)
if o is not None:
break
return o
'''
This API is incomplete.
The BioCyc remote API for foreignids appears to be broken as of 26.06.2014
All requests return zero.
def get_via_foreign_id(self, db, id):
# Implement a cacheing lookup using foreign object IDs
# Search local table first (CSV-backed) then failing that use a remote
# lookup. If found, return and get the object, if not return None.
# Store the lookup for future (?)
obj = self._get_by_foreign_id(db, id)
if obj == False:
# Not found in cache, and not previously requested
# None = requested; known to not exist
# BioCyc uses a plain text API for this bit, oddly.
r = requests.get('http://websvc.biocyc.org/%s/foreignid?ids=%s' % (db, id))
if r.status == 200: # OK
wr = [id, obj.id]
self._foreign_ids[ db ].append( obj )
with open( os.path.join( self.cache_path, self.org_id, 'foreign-' + db), 'a+') as f:
writer = csv.writer(f)
writer.writerow( wr )
return obj
def _get_by_foreign_id(self, db, id):
if db not in self._foreign_ids:
nt = {}
try:
with open( os.path.join( self.cache_path, self.org_id, 'foreign-' + db), 'rU') as f:
reader = csv.reader(f)
for row in reader:
nt[ row[1] ] = self.get(row[0])
except Exception as e:
logging.info(e)
return None
else:
self._foreign_ids[db] = nt
if id in self._foreign_ids[db]:
return self._foreign_ids[db][id]
else:
False
'''
def add_to_localstore(self, obj):
if hasattr(obj, 'localstore'):
with open( os.path.join( self.cache_path, self.org_id, obj.localstore), 'a+') as f:
writer = csv.writer(f)
writer.writerow( [obj.id] )
self._locals[ obj.localstore ].append( obj )
def add_to_names(self, obj):
if hasattr(obj, 'localstore'):
name_list = []
if obj.name is not None:
name_list.append( obj.name ) # Use plaintext name not the html one
name_list.extend( obj.synonyms )
name_list = set(name_list) # Only uniques
with open( os.path.join( self.cache_path, self.org_id, obj.localstore + '-synonyms'), 'a+') as f:
writer = csv.writer(f)
for synonym in name_list:
writer.writerow( [obj.id, synonym] )
self._synonyms[ obj.localstore ][ synonym ] = obj
def set_organism(self, organism):
self.org_id = organism.upper()
mkdir_p( os.path.join( self.cache_path, self.org_id ) )
self._locals = defaultdict(list)
self._synonyms = defaultdict(dict)
self._foreign_ids = defaultdict(list)
def set_detail(self, detail):
if detail in [DETAIL_NONE, DETAIL_LOW, DETAIL_FULL]:
self.detail = detail
else:
raise BioCycInvalidDetailLevel
def set_expiry(self, td):
if type(td) == timedelta:
self.expire_records_after = td
else:
raise BioCycInvalidExpiry
def requestxml(self, url, params):
# Wait so we don't hammer server
if self._hammer_lock is not None:
wait_required = (self._hammer_lock - datetime.now()) + self._hammer_delay
if not wait_required.days < 0:
time.sleep(wait_required.seconds)
self._hammer_lock = datetime.now()
r = requests.get(url, params=params)
if r.status_code == 200:
# Parse and return the XML
return et.fromstring(r.text)
else:
return False
def request_api(self, func, org_id, obj):
return self.requestxml( 'http://websvc.biocyc.org/apixml', {'fn': func, 'id': '%s:%s' % (org_id, obj), 'detail': self.detail } )
def request_obj(self, org_id, obj):
return self.requestxml( 'http://websvc.biocyc.org/getxml', {'id': '%s:%s' % (org_id, obj), 'detail': self.detail } )
def get_from_cache(self, org_id, id):
'''
Get an object from the cache
Use all cache folders available (primary first, then secondary in order) and look for the ID in the dir
if found, unpickle and return the object, else return None
FIXME: Check for expiry of object! Return None if expired (will auto-refetch and overwrite)
'''
current_time = datetime.now()
# Check memory cache first
if id in self.memory_cache[org_id]:
obj = self.memory_cache[org_id][id]
if obj.created_at > current_time - self.expire_records_after:
return obj
for cache in [self.cache_path] + self.secondary_cache_paths:
read_path = os.path.join( cache, org_id, id )
try:
with open(read_path, 'rb') as f:
obj = pickle.load(f)
except:
# Continue to try the next cache
pass
else:
# It worked so we have obj
# Check for expiry date; if it's not expired return it else continue
if obj.created_at > current_time - self.expire_records_after:
# If we're here it mustn't be in the memory cache
self.memory_cache[org_id][id] = obj
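# Oldest insertions are evicted first (FIFO via OrderedDict.popitem(last=False))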
if len(self.memory_cache[org_id]) > self.max_memory_cache:
self.memory_cache[org_id].popitem(last=False)
return obj
# Else continue looking
# We found nothing (or all expired)
return None
def cache(self, obj):
'''
Store an object in the cache (this allows temporarily assigning a new cache
for exploring the DB without affecting the stored version)
'''
# Check cache path exists for current obj
write_path = os.path.join( self.cache_path, obj.org_id )
if not os.path.exists( write_path ):
mkdir_p( write_path )
with open(os.path.join( write_path, obj.id ), 'wb') as f:
pickle.dump( obj, f )
# Add to localstore (keep track of numbers of objects, etc.)
self.add_to_localstore(obj)
self.add_to_names(obj)
def get(self, ids, skip_cache=False):
return self.get_for_org(self.org_id, ids, skip_cache=skip_cache)
def get_for_org(self, org_id, ids, skip_cache=False):
'''
Returns objects for the given identifiers
If called with a list returns a list, else returns a single entity
'''
t = type(ids)
if t != list:
ids = [ids]
objs = []
for id in ids:
if id == '' or type(id) is not str: # Empty or non-string id
objs.append(None)
continue
if not skip_cache:
obj = self.get_from_cache(org_id, id)
else:
obj = None
if obj is None:
xml = self.request_obj(org_id, id)
obj = self.create_obj_from_xml(id, | |
import numpy as np
import torch
from math import sqrt
from mvt.cores.ops import nms
from mvt.cores.bbox import bbox_mapping_back
def bbox_overlaps_np(bboxes1, bboxes2, mode="iou", eps=1e-6, keep_order=False):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1 (ndarray): shape (n, 4)
bboxes2 (ndarray): shape (k, 4)
mode (str): "iou" (intersection over union) or "iof" (intersection
over foreground)
eps (float): small constant added to the union to avoid division
by zero. Default: 1e-6.
keep_order (bool): if True, skip the swap optimization so the output
is always indexed as (n, k). Default: False.
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ["iou", "iof"]
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
if not keep_order:
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
else:
exchange = False
area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start, 0) * np.maximum(y_end - y_start, 0)
if mode == "iou":
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
union = np.maximum(union, eps)
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
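# For illustration: two 10x10 boxes overlapping in a 5x5 corner region
# give IoU = 25 / (100 + 100 - 25):
# b1 = np.array([[0, 0, 10, 10]])
# b2 = np.array([[5, 5, 15, 15]])
# bbox_overlaps_np(b1, b2)  # -> array([[0.1428...]], dtype=float32)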
def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg):
"""Merge augmented proposals (multiscale, flip, etc.)
Args:
aug_proposals (list[Tensor]): proposals from different testing
schemes, shape (n, 5). Note that they are not rescaled to the
original image size.
img_metas (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see `Collect`.
rpn_test_cfg (dict): rpn test config.
Returns:
Tensor: shape (n, 5), proposals (with scores) corresponding to original image scale.
"""
recovered_proposals = []
for proposals, img_info in zip(aug_proposals, img_metas):
img_shape = img_info["img_shape"]
scale_factor = img_info["scale_factor"]
flip = img_info["flip"]
flip_direction = img_info["flip_direction"]
_proposals = proposals.clone()
_proposals[:, :4] = bbox_mapping_back(
_proposals[:, :4], img_shape, scale_factor, flip, flip_direction
)
recovered_proposals.append(_proposals)
aug_proposals = torch.cat(recovered_proposals, dim=0)
merged_proposals, _ = nms(
aug_proposals[:, :4].contiguous(),
aug_proposals[:, -1].contiguous(),
rpn_test_cfg["iou_threshold"],
)
scores = merged_proposals[:, 4]
_, order = scores.sort(0, descending=True)
num = min(rpn_test_cfg["max_num"], merged_proposals.shape[0])
order = order[:num]
merged_proposals = merged_proposals[order, :]
return merged_proposals
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
"""Merge augmented detection bboxes and scores.
Args:
aug_bboxes (list[Tensor]): shape (n, 4*#class)
aug_scores (list[Tensor] or None): shape (n, #class)
img_metas (list[list[dict]]): image info for each augmentation.
rcnn_test_cfg (dict): rcnn test config.
Returns:
tuple: (bboxes, scores)
"""
recovered_bboxes = []
for bboxes, img_info in zip(aug_bboxes, img_metas):
img_shape = img_info[0]["img_shape"]
scale_factor = img_info[0]["scale_factor"]
flip = img_info[0]["flip"]
flip_direction = img_info[0]["flip_direction"]
bboxes = bbox_mapping_back(
bboxes, img_shape, scale_factor, flip, flip_direction
)
recovered_bboxes.append(bboxes)
bboxes = torch.stack(recovered_bboxes).mean(dim=0)
if aug_scores is None:
return bboxes
else:
scores = torch.stack(aug_scores).mean(dim=0)
return bboxes, scores
def merge_aug_scores(aug_scores):
"""Merge augmented bbox scores."""
if isinstance(aug_scores[0], torch.Tensor):
return torch.mean(torch.stack(aug_scores), dim=0)
else:
return np.mean(aug_scores, axis=0)
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
"""Merge augmented mask prediction.
Args:
aug_masks (list[ndarray]): shape (n, #class, h, w)
img_metas (list[list[dict]]): image info for each augmentation.
rcnn_test_cfg (dict): rcnn test config.
weights (list[float], optional): per-augmentation averaging weights.
Returns:
ndarray: merged mask prediction.
"""
recovered_masks = []
for mask, img_info in zip(aug_masks, img_metas):
flip = img_info[0]["flip"]
flip_direction = img_info[0]["flip_direction"]
if flip:
if flip_direction == "horizontal":
mask = mask[:, :, :, ::-1]
elif flip_direction == "vertical":
mask = mask[:, :, ::-1, :]
else:
raise ValueError(f"Invalid flipping direction '{flip_direction}'")
recovered_masks.append(mask)
if weights is None:
merged_masks = np.mean(recovered_masks, axis=0)
else:
merged_masks = np.average(
np.array(recovered_masks), axis=0, weights=np.array(weights)
)
return merged_masks
def gaussian2D(radius, sigma=1, dtype=torch.float32, device="cpu"):
"""Generate 2D gaussian kernel.
Args:
radius (int): Radius of gaussian kernel.
sigma (int): Sigma of gaussian function. Default: 1.
dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
device (str): Device of gaussian tensor. Default: 'cpu'.
Returns:
h (Tensor): Gaussian kernel with a
``(2 * radius + 1) * (2 * radius + 1)`` shape.
"""
x = torch.arange(-radius, radius + 1, dtype=dtype, device=device).view(1, -1)
y = torch.arange(-radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
h[h < torch.finfo(h.dtype).eps * h.max()] = 0
return h
def gen_gaussian_target(heatmap, center, radius, k=1):
"""Generate 2D gaussian heatmap.
Args:
heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
it and maintain the max value.
center (list[int]): Coord of gaussian kernel's center.
radius (int): Radius of gaussian kernel.
k (int): Coefficient of gaussian kernel. Default: 1.
Returns:
out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
"""
diameter = 2 * radius + 1
gaussian_kernel = gaussian2D(
radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device
)
x, y = center
height, width = heatmap.shape[:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
masked_gaussian = gaussian_kernel[
radius - top : radius + bottom, radius - left : radius + right
]
out_heatmap = heatmap
torch.max(
masked_heatmap,
masked_gaussian * k,
out=out_heatmap[y - top : y + bottom, x - left : x + right],
)
return out_heatmap
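# e.g. stamp a radius-3 gaussian peak centred at (x=32, y=24) on an
# empty 64x64 heatmap:
# heatmap = torch.zeros(64, 64)
# heatmap = gen_gaussian_target(heatmap, [32, 24], radius=3)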
def gaussian_radius(det_size, min_overlap):
r"""Generate 2D gaussian radius.
This function is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/
utils.py#L65>`_.
Given ``min_overlap``, radius could computed by a quadratic equation
according to Vieta's formulas.
There are 3 cases for computing gaussian radius, details are following:
- Explanation of figure: ``lt`` and ``br`` indicates the left-top and
bottom-right corner of ground truth box. ``x`` indicates the
generated corner at the limited position when ``radius=r``.
- Case1: one corner is inside the gt box and the other is outside.
.. code:: text
|< width >|
lt-+----------+ -
| | | ^
+--x----------+--+
| | | |
| | | | height
| | overlap | |
| | | |
| | | | v
+--+---------br--+ -
| | |
+----------+--x
To ensure IoU of generated box and gt box is larger than ``min_overlap``:
.. math::
\cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad
{r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\
{a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\
{r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
- Case2: both two corners are inside the gt box.
.. code:: text
|< width >|
lt-+----------+ -
| | | ^
+--x-------+ |
| | | |
| |overlap| | height
| | | |
| +-------x--+
| | | v
+----------+-br -
To ensure IoU of generated box and gt box is larger than ``min_overlap``:
.. math::
\cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad
{4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\
{a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\
{r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
- Case3: both two corners are outside the gt box.
.. code:: text
|< width >|
x--+----------------+
| | |
+-lt-------------+ | -
| | | | ^
| | | |
| | overlap | | height
| | | |
| | | | v
| +------------br--+ -
| | |
+----------------+--x
To ensure IoU of generated box and gt box is larger than ``min_overlap``:
.. math::
\cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad
{4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\
{a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\
{r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a}
Args:
det_size (list[int]): Shape of object.
min_overlap (float): Min IoU with ground truth for boxes generated by
keypoints inside the gaussian kernel.
Returns:
radius (int): Radius of gaussian kernel.
"""
height, width = det_size
a1 = 1
b1 = height + width
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 - sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 - sq2) / (2 * a2)
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
# Case 3 root, following the docstring formulas above
sq3 = sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / (2 * a3)
return min(r1, r2, r3)
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
"""Provides speed and memory benchmarks against esig and iisignature."""
import collections as co
import datetime
import io
import itertools as it
import math
import matplotlib.pyplot as plt
import os
import subprocess
import torch
from . import helpers
class InvalidBenchmark(Exception):
"""Raised to indicate a set of options that do not define a valid benchmark."""
def __init__(self, msg, *args, **kwargs):
msg += ' Run `python command.py benchmark --help` for details on how to do this.'
super(InvalidBenchmark, self).__init__(msg, *args, **kwargs)
# This section specifies various constants and options that can be selected for benchmarking
# Here we essentially specify the different libraries we can test.
# We call them 'columns' because we can also include ratios between the libraries.
class RatioColumns(helpers.Container):
speedup_cpu_str = 'Ratio CPU (parallel)'
speedup_cpu_no_parallel_str = 'Ratio CPU (no parallel)'
speedup_gpu_str = 'Ratio GPU'
class Columns(RatioColumns):
signatory_cpu_str = 'Signatory CPU (parallel)'
signatory_cpu_no_parallel_str = 'Signatory CPU (no parallel)'
signatory_gpu_str = 'Signatory GPU'
iisignature_str = 'iisignature'
esig_str = 'esig'
colours = {Columns.signatory_cpu_str: 'b',
Columns.signatory_cpu_no_parallel_str: 'c',
Columns.signatory_gpu_str: 'g',
Columns.iisignature_str: 'r',
Columns.esig_str: 'm'}
# Now we specify all the different functions we could benchmark
_signature_forward_fns = co.OrderedDict([(Columns.esig_str, 'esig_signature_forward'),
(Columns.iisignature_str, 'iisignature_signature_forward'),
(Columns.signatory_cpu_no_parallel_str, 'signatory_signature_forward_no_parallel'),
(Columns.signatory_cpu_str, 'signatory_signature_forward'),
(Columns.signatory_gpu_str, 'signatory_signature_forward_gpu')])
_signature_backward_fns = co.OrderedDict([(Columns.esig_str, 'esig_signature_backward'),
(Columns.iisignature_str, 'iisignature_signature_backward'),
(Columns.signatory_cpu_no_parallel_str, 'signatory_signature_backward_no_parallel'),
(Columns.signatory_cpu_str, 'signatory_signature_backward'),
(Columns.signatory_gpu_str, 'signatory_signature_backward_gpu')])
_logsignature_forward_fns = co.OrderedDict([(Columns.esig_str, 'esig_logsignature_forward'),
(Columns.iisignature_str, 'iisignature_logsignature_forward'),
(Columns.signatory_cpu_no_parallel_str, 'signatory_logsignature_forward_no_parallel'),
(Columns.signatory_cpu_str, 'signatory_logsignature_forward'),
(Columns.signatory_gpu_str, 'signatory_logsignature_forward_gpu')])
_logsignature_backward_fns = co.OrderedDict([(Columns.esig_str, 'esig_logsignature_backward'),
(Columns.iisignature_str, 'iisignature_logsignature_backward'),
(Columns.signatory_cpu_no_parallel_str, 'signatory_logsignature_backward_no_parallel'),
(Columns.signatory_cpu_str, 'signatory_logsignature_backward'),
(Columns.signatory_gpu_str, 'signatory_logsignature_backward_gpu')])
class BackwardFunctions(helpers.Container):
signature_backward_fns = {'Signature backward': _signature_backward_fns}
logsignature_backward_fns = {'Logsignature backward': _logsignature_backward_fns}
class Functions(BackwardFunctions):
signature_forward_fns = {'Signature forward': _signature_forward_fns}
logsignature_forward_fns = {'Logsignature forward': _logsignature_forward_fns}
all_fns = co.OrderedDict()
all_fns.update(signature_forward_fns)
all_fns.update(BackwardFunctions.signature_backward_fns)
all_fns.update(logsignature_forward_fns)
all_fns.update(BackwardFunctions.logsignature_backward_fns)
# These are the things we can measure
class Measurables(helpers.Container):
time = 'time'
memory = 'memory'
# These are the different predefined size/depth combinations that we can produce benchmarks for.
class Types(helpers.Container):
class typical(object):
"""Tests two typical use cases."""
sizes = ((32, 128, 8),)
depths = (5, 7)
class channels(object):
"""Tests a number of channels for a fixed depth."""
sizes = ((32, 128, 2), (32, 128, 3), (32, 128, 4), (32, 128, 5), (32, 128, 6), (32, 128, 7))
depths = (7,)
class depths(object):
"""Tests depths for a fixed number of channels."""
sizes = ((32, 128, 4),)
depths = (2, 3, 4, 5, 6, 7, 8, 9)
class small(object):
"""Tests on very small data. This doesn't given meaningful results - the overhead of PyTorch/NumPy/etc. ends up
giving a greater noise than there is signal - but it serves to test the benchmark framework itself.
"""
sizes = ((1, 2, 2),)
depths = (2, 3, 4, 5)
# Done with specifying constants and options
class BenchmarkRunner(object):
"""Runs all functions across all libraries and records their times or memory usage for multiple sizes and depths."""
def __init__(self, type_, test_esig, test_iisignature, test_signatory_gpu, measure, fns, **kwargs):
assert type_ in Types
assert measure in Measurables
assert fns in Functions
if measure is Measurables.memory and test_signatory_gpu:
raise InvalidBenchmark('Memory comparisons for Signatory GPU are not meaningful, as everything else '
'operates on the CPU. Please disable GPU testing.')
if fns in BackwardFunctions and test_esig:
raise InvalidBenchmark('esig does not support backward computations. Please disable esig testing.')
self.sizes = type_.sizes
self.depths = type_.depths
self.test_esig = test_esig
self.test_iisignature = test_iisignature
self.test_signatory_gpu = test_signatory_gpu
self.measure = measure
self.fns = fns
self.title_string = list(fns.keys())[0] + ': ' + measure
self.dirname = self.title_string.lower().replace(' ', '_').replace(':', '') + '_' + type_.__name__
self._results = None
super(BenchmarkRunner, self).__init__(**kwargs)
@property
def results(self):
return self._results
def check_graph(self):
"""Checks whether or not this benchmark is suitable for being plotted as a graph."""
if len(self.sizes) > 1 and len(self.depths) > 1:
raise InvalidBenchmark("Cannot output as graph with multiple sizes and multiple depths.")
if len(list(self.fns.keys())) > 1:
raise InvalidBenchmark("Cannot output as graph with multiple functions.")
batch_size, stream_size, _ = next(iter(self.sizes))
for size in self.sizes:
if size[0] != batch_size or size[1] != stream_size:
raise InvalidBenchmark("Cannot output as graph with multiple batch or stream sizes.")
def run(self):
"""Runs the benchmarks."""
running = True
results = helpers.namedarray(len(self.fns), len(self.sizes), len(self.depths))
for fn_name, fn_dict in self.fns.items():
for size in self.sizes:
for depth in self.depths:
running, results[fn_name, size, depth] = self._run_test(fn_name, fn_dict, size, depth, running)
self._results = results
def _run_test(self, fn_name, fn_dict, size, depth, running):
"""Runs a particular function across multiple different libraries and records their speed or memory usage."""
column_results = co.OrderedDict()
for library_name, library_module_name in fn_dict.items():
if (not self.test_esig) and (library_name is Columns.esig_str):
continue
if (not self.test_signatory_gpu) and (library_name is Columns.signatory_gpu_str):
continue
if (not self.test_iisignature) and (library_name is Columns.iisignature_str):
continue
result = math.inf
if running:
print(self._table_format_index(fn_name, size, depth), library_name)
try:
if self.measure is Measurables.time:
result = self._time(library_module_name, size, depth)
elif self.measure is Measurables.memory:
result = self._memory(library_module_name, size, depth)
else:
raise RuntimeError
except KeyboardInterrupt:
running = False
column_results[library_name] = result
other_best = math.inf
if self.test_iisignature:
other_best = min(column_results[Columns.iisignature_str], other_best)
if self.test_esig:
other_best = min(column_results[Columns.esig_str], other_best)
try:
column_results[Columns.speedup_cpu_str] = other_best / column_results[Columns.signatory_cpu_str]
except ZeroDivisionError:
column_results[Columns.speedup_cpu_str] = math.inf
try:
column_results[Columns.speedup_cpu_no_parallel_str] = other_best / column_results[Columns.signatory_cpu_no_parallel_str]
except ZeroDivisionError:
column_results[Columns.speedup_cpu_no_parallel_str] = math.inf
if self.test_signatory_gpu:
try:
column_results[Columns.speedup_gpu_str] = other_best / column_results[Columns.signatory_gpu_str]
except ZeroDivisionError:
column_results[Columns.speedup_gpu_str] = math.inf
return running, column_results
@classmethod
def _time(cls, library_module_name, size, depth):
return cls._run_file(library_module_name, 'time_', size, depth)
@classmethod
def _memory(cls, library_module_name, size, depth):
result = 0
for _ in range(5):
stdout = cls._run_file(library_module_name, 'memory', size, depth)
if stdout == 0:
# Sometimes things bug out and give a zero memory reading.
# I'm not sure why; this seems to be flaky.
continue
# Take the maximum, as we sample based on some frequency, and can easily miss a peak when doing this over
# just one run.
# (Yeah this isn't ideal.)
result = max(result, stdout)
if result == 0:
result = math.inf
return result
@staticmethod
def _run_file(library_module_name, filename, size, depth):
if torch.cuda.is_available():
device = int(torch.cuda.current_device())
else:
device = -1
p = subprocess.run('python -m {}.{} {} {} {} {}'
''.format(__package__,
filename,
library_module_name,
str(size).replace(' ', '').replace('(', '').replace(')', ''),
depth,
device),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stderr = p.stderr.decode()
if stderr != '':
print('Error:')
print('------')
print(stderr)
print('')
raise RuntimeError("Error in " + library_module_name)
stdout = p.stdout.decode().strip()
for line in stdout.split('\n'):
if 'Legitimate' not in line:
stdout = line
break
return float(stdout)
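# For illustration, the command assembled above looks roughly like
# (exact module path depends on how the package is laid out):
#   python -m <package>.time_ signatory_signature_forward 32,128,8 5 -1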
@staticmethod
def _table_format_index(fn_name, size, depth):
return "{}, size {}, depth {}".format(fn_name, size, depth)
def _graph_format_index(self, size, depth):
if len(self.sizes) > 1:
return size[-1]
elif len(self.depths) > 1:
return depth
def graph(self, save=False, log=True):
"""Plots the result as a graph."""
self.check_graph()
fig = plt.figure()
ax = fig.gca()
_, example_row_value = next(iter(self.results))
x_axes = [[] for _ in range(len(example_row_value))]
y_axes = [[] for _ in range(len(example_row_value))]
labels = []
for column_heading in example_row_value.keys():
labels.append(column_heading)
for (fn_name, size, depth), row_value in self.results:
for x_axis, y_axis, (column_heading, column_value) in zip(x_axes, y_axes, row_value.items()):
x_axis.append(self._graph_format_index(size, depth))
y_axis.append(column_value)
for x_axis, y_axis, label in zip(x_axes, y_axes, labels):
if label in RatioColumns:
continue
ax.plot(x_axis, y_axis, label=label, color=colours[label])
ncol = 2
# From https://stackoverflow.com/a/10101532/12254339
def flip(items):
return it.chain(*[items[i::ncol] for i in range(ncol)])
handles, labels = ax.get_legend_handles_labels()
legend = ax.legend(flip(handles), flip(labels),
mode='expand', ncol=ncol, bbox_to_anchor=(0, 1.02, 1, 0), loc='lower left',
borderaxespad=0.)
legend_bbox = legend.get_window_extent(fig.canvas.get_renderer())
ax.set_title(self.title_string, y=0.03 + legend_bbox.inverse_transformed(ax.transAxes).ymax)
if self.measure is Measurables.time:
ax.set_ylabel("Time in seconds")
elif self.measure is Measurables.memory:
ax.set_ylabel("Memory usage in MB")
else:
raise RuntimeError
if len(self.sizes) > 1:
ax.set_xlabel("Number of channels")
elif len(self.depths) > 1:
ax.set_xlabel("Depth")
else:
raise RuntimeError
if log:
ax.set_yscale('log')
start, end = ax.get_xlim()
ax.xaxis.set_ticks(range(int(math.ceil(start)), int(math.floor(end)) + 1))
plt.tight_layout()
if save:
if not os.path.isdir(self.dirname):
os.mkdir(self.dirname)
plt.savefig(os.path.join(self.dirname, str(datetime.datetime.utcnow())) + '.png')
else:
plt.show()
def table(self, save=False):
"""Formats the results into a table."""
out_str = ''
def val_to_str(val):
if val == math.inf:
return '-'
if isinstance(val, float):
return '{:.3}'.format(val)
return str(val)
operation_str = 'Operation'
padding = 1
max_row_heading_len | |
#!/usr/bin/env python
#
# generate a tester program for the API
#
import sys
import os
import string
try:
import libxml2
except ImportError:
print("libxml2 python bindings not available, skipping testapi.c generation")
sys.exit(0)
if len(sys.argv) > 1:
srcPref = sys.argv[1] + '/'
else:
srcPref = ''
#
# Modules we want to skip in API test
#
skipped_modules = [ "SAX", "xlink", "threads", "globals",
"xmlmemory", "xmlversion", "xmlexports",
]
#
# defines for each module
#
modules_defines = {
"HTMLparser": "LIBXML_HTML_ENABLED",
"catalog": "LIBXML_CATALOG_ENABLED",
"xmlreader": "LIBXML_READER_ENABLED",
"relaxng": "LIBXML_SCHEMAS_ENABLED",
"schemasInternals": "LIBXML_SCHEMAS_ENABLED",
"xmlschemas": "LIBXML_SCHEMAS_ENABLED",
"xmlschemastypes": "LIBXML_SCHEMAS_ENABLED",
"xpath": "LIBXML_XPATH_ENABLED",
"xpathInternals": "LIBXML_XPATH_ENABLED",
"xinclude": "LIBXML_XINCLUDE_ENABLED",
"xpointer": "LIBXML_XPTR_ENABLED",
"xmlregexp" : "LIBXML_REGEXP_ENABLED",
"xmlautomata" : "LIBXML_AUTOMATA_ENABLED",
"xmlsave" : "LIBXML_OUTPUT_ENABLED",
"xmlmodule" : "LIBXML_MODULES_ENABLED",
"pattern" : "LIBXML_PATTERN_ENABLED",
"schematron" : "LIBXML_SCHEMATRON_ENABLED",
}
#
# defines for specific functions
#
function_defines = {
"htmlDefaultSAXHandlerInit": "LIBXML_HTML_ENABLED",
"xmlSAX2EndElement" : "LIBXML_SAX1_ENABLED",
"xmlSAX2StartElement" : "LIBXML_SAX1_ENABLED",
"xmlSAXDefaultVersion" : "LIBXML_SAX1_ENABLED",
"UTF8Toisolat1" : "LIBXML_OUTPUT_ENABLED",
"xmlIOParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDoc": "LIBXML_SAX1_ENABLED",
"xmlParseMemory": "LIBXML_SAX1_ENABLED",
"xmlRecoverDoc": "LIBXML_SAX1_ENABLED",
"xmlParseFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseFileWithData": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDoc": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDTD": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseFile": "LIBXML_SAX1_ENABLED",
"xmlParseEntity": "LIBXML_SAX1_ENABLED",
"xmlParseExternalEntity": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemoryWithData": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemory": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemoryRecover": "LIBXML_SAX1_ENABLED",
"xmlSetupParserForBuffer": "LIBXML_SAX1_ENABLED",
"xmlStopParser": "LIBXML_PUSH_ENABLED",
"xmlAttrSerializeTxtContent": "LIBXML_OUTPUT_ENABLED",
"xmlSAXParseFile": "LIBXML_SAX1_ENABLED",
"xmlSAXParseEntity": "LIBXML_SAX1_ENABLED",
"xmlNewTextChild": "LIBXML_TREE_ENABLED",
"xmlNewDocRawNode": "LIBXML_TREE_ENABLED",
"xmlNewProp": "LIBXML_TREE_ENABLED",
"xmlReconciliateNs": "LIBXML_TREE_ENABLED",
"xmlValidateNCName": "LIBXML_TREE_ENABLED",
"xmlValidateNMToken": "LIBXML_TREE_ENABLED",
"xmlValidateName": "LIBXML_TREE_ENABLED",
"xmlNewChild": "LIBXML_TREE_ENABLED",
"xmlValidateQName": "LIBXML_TREE_ENABLED",
"xmlSprintfElementContent": "LIBXML_OUTPUT_ENABLED",
"xmlValidGetPotentialChildren" : "LIBXML_VALID_ENABLED",
"xmlValidGetValidElements" : "LIBXML_VALID_ENABLED",
"xmlTextReaderPreservePattern" : "LIBXML_PATTERN_ENABLED",
}
#
# Some functions really need to be skipped for the tests.
#
skipped_functions = [
# block on I/O
"xmlFdRead", "xmlReadFd", "xmlCtxtReadFd",
"htmlFdRead", "htmlReadFd", "htmlCtxtReadFd",
"xmlReaderNewFd", "xmlReaderForFd",
"xmlIORead", "xmlReadIO", "xmlCtxtReadIO",
"htmlIORead", "htmlReadIO", "htmlCtxtReadIO",
"xmlReaderNewIO", "xmlBufferDump", "xmlNanoFTPConnect",
"xmlNanoFTPConnectTo", "xmlNanoHTTPMethod", "xmlNanoHTTPMethodRedir",
# Complex I/O APIs
"xmlCreateIOParserCtxt", "xmlParserInputBufferCreateIO",
"xmlRegisterInputCallbacks", "xmlReaderForIO",
"xmlOutputBufferCreateIO", "xmlRegisterOutputCallbacks",
"xmlSaveToIO", "xmlIOHTTPOpenW",
# library state cleanup, generates false leak information and other
# troubles, heavily tested otherwise.
"xmlCleanupParser", "xmlRelaxNGCleanupTypes", "xmlSetListDoc",
"xmlSetTreeDoc", "xmlUnlinkNode",
# hard to avoid leaks in the tests
"xmlStrcat", "xmlStrncat", "xmlCatalogAddLocal", "xmlNewTextWriterDoc",
"xmlXPathNewValueTree", "xmlXPathWrapString",
# unimplemented
"xmlTextReaderReadInnerXml", "xmlTextReaderReadOuterXml",
"xmlTextReaderReadString",
# destructor
"xmlListDelete", "xmlOutputBufferClose", "xmlNanoFTPClose", "xmlNanoHTTPClose",
# deprecated
"xmlCatalogGetPublic", "xmlCatalogGetSystem", "xmlEncodeEntities",
"xmlNewGlobalNs", "xmlHandleEntity", "xmlNamespaceParseNCName",
"xmlNamespaceParseNSDef", "xmlNamespaceParseQName",
"xmlParseNamespace", "xmlParseQuotedString", "xmlParserHandleReference",
"xmlScanName",
"xmlDecodeEntities",
# allocators
"xmlMemFree",
# verbosity
"xmlCatalogSetDebug", "xmlShellPrintXPathError", "xmlShellPrintNode",
# Internal functions, no user space should really call them
"xmlParseAttribute", "xmlParseAttributeListDecl", "xmlParseName",
"xmlParseNmtoken", "xmlParseEntityValue", "xmlParseAttValue",
"xmlParseSystemLiteral", "xmlParsePubidLiteral", "xmlParseCharData",
"xmlParseExternalID", "xmlParseComment", "xmlParsePITarget", "xmlParsePI",
"xmlParseNotationDecl", "xmlParseEntityDecl", "xmlParseDefaultDecl",
"xmlParseNotationType", "xmlParseEnumerationType", "xmlParseEnumeratedType",
"xmlParseAttributeType", "xmlParseAttributeListDecl",
"xmlParseElementMixedContentDecl", "xmlParseElementChildrenContentDecl",
"xmlParseElementContentDecl", "xmlParseElementDecl", "xmlParseMarkupDecl",
"xmlParseCharRef", "xmlParseEntityRef", "xmlParseReference",
"xmlParsePEReference", "xmlParseDocTypeDecl", "xmlParseAttribute",
"xmlParseStartTag", "xmlParseEndTag", "xmlParseCDSect", "xmlParseContent",
"xmlParseElement", "xmlParseVersionNum", "xmlParseVersionInfo",
"xmlParseEncName", "xmlParseEncodingDecl", "xmlParseSDDecl",
"xmlParseXMLDecl", "xmlParseTextDecl", "xmlParseMisc",
"xmlParseExternalSubset", "xmlParserHandlePEReference",
"xmlSkipBlankChars",
# Legacy
"xmlCleanupPredefinedEntities", "xmlInitializePredefinedEntities",
"xmlSetFeature", "xmlGetFeature", "xmlGetFeaturesList",
# location sets
"xmlXPtrLocationSetAdd",
"xmlXPtrLocationSetCreate",
"xmlXPtrLocationSetDel",
"xmlXPtrLocationSetMerge",
"xmlXPtrLocationSetRemove",
"xmlXPtrWrapLocationSet",
]
#
# These functions have side effects on the global state
# and hence generate errors on memory allocation tests
#
skipped_memcheck = [ "xmlLoadCatalog", "xmlAddEncodingAlias",
"xmlSchemaInitTypes", "xmlNanoFTPProxy", "xmlNanoFTPScanProxy",
"xmlNanoHTTPScanProxy", "xmlResetLastError", "xmlCatalogConvert",
"xmlCatalogRemove", "xmlLoadCatalogs", "xmlCleanupCharEncodingHandlers",
"xmlInitCharEncodingHandlers", "xmlCatalogCleanup",
"xmlSchemaGetBuiltInType",
"htmlParseFile", "htmlCtxtReadFile", # loads the catalogs
"xmlTextReaderSchemaValidate", "xmlSchemaCleanupTypes", # initialize the schemas type system
"xmlCatalogResolve", "xmlIOParseDTD" # loads the catalogs
]
#
# Extra code needed for some test cases
#
extra_pre_call = {
"xmlSAXUserParseFile": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlSAXUserParseMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemoryRecover": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParserInputBufferCreateFd":
"if (fd >= 0) fd = -1;",
}
extra_post_call = {
"xmlAddChild":
"if (ret_val == NULL) { xmlFreeNode(cur) ; cur = NULL ; }",
"xmlAddEntity":
"if (ret_val != NULL) { xmlFreeNode(ret_val) ; ret_val = NULL; }",
"xmlAddChildList":
"if (ret_val == NULL) { xmlFreeNodeList(cur) ; cur = NULL ; }",
"xmlAddSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddNextSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddPrevSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlDocSetRootElement":
"if (doc == NULL) { xmlFreeNode(root) ; root = NULL ; }",
"xmlReplaceNode":
"""if (cur != NULL) {
xmlUnlinkNode(cur);
xmlFreeNode(cur) ; cur = NULL ; }
if (old != NULL) {
xmlUnlinkNode(old);
xmlFreeNode(old) ; old = NULL ; }
\t ret_val = NULL;""",
"xmlTextMerge":
"""if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
xmlUnlinkNode(second);
xmlFreeNode(second) ; second = NULL ; }""",
"xmlBuildQName":
"""if ((ret_val != NULL) && (ret_val != ncname) &&
(ret_val != prefix) && (ret_val != memory))
xmlFree(ret_val);
\t ret_val = NULL;""",
"xmlNewDocElementContent":
"""xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
"xmlDictReference": "xmlDictFree(dict);",
# Functions which deallocates one of their parameters
"xmlXPathConvertBoolean": """val = NULL;""",
"xmlXPathConvertNumber": """val = NULL;""",
"xmlXPathConvertString": """val = NULL;""",
"xmlSaveFileTo": """buf = NULL;""",
"xmlSaveFormatFileTo": """buf = NULL;""",
"xmlIOParseDTD": "input = NULL;",
"xmlRemoveProp": "cur = NULL;",
"xmlNewNs": "if ((node == NULL) && (ret_val != NULL)) xmlFreeNs(ret_val);",
"xmlCopyNamespace": "if (ret_val != NULL) xmlFreeNs(ret_val);",
"xmlCopyNamespaceList": "if (ret_val != NULL) xmlFreeNsList(ret_val);",
"xmlNewTextWriter": "if (ret_val != NULL) out = NULL;",
"xmlNewTextWriterPushParser": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;} if (ret_val != NULL) ctxt = NULL;",
"xmlNewIOInputStream": "if (ret_val != NULL) input = NULL;",
"htmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"htmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseExtParsedEnt": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlDOMWrapAdoptNode": "if ((node != NULL) && (node->parent == NULL)) {xmlUnlinkNode(node);xmlFreeNode(node);node = NULL;}",
"xmlBufferSetAllocationScheme": "if ((buf != NULL) && (scheme == XML_BUFFER_ALLOC_IMMUTABLE) && (buf->content != NULL) && (buf->content != static_buf_content)) { xmlFree(buf->content); buf->content = NULL;}"
}
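# Note: the snippets in extra_pre_call/extra_post_call are pasted verbatim
# into the generated testapi.c immediately before/after the call under
# test, so each fragment must be a valid C statement in that context.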
modules = []
def is_skipped_module(name):
for mod in skipped_modules:
if mod == name:
return 1
return 0
def is_skipped_function(name):
for fun in skipped_functions:
if fun == name:
return 1
# Do not test destructors
if name.find('Free') != -1:
return 1
return 0
def is_skipped_memcheck(name):
for fun in skipped_memcheck:
if fun == name:
return 1
return 0
missing_types = {}
def add_missing_type(name, func):
try:
list = missing_types[name]
list.append(func)
except:
missing_types[name] = [func]
generated_param_types = []
def add_generated_param_type(name):
generated_param_types.append(name)
generated_return_types = []
def add_generated_return_type(name):
generated_return_types.append(name)
missing_functions = {}
missing_functions_nr = 0
def add_missing_functions(name, module):
global missing_functions_nr
missing_functions_nr = missing_functions_nr + 1
try:
list = missing_functions[module]
list.append(name)
except:
missing_functions[module] = [name]
#
# Provide the type generators and destructors for the parameters
#
def type_convert(str, name, info, module, function, pos):
# res = str.replace("    ", " ")
# res = str.replace("   ", " ")
# res = str.replace("  ", " ")
res = str.replace(" *", "_ptr")
# res = str.replace("*", "_ptr")
res = res.replace(" ", "_")
if res == 'const_char_ptr':
if name.find("file") != -1 or \
name.find("uri") != -1 or \
name.find("URI") != -1 or \
info.find("filename") != -1 or \
info.find("URI") != -1 or \
info.find("URL") != -1:
if function.find("Save") != -1 or \
function.find("Create") != -1 or \
function.find("Write") != -1 or \
function.find("Fetch") != -1:
return('fileoutput')
return('filepath')
if res == 'void_ptr':
if module == 'nanoftp' and name == 'ctx':
return('xmlNanoFTPCtxtPtr')
if function == 'xmlNanoFTPNewCtxt' or \
function == 'xmlNanoFTPConnectTo' or \
function == 'xmlNanoFTPOpen':
return('xmlNanoFTPCtxtPtr')
if module == 'nanohttp' and name == 'ctx':
return('xmlNanoHTTPCtxtPtr')
if function == 'xmlNanoHTTPMethod' or \
function == 'xmlNanoHTTPMethodRedir' or \
function == 'xmlNanoHTTPOpen' or \
function == 'xmlNanoHTTPOpenRedir':
return('xmlNanoHTTPCtxtPtr');
if function == 'xmlIOHTTPOpen':
return('xmlNanoHTTPCtxtPtr')
if name.find("data") != -1:
return('userdata')
if name.find("user") != -1:
return('userdata')
if res == 'xmlDoc_ptr':
res = 'xmlDocPtr'
if res == 'xmlNode_ptr':
res = 'xmlNodePtr'
if res == 'xmlDict_ptr':
res = 'xmlDictPtr'
if res == 'xmlNodePtr' and pos != 0:
if (function == 'xmlAddChild' and pos == 2) or \
(function == 'xmlAddChildList' and pos == 2) or \
(function == 'xmlAddNextSibling' and pos == 2) or \
(function == 'xmlAddSibling' and pos == 2) or \
(function == 'xmlDocSetRootElement' and pos == 2) or \
(function == 'xmlReplaceNode' and pos == 2) or \
(function == 'xmlTextMerge') or \
(function == 'xmlAddPrevSibling' and pos == 2):
return('xmlNodePtr_in');
if res == 'const xmlBufferPtr':
res = 'xmlBufferPtr'
if res == 'xmlChar_ptr' and name == 'name' and \
function.find("EatName") != -1:
return('eaten_name')
if res == 'void_ptr*':
res = 'void_ptr_ptr'
if res == 'char_ptr*':
res = 'char_ptr_ptr'
if res == 'xmlChar_ptr*':
res = 'xmlChar_ptr_ptr'
if res == 'const_xmlChar_ptr*':
res = 'const_xmlChar_ptr_ptr'
if res == 'const_char_ptr*':
res = 'const_char_ptr_ptr'
if res == 'FILE_ptr' and module == 'debugXML':
res = 'debug_FILE_ptr';
if res == 'int' and name == 'options':
if module == 'parser' or module == 'xmlreader':
res = 'parseroptions'
return res
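# For illustration (worked examples, not part of the generator):
#   type_convert("xmlNode *", "elem", "", "tree", "xmlAddSibling", 2)
#     -> 'xmlNodePtr_in'  (node arguments freed on failure get a variant)
#   type_convert("const char *", "filename", "", "parser", "xmlParseFile", 1)
#     -> 'filepath'       (file-like const char * become canned test paths)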
known_param_types = []
def is_known_param_type(name):
for type in known_param_types:
if type == name:
return 1
return name[-3:] == 'Ptr' or name[-4:] == '_ptr'
def generate_param_type(name, rtype):
global test
for type in known_param_types:
if type == name:
return
for type in generated_param_types:
if type == name:
return
if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
# coding: utf8
# try something like
@auth.requires_login()
@auth.requires_membership('Student')
def index():
assignations = db((db.user_project.assigned_user == auth.user.id)&
(db.user_project.assigned_user == db.auth_user.id)&
(db.user_project.project == db.project.id)&
(db.project.area_level == db.area_level.id)&
(db.user_project.period == db.period_year.id)).select()
cyear_period = get_current_year_period()
def available_reports(assignation_period):
import datetime
current_date = datetime.datetime.now()
#if it is the first semester, the restriction window is:
#start date >= January 1 of the year 00:00:00
#end date >= January 1 of the year 00:00:00
#start date < July 1 of the year 00:00:00
#end date < July 1 of the year 00:00:00
#if it is the second semester, the restriction window is:
#start date >= July 1 of the year 00:00:00
#end date >= July 1 of the year 00:00:00
#start date < January 1 of the following year 00:00:00
#end date < January 1 of the following year 00:00:00
if assignation_period.period == first_period.id:
date_min = datetime.datetime(assignation_period.yearp, 1, 1)
date_max = datetime.datetime(assignation_period.yearp, 7, 1)
else:
date_min = datetime.datetime(assignation_period.yearp, 7, 1)
date_max = datetime.datetime(assignation_period.yearp + 1, 1, 1)
return db((db.report_restriction.start_date <= current_date)&
(db.report_restriction.end_date >= current_date)&
(db.report_restriction.start_date >= date_min)&
(db.report_restriction.end_date >= date_min)&
(db.report_restriction.start_date < date_max)&
(db.report_restriction.end_date < date_max)&
(db.report_restriction.is_enabled == True))
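# Worked example of the window above: for an assignation in the first
# period of yearp == 2020, the restrictions must fall in
# [2020-01-01, 2020-07-01); for the second period, in
# [2020-07-01, 2021-01-01).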
def available_item_restriction(period_year, user_project):
return db(((db.item_restriction.period==period_year) |
(db.item_restriction.permanent==True))&
(db.item_restriction.is_enabled==True)&
(db.item_restriction_area.item_restriction==\
db.item_restriction.id)&
(db.item_restriction_area.area_level==\
user_project.project.area_level.id)&
(db.item_restriction.item_type!=2))
def restriction_project_exception(item_restriction, assignation):
return db((db.item_restriction_exception.project== \
assignation.project.id)&
(db.item_restriction.id==item_restriction))
def items_instance(item_restriction, assignation):
return db((db.item.item_restriction==item_restriction.id)&
(db.item.assignation==assignation.user_project.id)&
(db.item.is_active==True))
import datetime
current_date = datetime.datetime.now().date()
return dict(assignations = assignations,
available_reports = available_reports,
current_date = current_date,
cyear_period = cyear_period,
available_item_restriction = available_item_restriction,
items_instance = items_instance,
restriction_project_exception=restriction_project_exception)
## Validate that the report date restriction and is_enabled restriction apply to the current date
def val_rep_restr(report_restriction):
import datetime
current_date = datetime.datetime.now()
rep_restr = db((db.report_restriction.id == report_restriction)&
(db.report_restriction.start_date <= current_date)&
(db.report_restriction.end_date >= current_date)&
(db.report_restriction.is_enabled == True)).select().first()
return rep_restr != None
## Validate that the report status is editable (it is either 'Draft' or 'Recheck')
def valid_status(report):
return (report.status == db.report_status(db.report_status.name == 'Draft').id) or \
(report.status == db.report_status(db.report_status.name == 'Recheck').id)
def get_current_year_period():
import datetime
cdate = datetime.datetime.now()
cyear = cdate.year
cmonth = cdate.month
period = second_period
#the current period depends on whether the date falls in Jan-Jun or Jul-Dec
if cmonth < 7 :
period = first_period
return db.period_year((db.period_year.yearp == cyear)&
(db.period_year.period == period))
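# e.g. a call made on 2020-03-15 returns the period_year row for
# (2020, first_period); one made on 2020-09-01 returns (2020, second_period).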
def val_rep_owner(report):
usr_rep = db((db.report.id == report)&
(db.report.assignation == db.user_project.id)&
(db.user_project.assigned_user == auth.user.id)).select().first()
return usr_rep != None
@auth.requires_login()
@auth.requires_membership('Student')
def update_data():
update_data_form = False
if auth.user != None:
cuser = db(db.auth_user.id==auth.user.id).select().first()
form = FORM(
DIV(LABEL(T('First Name:')),
INPUT(_name="first_name",
_type="text", _id="first_name",
_value=cuser.first_name,
requires=IS_NOT_EMPTY())),
DIV(LABEL(T('Last Name:')),
INPUT(_name="last_name",
_type="text", _id="last_name",
_value=cuser.last_name,
requires=IS_NOT_EMPTY())),
DIV(LABEL(T('Email:')),
INPUT(_name="email",
_type="text", _id="email",
_value=cuser.email,
requires=IS_NOT_EMPTY())),
DIV(LABEL(T('Password: (Leave the same for no \
change)')),
INPUT(_name="password",
_type="password", _id="password",
_value=cuser.password,
requires=IS_NOT_EMPTY())),
DIV(LABEL(T('Repeat password: (Leave blank for \
no change)')),
INPUT(_name="repass",
_type="password", _id="repass")),
DIV(LABEL(T('Phone:')),
INPUT(_name="phone", _type="text",
_id="phone", _value=cuser.phone,
requires=IS_LENGTH(minsize=8,
maxsize=12))),
DIV(LABEL(T('Working:')),
INPUT(_name="working",
_type="checkbox", _id="working",
_value=cuser.working)),
DIV(LABEL(T('Work Address:')),
INPUT(_name="work_address",
_type="text", _id="work_address",
_value=cuser.work_address)),
BR(),
DIV(INPUT(_type='submit',
_value=T('Update Profile'),
_class="btn-primary")),
_class="form-horizontal",)
if form.process().accepted:
first_name = request.vars['first_name']
last_name = request.vars['last_name']
email = request.vars['email']
password = request.vars['password']
repass = request.vars['repass']
phone = request.vars['phone']
working = request.vars['working']
work_address = request.vars['work_address']
#TODO analyze for additional security steps
cuser=db(db.auth_user.id==auth.user.id).select().first()
if cuser != None:
cuser.first_name = first_name
cuser.last_name = last_name
cuser.email = email
cuser.phone = phone
cuser.data_updated = True
if password == repass and len(repass) > 0:
#TODO Fix password update
cuser.password = db.auth_user.password.validate(password)
if working:
cuser.working = working
cuser.work_address = work_address
cuser.update_record()
response.flash = 'User data updated!'
redirect(URL('default', 'index'))
else:
response.flash = 'Error!'
elif form.errors:
response.flash = 'form has errors'
else:
response.flash = 'please fill the form'
return dict(form=form, update_data_form=True)
@cache.action()
@auth.requires_login()
@auth.requires_membership('Student')
def download():
item = db(db.item.uploaded_file==request.args[0]).select().first()
if item != None and item.assignation.assigned_user == auth.user.id:
return response.download(request, db)
else:
session.flash = T('Access Forbidden')
redirect(URL('default', 'index'))
@auth.requires_login()
@auth.requires_membership('Student')
def item():
cyear_period = get_current_year_period()
item_restriction = request.vars['restriction']
user_project = request.vars['assignation']
item_query = db((db.item.created==cyear_period)&
(db.item.item_restriction==item_restriction)&
(db.item.assignation==user_project))
item_restriction = db(db.item_restriction.id==\
item_restriction).select().first()
if(request.args(0) == 'create'):
if item_query.select().first() == None:
if item_restriction.item_type.name == 'File' and \
item_restriction.teacher_only != True:
form = FORM(
DIV(LABEL(T('Upload '+item_restriction.name+' \
File:')),
INPUT(_name="upload",
_type="file", _id="first_name",
requires=[IS_NOT_EMPTY(), \
IS_UPLOAD_FILENAME( \
extension='^(pdf|doc|docx)$',\
error_message=T('Invalid Format, \
Please upload only PDF, DOC or \
DOCX files'))])),
BR(),
DIV(INPUT(_type='submit',
_value=T('Upload File'),
_class="btn-primary")),
_class="form-horizontal",)
if form.process().accepted:
if request.vars.upload != None:
item = db.item.uploaded_file.store( \
request.vars.upload.file, \
request.vars.upload.filename)
db.item.insert(uploaded_file=item,
is_active=True,
created=cyear_period,
item_restriction=item_restriction.id,
assignation=user_project)
db.commit()
session.flash = T('Item created!')
redirect(URL('student', 'index'))
else:
session.flash = T('Error')
redirect(URL('student', 'index'))
elif form.errors:
session.flash = T('Errors')
redirect(URL('student', 'index'))
else:
session.flash = T('please fill the form')
return dict(form=form, action='create')
else:
session.flash = T('Action not allowed')
redirect(URL('student', 'index'))
elif(request.args(0) == 'view'):
item_upload = request.vars['file']
item = db((db.item.item_restriction==item_restriction)&
(db.item.assignation==user_project)&
(db.item.uploaded_file==item_upload)).select().first()
if item != None and item_restriction.teacher_only != True \
and item.is_active == True \
and item.assignation.assigned_user == auth.user.id:
return dict(item=item, name=item_restriction.name, action='view')
else:
session.flash = T('Access Forbidden')
redirect(URL('student', 'index'))
elif(request.args(0) == 'edit'):
item = db((db.item.created==cyear_period)&
(db.item.item_restriction==item_restriction)&
(db.item.assignation==user_project)).select().first()
if item == None or item_restriction.teacher_only == True \
or item.is_active != True:
redirect(URL('student', 'index'))
form = FORM(
DIV(LABEL(T('Upload '+item_restriction.name+' \
File:')),
INPUT(_name="upload",
_type="file", _id="first_name",
requires=[IS_NOT_EMPTY(), \
IS_UPLOAD_FILENAME( \
extension='^(pdf|doc|docx)$',\
error_message=T('Invalid Format, \
Please upload only PDF, DOC or \
DOCX files'))])),
BR(),
DIV(INPUT(_type='submit',
_value=T('Upload File'),
_class="btn-primary")),
_class="form-horizontal",)
if form.process().accepted:
if request.vars.upload != None:
uploaded = db.item.uploaded_file.store(request.vars.upload.file, request.vars.upload.filename)
item = db((db.item.created==cyear_period)&
(db.item.item_restriction==item_restriction)&
(db.item.assignation==user_project)).select().first()
if item != None:
item.update_record(uploaded_file = uploaded)
db.commit()
redirect(URL('student', 'index'))
elif form.errors:
response.flash = "Errors"
else:
response.flash = "please fill the form"
return dict(form=form, action='edit')
@auth.requires_login()
@auth.requires_membership('Student')
def report():
if (request.args(0) == 'create'):
#get the data & save the report
assignation = request.vars['assignation']
report_restriction = request.vars['report_restriction']
# Validate DB report_restriction to obey TIMING rules
valid_rep_restr = val_rep_restr(report_restriction)
# Validate report_restriction
report_restrict = db.report_restriction(db.report_restriction.id == report_restriction)
valid_report = report_restrict != None
# Validate assignation belongs to this user
assign = db.user_project((db.user_project.id == assignation)&
(db.user_project.assigned_user == auth.user.id))
valid_assignation = assign != None
# Validate there is not an already inserted report
valid = db.report((db.report.assignation == assignation)&
(db.report.report_restriction == report_restriction)) is None
if not(assignation and report_restriction and valid and valid_assignation and valid_report
and valid_rep_restr):
session.flash = T('Invalid selected assignation and report. Select a valid one.')
redirect(URL('student','index'))
import datetime
current_date = datetime.datetime.now()
report = db.report.insert(created = current_date,
assignation = assignation,
report_restriction = report_restriction,
status = db.report_status(name = 'Draft'))
session.flash = T('Report is now a draft.')
redirect(URL('student','report/edit', vars = dict(report = report.id)))
elif (request.args(0) == 'edit'):
## Get the report id
report = request.vars['report']
## Retrieve report data
report = db.report(db.report.id == report)
if not(report):
session.flash = T('Selected report can\'t be edited. Select a valid report.')
redirect(URL('student','index'))
## Validate report TIMING restriction
valid_rep_restr = val_rep_restr(report.report_restriction.id)
if not(valid_rep_restr):
session.flash = T('Selected report can\'t be edited. Select a valid report.')
redirect(URL('student','index'))
## Validate that the report belongs to user
valid_report_owner = val_rep_owner(report.id)
if not(valid_report_owner):
session.flash = T('Selected report can\'t be edited. Select a valid report.')
redirect(URL('student','index'))
## Validate that the report status is editable (it is either 'Draft' or 'Recheck')
if not(valid_status(report)):
session.flash = T('Selected report can\'t be edited. Select a valid report.')
redirect(URL('student','index'))
## Markmin formatting of reports
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="center"/>'
markmin_settings = {
'latex':lambda code: LATEX % code.replace('"','&quot;'),
'code_cpp':lambda text: CODE(text,language='cpp').xml(),
'code_java':lambda text: CODE(text,language='java').xml(),
'code_python':lambda text: CODE(text,language='python').xml(),
'code_html':lambda text: CODE(text,language='html').xml()}
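# These handlers are intended for the MARKMIN helper in the view, e.g.
# MARKMIN(report.body, extra=markmin_settings) -- 'report.body' is a
# hypothetical field name here; the actual field depends on the model.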
return dict(state = 'edit',
log_types = db(db.log_type.id > 0).select(),
logs = db.log_entry((db.log_entry.report == report.id)).select(),
anomalies = db((db.log_type.name == 'Anomaly')&
(db.log_entry.log_type == db.log_type.id)&
(db.log_entry.report == report.id)).count(),
markmin_settings = markmin_settings,
report = report)
elif (request.args(0) == 'save'):
## get the data & save the report
report = request.vars['report']
report = db.report(db.report.id == report)
## Validate DB report_restriction to obey TIMING rules
valid_rep_restr = val_rep_restr(report.report_restriction.id)
## Validate that the report status is editable (it is either 'Draft' or 'Recheck')
if not(valid_status(report)):
session.flash = T('Selected report can\'t be saved. Select a valid report.')
redirect(URL('student','index'))
# Validate assignation belongs to this user
assign = db.user_project((db.user_project.id == report.assignation)&
(db.user_project.assigned_user == auth.user.id))
valid_assignation = assign != None
if not(report and valid_assignation and valid_rep_restr):
session.flash = T('Invalid selected assignation and report. Select a valid one.')
redirect(URL('student','index'))
import datetime
current_date = datetime.datetime.now()
report.update_record(created = current_date,
status = db.report_status(name = 'Draft'))
session.flash = T('Draft Updated.')
redirect(URL('student','index'))
elif (request.args(0) == 'acceptance'):
#get the data & save the report
report = request.vars['report']
report = db.report(db.report.id == report)
# Validate DB report_restriction to obey TIMING rules
valid_rep_restr = val_rep_restr(report.report_restriction.id)
## Validate that the report status is editable (it is either 'Draft' or 'Recheck')
# qiskit-sdk-py-master/qiskit/mapper/_compiling.py
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Methods to assist with compiling tasks.
"""
import math
import scipy.linalg
import numpy as np
from ._mappererror import MapperError
def euler_angles_1q(unitary_matrix):
"""Compute Euler angles for a single-qubit gate.
Find angles (theta, phi, lambda) such that
unitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)
Return (theta, phi, lambda, "U(theta,phi,lambda)"). The last
element of the tuple is the OpenQASM gate name with parameter
values substituted.
"""
small = 1e-10
if unitary_matrix.shape != (2, 2):
raise MapperError("compiling.euler_angles_1q expected 2x2 matrix")
phase = np.linalg.det(unitary_matrix)**(-1.0/2.0)
U = phase * unitary_matrix # U in SU(2)
# OpenQASM SU(2) parameterization:
# U[0, 0] = exp(-i(phi+lambda)/2) * cos(theta/2)
# U[0, 1] = -exp(-i(phi-lambda)/2) * sin(theta/2)
# U[1, 0] = exp(i(phi-lambda)/2) * sin(theta/2)
# U[1, 1] = exp(i(phi+lambda)/2) * cos(theta/2)
# Find theta
if abs(U[0, 0]) > small:
theta = 2 * math.acos(abs(U[0, 0]))
else:
theta = 2 * math.asin(abs(U[1, 0]))
# Find phi and lambda
phase11 = 0.0
phase10 = 0.0
if abs(math.cos(theta/2.0)) > small:
phase11 = U[1, 1] / math.cos(theta/2.0)
if abs(math.sin(theta/2.0)) > small:
phase10 = U[1, 0] / math.sin(theta/2.0)
phiplambda = 2 * math.atan2(np.imag(phase11), np.real(phase11))
phimlambda = 2 * math.atan2(np.imag(phase10), np.real(phase10))
phi = 0.0
if abs(U[0, 0]) > small and abs(U[1, 0]) > small:
phi = (phiplambda + phimlambda) / 2.0
lamb = (phiplambda - phimlambda) / 2.0
else:
if abs(U[0, 0]) < small:
lamb = -phimlambda
else:
lamb = phiplambda
# Check the solution
Rzphi = np.array([[np.exp(-1j*phi/2.0), 0],
[0, np.exp(1j*phi/2.0)]], dtype=complex)
Rytheta = np.array([[np.cos(theta/2.0), -np.sin(theta/2.0)],
[np.sin(theta/2.0), np.cos(theta/2.0)]], dtype=complex)
Rzlambda = np.array([[np.exp(-1j*lamb/2.0), 0],
[0, np.exp(1j*lamb/2.0)]], dtype=complex)
V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))
if np.linalg.norm(V - U) > small:
raise MapperError("compiling.euler_angles_1q incorrect result")
return theta, phi, lamb, "U(%.15f,%.15f,%.15f)" % (theta, phi, lamb)
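# A minimal usage sketch (not from the original module): a Hadamard equals
# U(pi/2, 0, pi) up to a global phase, so
#   H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)
#   theta, phi, lam, qasm = euler_angles_1q(H)
# gives theta ~ pi/2, phi ~ 0.0 and lam ~ pi. (Use a complex dtype: a real
# matrix with negative determinant would make the fractional power above
# produce nan.)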
def simplify_U(theta, phi, lam):
"""Return the gate u1, u2, or u3 implementing U with the fewest pulses.
U(theta, phi, lam) is the input gate.
The returned gate implements U exactly, not up to a global phase.
Return (gate_string, params, "OpenQASM string") where gate_string is one of
"u1", "u2", "u3", "id" and params is a 3-tuple of parameter values. The
OpenQASM string is the name of the gate with parameters substituted.
"""
epsilon = 1e-13
name = "u3"
params = (theta, phi, lam)
qasm = "u3(%.15f,%.15f,%.15f)" % params
# Y rotation is 0 mod 2*pi, so the gate is a u1
if abs(params[0] % (2.0 * math.pi)) < epsilon:
name = "u1"
params = (0.0, 0.0, params[1] + params[2] + params[0])
qasm = "u1(%.15f)" % params[2]
# Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
if name == "u3":
# theta = pi/2 + 2*k*pi
if abs((params[0] - math.pi / 2) % (2.0 * math.pi)) < epsilon:
name = "u2"
params = (math.pi / 2, params[1],
params[2] + (params[0] - math.pi / 2))
qasm = "u2(%.15f,%.15f)" % (params[1], params[2])
# theta = -pi/2 + 2*k*pi
if abs((params[0] + math.pi / 2) % (2.0 * math.pi)) < epsilon:
name = "u2"
params = (math.pi / 2, params[1] + math.pi,
params[2] - math.pi + (params[0] + math.pi / 2))
qasm = "u2(%.15f,%.15f)" % (params[1], params[2])
# a u1 with lambda equal to 0 mod 4*pi is a no-op, i.e. the identity gate
if name == "u1" and abs(params[2] % (4.0 * math.pi)) < epsilon:
name = "id"
params = (0.0, 0.0, 0.0)
qasm = "id"
return name, params, qasm
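# For instance, simplify_U(0.0, 0.0, math.pi / 2) collapses to
# ("u1", (0.0, 0.0, pi/2), "u1(1.570796...)") because the Y rotation
# vanishes, while simplify_U(math.pi / 2, 0.0, 0.0) reduces to a u2.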
def rz_array(theta):
"""Return numpy array for Rz(theta).
Rz(theta) = diag(exp(-i*theta/2),exp(i*theta/2))
"""
return np.array([[np.exp(-1j*theta/2.0), 0],
[0, np.exp(1j*theta/2.0)]], dtype=complex)
def ry_array(theta):
"""Return numpy array for Ry(theta).
Ry(theta) = [[cos(theta/2), -sin(theta/2)],
[sin(theta/2), cos(theta/2)]]
"""
return np.array([[math.cos(theta/2.0), -math.sin(theta/2.0)],
[math.sin(theta/2.0), math.cos(theta/2.0)]],
dtype=complex)
def two_qubit_kak(unitary_matrix):
"""Decompose a two-qubit gate over CNOT + SU(2) using the KAK decomposition.
Based on MATLAB implementation by <NAME>.
Computes a sequence of 10 single and two qubit gates, including 3 CNOTs,
which multiply to U, including global phase. Uses Vatan and Williams
optimal two-qubit circuit (quant-ph/0308006v3). The decomposition algorithm
which achieves this is explained well in Drury and Love, 0806.4015.
unitary_matrix = numpy 4x4 unitary matrix
"""
if unitary_matrix.shape != (4, 4):
raise MapperError("compiling.two_qubit_kak expected 4x4 matrix")
phase = np.linalg.det(unitary_matrix)**(-1.0/4.0)
# Make it in SU(4), correct phase at the end
U = phase * unitary_matrix
# B changes to the Bell basis
B = (1.0/math.sqrt(2)) * np.array([[1, 1j, 0, 0],
[0, 0, 1j, 1],
[0, 0, 1j, -1],
[1, -1j, 0, 0]], dtype=complex)
# U' = Bdag . U . B
Uprime = np.dot(np.transpose(B.conjugate()), np.dot(U, B))
# M^2 = trans(U') . U'
M2 = np.dot(np.transpose(Uprime), Uprime)
# Diagonalize M2
# Must use diagonalization routine which finds a real orthogonal matrix P
# when M2 is real.
D, P = np.linalg.eig(M2)
# If det(P) == -1, apply a swap to make P in SO(4)
if abs(np.linalg.det(P)+1) < 1e-5:
swap = np.array([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]], dtype=complex)
P = np.dot(P, swap)
D = np.diag(np.dot(swap, np.dot(np.diag(D), swap)))
Q = np.diag(np.sqrt(D)) # array from elementwise sqrt
# Want to take square root so that Q has determinant 1
if abs(np.linalg.det(Q)+1) < 1e-5:
Q[0, 0] = -Q[0, 0]
Kprime = np.dot(Uprime, np.dot(P, np.dot(np.linalg.inv(Q),
np.transpose(P))))
K1 = np.dot(B, np.dot(Kprime, np.dot(P, np.transpose(B.conjugate()))))
A = np.dot(B, np.dot(Q, np.transpose(B.conjugate())))
K2 = np.dot(B, np.dot(np.transpose(P), np.transpose(B.conjugate())))
KAK = np.dot(K1, np.dot(A, K2))
if np.linalg.norm(KAK - U, 2) > 1e-6:
raise MapperError("compiling.two_qubit_kak: " +
"unknown error in KAK decomposition")
# Compute parameters alpha, beta, gamma so that
# A = exp(i * (alpha * XX + beta * YY + gamma * ZZ))
x = np.array([[0, 1], [1, 0]], dtype=complex)
y = np.array([[0, -1j], [1j, 0]], dtype=complex)
z = np.array([[1, 0], [0, -1]], dtype=complex)
xx = np.kron(x, x)
yy = np.kron(y, y)
zz = np.kron(z, z)
alpha = math.atan(np.trace(np.imag(np.dot(A, xx)))/np.trace(np.real(A)))
beta = math.atan(np.trace(np.imag(np.dot(A, yy)))/np.trace(np.real(A)))
gamma = math.atan(np.trace(np.imag(np.dot(A, zz)))/np.trace(np.real(A)))
# K1 = kron(U1, U2) and K2 = kron(V1, V2)
# Find the matrices U1, U2, V1, V2
L = K1[0:2, 0:2]
if np.linalg.norm(L) < 1e-9:
L = K1[0:2, 2:4]
if np.linalg.norm(L) < 1e-9:
L = K1[2:4, 2:4]
Q = np.dot(L, np.transpose(L.conjugate()))
U2 = L / np.sqrt(Q[0, 0])
R = np.dot(K1, np.kron(np.identity(2), np.transpose(U2.conjugate())))
U1 = np.array([[0, 0], [0, 0]], dtype=complex)
U1[0, 0] = R[0, 0]
U1[0, 1] = R[0, 2]
U1[1, 0] = R[2, 0]
U1[1, 1] = R[2, 2]
L = K2[0:2, 0:2]
if np.linalg.norm(L) < 1e-9:
L = K2[0:2, 2:4]
if np.linalg.norm(L) < 1e-9:
L = K2[2:4, 2:4]
Q = np.dot(L, np.transpose(L.conjugate()))
V2 = L / np.sqrt(Q[0, 0])
R = np.dot(K2, np.kron(np.identity(2), np.transpose(V2.conjugate())))
V1 = np.array([[0, 0], [0, 0]], dtype=complex)
V1[0, 0] = R[0, 0]
V1[0, 1] = R[0, 2]
V1[1, 0] = R[2, 0]
V1[1, 1] = R[2, 2]
if np.linalg.norm(np.kron(U1, U2) - K1) > 1e-4 or \
np.linalg.norm(np.kron(V1, V2) - K2) > 1e-4:
raise MapperError("compiling.two_qubit_kak: " +
"error in SU(2) x SU(2) part")
test = scipy.linalg.expm(1j*(alpha * xx + beta * yy + gamma * zz))
if np.linalg.norm(A - test) > 1e-4:
raise MapperError("compiling.two_qubit_kak: " +
"error in A part")
# Circuit that implements K1 * A * K2 (up to phase), using
# Vatan and Williams Fig. 6 of quant-ph/0308006v3
# Include prefix and suffix single-qubit gates into U2, V1 respectively.
V2 = np.dot(np.array([[np.exp(1j*np.pi/4), 0],
[0, np.exp(-1j*np.pi/4)]], dtype=complex), V2)
U1 = np.dot(U1, np.array([[np.exp(-1j*np.pi/4), 0],
[0, np.exp(1j*np.pi/4)]], dtype=complex))
# Corrects global phase: exp(ipi/4)*phase'
U1 = np.dot(U1, np.array([[np.exp(1j*np.pi/4), 0],
[0, np.exp(1j*np.pi/4)]], dtype=complex))
U1 = phase.conjugate() * U1
* 60,
'useramount': 100 * COIN,
'reward_to': choice(['user', 'ben']),
'rewardamount': COIN * 60 // 365 + 1,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'more_useramount1': 300 * COIN,
'more_useramount2': 200 * COIN,
'user_outputs_dest': 'user+user', # means (user_outputs_count == 2)
'accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# based on base00, reward_to ben+other: accepted
'name': 'm48',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'more_useramount1': 200 * COIN,
'reward_to': 'ben+other',
'rewardamount': COIN * 30 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'user_outputs_dest': 'user+user', # means (user_outputs_count == 2)
'accepted': True,
},
{
# rewardamount < fee_total: accepted
'name': 'm49',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 1 * COIN,
'reward_to': 'user',
'rewardamount': COIN // 10 // 365, # less than 0.001
'fee_total': '0.005',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
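# Arithmetic behind 'm49' (assuming COIN == 100000000 satoshi, an
# assumption, not stated here): COIN // 10 // 365 == 27397 satoshi, about
# 0.00027 COIN -- one day of 10% annual reward on 1 COIN -- which is below
# the 0.005 fee_total, so the fee consumes the entire reward.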
{
# based on base00, reward_to: 'user+other', 'user+ben+other': accepted
'name': 'm50',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'more_useramount1': 200 * COIN,
'reward_to': [ 'user+other', 'user+ben+other' ],
'rewardamount': COIN * 30 // 365,
'fee_total': 'auto',
'fee_user_percent': 50,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'user_outputs_dest': 'user+user', # means (user_outputs_count == 2)
'accepted': True,
},
{
# based on base00, reward_to: 'user+ben: accepted
'name': 'm51',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user+ben',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 0,
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
},
{
# based on base00, (keys_count_required is not set, by default keys_count_required == keys_count_total): accepted
'name': 'm52',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': choice(['user', 'ben', 'other']),
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': [ 0, None ],
'keys_count_used': 'auto',
'accepted': True,
},
{
# m52, (keys_count_used != keys_count_required): rejected
'name': 'm52e1',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': choice(['user', 'ben', 'other']),
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': 5,
'keys_count_required': 3,
'keys_count_used': [ 1, 2, 4, 5 ],
'accepted': False,
'error': (64, BAD_REWARD_SCRIPT),
},
{
# m52, (keys_count_required > keys_count_total): rejected
'name': 'm52e2',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': choice(['user', 'ben', 'other']),
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': 5,
'keys_count_required': 6,
'keys_count_used': 5,
'accepted': False,
'error': (64, BAD_REWARD_SCRIPT),
},
#
# Step2 series: tests with 2 steps of minting, user output of the first step is user input for the second one:
#
{
# (reward_to ben, useramount between steps is without changes): accepted
'name': 'step2_01',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'ben',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
'step2_enabled': True,
'step2_wait_interval': 24 * 60 * 60,
'step2_rewardamount': COIN * 10 // 365,
'step2_reward_to': choice(['user', 'ben']),
'step2_accepted': True,
},
{
# step2_01, reward_to ben, step2_rewardamount +1 satoshi more than allowed: rejected
'name': 'step2_01e',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'ben',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
'step2_enabled': True,
'step2_wait_interval': 24 * 60 * 60,
'step2_rewardamount': COIN * 10 // 365 + 1,
'step2_reward_to': choice(['user', 'ben']),
'step2_accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
{
# (reward_to user, useramount between steps is accumulated): accepted
'name': 'step2_02',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
'step2_enabled': True,
'step2_wait_interval': 24 * 60 * 60,
'step2_rewardamount': (COIN * 10 + COIN // 365) // 365,
'step2_reward_to': 'user',
'step2_accepted': True,
},
{
# step2_02, reward_to user, step2_rewardamount +1 satoshi more than allowed: rejected
'name': 'step2_02e',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 22 * 60 * 60,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'user',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'accepted': True,
'step2_enabled': True,
'step2_wait_interval': 24 * 60 * 60,
'step2_rewardamount': (COIN * 10 + COIN // 365) // 365 + 1,
'step2_reward_to': 'user',
'step2_accepted': False,
'error': (64, BAD_REWARD_ROBBERY),
},
#
# L series (Limits): tests with cert expiration date and minting limits:
#
{
# (rewardamount <= ca3_minting_limit): accepted
'name': 'L01',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 0,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': choice(['user', 'other']),
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'ca3_minting_limit': [ COIN * 10 // 365, COIN * 10 // 365 + 1000 ],
'accepted': True,
},
{
# (rewardamount > ca3_minting_limit): rejected
# (ca3_minting_limit <= 0): rejected
'name': 'L01e',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': False,
'ca3_age': 0,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': choice(['user', 'other']),
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'ca3_minting_limit': [ COIN * 10 // 365 - 1, 0, -100 ],
'accepted': False,
'error': (64, BAD_REWARD_LIMIT_EXCEEDED),
},
{
# sum(rewardamount) <= ca3_minting_limit: accepted
'name': 'L02',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 0,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'ben',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'ca3_minting_limit': [ COIN * 10 // 365 * 2, COIN * 10 // 365 * 2 + 1, COIN * 10 // 365 * 200 ],
'accepted': True,
'step2_enabled': True,
'step2_wait_interval': 24 * 60 * 60,
'step2_rewardamount': COIN * 10 // 365,
'step2_reward_to': choice(['user', 'ben']),
'step2_accepted': True,
},
{
# sum(rewardamount) > ca3_minting_limit: rejected
# Example: minting limit L=100
# M1=100, M2=50: rejected (M1 + M2 > L) where (M1 == L)
# M1=60, M2=50: rejected (M1 + M2 > L)
'name': 'L02e1',
'rootcertamount': 1 * COIN,
'greenflag': True,
'ca3certamount': 1000000,
'ben_enabled': True,
'ca3_age': 0,
'usermoney_age': 24 * 60 * 60,
'useramount': 100 * COIN,
'reward_to': 'ben',
'rewardamount': COIN * 10 // 365,
'fee_total': 'auto',
'fee_user_percent': 'auto',
'refill_moneybox': 'random',
'keys_count_total': randint(1,15),
'keys_count_required': 'random',
'keys_count_used': 'auto',
'ca3_minting_limit': [ COIN * 10 // 365, COIN * 10 // 365 + 1, COIN * 10 // 365 * 2 - 1 ],
'accepted': True,
'step2_enabled': True,
'step2_wait_interval': 24 * 60 * 60,
'step2_rewardamount': COIN * 10 // 365,
'step2_reward_to': choice(['user', 'ben']),
'step2_accepted': False,
'error': (64, BAD_REWARD_LIMIT_EXCEEDED),
},
{
# last_rewardamount > ca3_minting_limit: rejected
# Example: minting limit L=100
# M1=50, M2=150: rejected (M2 > L)
# -*- coding: utf-8 -*-
import unittest
from includes import *
from common import getConnectionByEnv, waitForIndex, sortedResults, toSortedFlatList, check_server_version
from time import sleep
from RLTest import Env
string1 = 'For the exchange of decimal floating-point numbers, \
interchange formats of any multiple of 32 bits are defined. \
As with binary interchange, the encoding scheme for the decimal interchange formats encodes the sign, exponent, and significand. \
Two different bit-level encodings are defined, and interchange is complicated by the fact that some external indicator of the encoding in use may be required. \
The two options allow the significand to be encoded as a compressed sequence of decimal digits using densely packed decimal or, alternatively, as a binary integer. \
The former is more convenient for direct hardware implementation of the standard, while the latter is more suited to software emulation on a binary computer. \
In either case, the set of numbers (combinations of sign, significand, and exponent) \
that may be encoded is identical, and special values (±zero with the minimum exponent, ±infinity, quiet NaNs, and signaling NaNs) have identical encodings.'
string2 = 'For the binary formats, the representation is made unique by choosing the smallest representable exponent allowing the value to be represented exactly. \
Further, the exponent is not represented directly, but a bias is added so that the smallest representable exponent is represented as 1, with 0 used for subnormal numbers. \
For numbers with an exponent in the normal range (the exponent field being neither all ones nor all zeros), \
the leading bit of the significand will always be 1. \
Consequently, a leading 1 can be implied rather than explicitly present in the memory encoding, \
and under the standard the explicitly represented part of the significand will lie between 0 and 1. \
This rule is called leading bit convention, implicit bit convention, or hidden bit convention. \
This rule allows the binary format to have an extra bit of precision. \
The leading bit convention cannot be used for the subnormal numbers as they have an exponent outside \
the normal exponent range and scale by the smallest represented exponent as used for the smallest normal numbers.'
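# (Aside, not part of these tests: the "leading bit convention" described
# in string2 can be seen directly -- in Python 3,
# struct.pack('>f', 1.5).hex() == '3fc00000', whose 23-bit fraction field
# is 0b100... = 0.5, and the implied leading 1 makes the significand 1.5.)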
def testProfileSearch(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CONFIG', 'SET', 'MAXPREFIXEXPANSIONS', 1000000)
env.cmd('FT.CONFIG', 'SET', '_PRINT_PROFILE_CLOCK', 'false')
env.cmd('ft.create', 'idx', 'SCHEMA', 't', 'text')
conn.execute_command('hset', '1', 't', 'hello')
conn.execute_command('hset', '2', 't', 'world')
env.expect('ft.profile', 'profile', 'idx', '*', 'nocontent').error().contains('Bad command type')
# test WILDCARD
actual_res = conn.execute_command('ft.profile', 'search', 'idx', '*', 'nocontent')
expected_res = [[2L, '1', '2'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Wildcard iterator', 3L]],
['Result processors profile',
['Index', 3L],
['Scorer', 3L],
['Sorter', 3L]]]]
env.assertEqual(actual_res, expected_res)
# test EMPTY
actual_res = conn.execute_command('ft.profile', 'search', 'idx', 'redis', 'nocontent')
expected_res = [[0L],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Empty iterator', 1L]],
['Result processors profile',
['Index', 1L],
['Scorer', 1L],
['Sorter', 1L]]]]
env.assertEqual(actual_res, expected_res)
# test single term
actual_res = conn.execute_command('ft.profile', 'search', 'idx', 'hello', 'nocontent')
expected_res = [[1L, '1'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Term reader', 'hello', 2L]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L]]]]
env.assertEqual(actual_res, expected_res)
# test UNION
actual_res = conn.execute_command('ft.profile', 'search', 'idx', 'hello|world', 'nocontent')
expected_res = [[2L, '1', '2'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Union iterator - UNION', 3L,
['Term reader', 'hello', 2L],
['Term reader', 'world', 2L]]],
['Result processors profile',
['Index', 3L],
['Scorer', 3L],
['Sorter', 3L]]]]
env.assertEqual(actual_res, expected_res)
# test INTERSECT
actual_res = conn.execute_command('ft.profile', 'search', 'idx', 'hello world', 'nocontent')
expected_res = [[0L],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Intersect iterator', 1L,
['Term reader', 'hello', 2L],
['Term reader', 'world', 1L]]],
['Result processors profile',
['Index', 1L],
['Scorer', 1L],
['Sorter', 1L]]]]
env.assertEqual(actual_res, expected_res)
# test NOT
actual_res = conn.execute_command('ft.profile', 'search', 'idx', '-hello', 'nocontent')
expected_res = [[1L, '2'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Not iterator', 2L,
['Term reader', 'hello', 0L]]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L]]]]
env.assertEqual(actual_res, expected_res)
# test OPTIONAL
actual_res = conn.execute_command('ft.profile', 'search', 'idx', '~hello', 'nocontent')
expected_res = [[2L, '1', '2'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Optional iterator', 3L,
['Term reader', 'hello', 0L]]],
['Result processors profile',
['Index', 3L],
['Scorer', 3L],
['Sorter', 3L]]]]
env.assertEqual(actual_res, expected_res)
# test PREFIX
actual_res = conn.execute_command('ft.profile', 'search', 'idx', 'hel*', 'nocontent')
expected_res = [[1L, '1'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Union iterator - PREFIX - hel', 2L,
['Term reader', 'hello', 2L]]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L]]]]
env.assertEqual(actual_res, expected_res)
# test FUZZY
actual_res = conn.execute_command('ft.profile', 'search', 'idx', '%%hel%%', 'nocontent')
expected_res = [[1L, '1'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Union iterator - FUZZY - hel', 2L,
['Term reader', 'hello', 2L]]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L]]]]
env.assertEqual(actual_res, expected_res)
# test ID LIST iter with INKEYS
actual_res = conn.execute_command('ft.profile', 'search', 'idx', '%%hel%%', 'inkeys', 1, '1')
expected_res = [[1L, '1', ['t', 'hello']],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Intersect iterator', 2L,
['ID-List iterator', 2L],
['Union iterator - FUZZY - hel', 1L,
['Term reader', 'hello', 1L]]]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L],
['Loader', 2L]]]]
env.assertEqual(actual_res, expected_res)
actual_res = conn.execute_command('ft.profile', 'search', 'idx', 'hello(hello(hello(hello(hello))))', 'nocontent')
expected_res = [[1L, '1'],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Intersect iterator', 2L,
['Term reader', 'hello', 2L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Term reader', 'hello', 1L]]]]]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L]]]]
env.assertEqual(actual_res, expected_res)
if not check_server_version(env, '6.00.20'):
return
actual_res = env.expect('ft.profile', 'search', 'idx', 'hello(hello(hello(hello(hello(hello)))))', 'nocontent')
expected_res = [1L, '1',
['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Intersect iterator', 2L,
['Term reader', 'hello', 2L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Intersect iterator', 1L,
['Term reader', 'hello', 1L],
['Intersect iterator', 1L, None, None]]]]]]],
['Result processors profile',
['Index', 2L],
['Scorer', 2L],
['Sorter', 2L]]]
def testProfileSearchLimited(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CONFIG', 'SET', 'MAXPREFIXEXPANSIONS', 1000000)
env.cmd('FT.CONFIG', 'SET', '_PRINT_PROFILE_CLOCK', 'false')
env.cmd('ft.create', 'idx', 'SCHEMA', 't', 'text')
conn.execute_command('hset', '1', 't', 'hello')
conn.execute_command('hset', '2', 't', 'hell')
conn.execute_command('hset', '3', 't', 'help')
conn.execute_command('hset', '4', 't', 'helowa')
actual_res = conn.execute_command('ft.profile', 'limited', 'search', 'idx', '%hell% hel*')
expected_res = [[3L, '1', ['t', 'hello'], '2', ['t', 'hell'], '3', ['t', 'help']],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Intersect iterator', 4L,
['Union iterator - FUZZY - hell', 4L, 'The number of iterators in union is 3'],
['Union iterator - PREFIX - hel', 3L, 'The number of iterators in union is 4']]],
['Result processors profile',
['Index', 4L],
['Scorer', 4L],
['Sorter', 4L],
['Loader', 4L]]]]
env.assertEqual(actual_res, expected_res)
def testProfileAggregate(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CONFIG', 'SET', 'MAXPREFIXEXPANSIONS', 1000000)
env.cmd('FT.CONFIG', 'SET', '_PRINT_PROFILE_CLOCK', 'false')
env.cmd('ft.create', 'idx', 'SCHEMA', 't', 'text')
conn.execute_command('hset', '1', 't', 'hello')
conn.execute_command('hset', '2', 't', 'world')
actual_res = conn.execute_command('ft.profile', 'aggregate', 'idx', 'hello',
'groupby', 1, '@t',
'REDUCE', 'count', '0', 'as', 'sum')
expected_res = [[1L, ['t', 'hello', 'sum', '1']],
[ ['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Term reader', 'hello', 2L]],
['Result processors profile',
['Index', 2L],
['Loader', 2L],
['Grouper', 2L]]]]
env.assertEqual(actual_res, expected_res)
actual_res = env.cmd('ft.profile', 'aggregate', 'idx', '*',
'load', 1, 't',
'apply', 'startswith(@t, "hel")', 'as', 'prefix')
expected_res = [[1L, ['t', 'hello', 'prefix', '1'], ['t', 'world', 'prefix', '0']],
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Wildcard iterator', 3L]],
['Result processors profile',
['Index', 3L],
['Loader', 3L],
['Projector - Function startswith', 3L]]]]
env.assertEqual(actual_res, expected_res)
def testProfileCursor(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CONFIG', 'SET', 'MAXPREFIXEXPANSIONS', 1000000)
env.cmd('FT.CONFIG', 'SET', '_PRINT_PROFILE_CLOCK', 'false')
env.cmd('ft.create', 'idx', 'SCHEMA', 't', 'text')
conn.execute_command('hset', '1', 't', 'hello')
conn.execute_command('hset', '2', 't', 'world')
actual_res = conn.execute_command('ft.profile', 'aggregate', 'idx', '*',
'load', 1, '@t',
'WITHCURSOR', 'COUNT', 10)
expected_res = [[1L, ['t', 'hello'], ['t', 'world']],
0L, # cursorID
[['Total profile time'],
['Parsing and iterator creation time'],
['Iterators profile',
['Wildcard iterator', | |
properties (
(properties = modes_of(God)) &
(a_infers_b_from_c(Intellect, properties, definition(God))) &
infinite(properties)
).
% All modes follow from the divine nature.
%number(DivineAttributes, Infinity).
all m ( is_mode(m) -> follows_from(m, DivineNature) ).
""",
'1p19': """
% 1p19: God is eternal, or all God's attributes are eternal.
eternal(God).
""",
'1p2': """
% 1p2: Two substances having different attributes have nothing in common with
% one another.
%
% Dem.: This is also evident from 1def3. For each must be in itself and be
% conceived through itself, or the concept of the one does not involve the
% concept of the other.
all s1 all s2 (
% If there are two substances...
( is_substance(s1) & is_substance(s2) & -(s1=s2) )
% ...then...
->
% ...there is no attribute they have in common.
-( exists a ( is_attribute_of(a, s1) & is_attribute_of(a, s2) ) )
).
""",
# 1p21: All the things which follow from the absolute nature of any of God's
# attributes have always had to exist and be infinite, or are, through the same
# attribute, eternal and infinite.
'1p21': """
% 1p21: All the things which follow from the absolute nature of any of God's
% attributes
all t (
(
% simplify for now
follows_from(t, God)
%exists nature exists a (
% is_attribute_of(a, God) &
% is_absolute_nature_of(nature, a) &
% follows_from(t, nature)
%)
)
->
% have always had to exist and be infinite, or are, through the same
% attribute, eternal and infinite.
(
is_infinite(t)
%has_always_existed(t) & has_always_been_infinite(t) &
%eternal_through(t, a) &
%infinite_through(t, a)
)
).
""",
'1p22': """
% 1p22: Whatever follows from some attribute of God
% insofar as it is modified by a modification which,
% through the same attribute, exists necessarily and
% is infinite, must also exist necessarily and be
% infinite.
% Whatever follows from some attribute of God insofar as it is modified by
% a modification which, through the same attribute, exists necessarily and
% is infinite
all x all mod all attribute (
(
% Note simplification here: to follow from an attribute insofar as
% it is modified is to follow from a modification of that attribute
follows_from(x, mod) &
is_modification_of(mod, attribute) &
exists_necessarily(mod) &
is_infinite(mod)
)
->
(
exists_necessarily(x) & is_infinite(x)
)
).
""",
# 1p24: The essence of things produced by God does not involve
# existence.
'1p24': """
% The phrase 'things produced by God' implicitly excludes
% God.
all x (
(produced_by(x,God) & -(God=x))
->
-essence_involves_existence(x)
).
""",
# 1p24c: From this it follows that God is not only the cause of things'
# beginning to exist, but also of their persevering in existing, *or* (to use a
# Scholastic term) God is the cause of the being of things. For -- whether the
# things [NS: produced] exist or not -- so long as we attend to their essence,
# we shall find that it involves neither existence nor duration. So their
# essence can be the cause neither of their existence nor of their duration,
# but only God, to whose nature alone it pertains to exist[, can be the cause]
# (by 1p14c1).
'1p24c': """
% God is...the cause of things' persevering in existing.
all t exists b ( is_being_of(b, t) & partial_cause(God, b) ).
% Noticed while doing this: we can't translate this as "God is *the* cause
% of the being of things" because there are other causes. So we must
% translate it as "God is a cause"
""",
# 1p25c: Particular things are nothing but affections of God's attributes, *or*
# modes by which God's attributes are expressed in a certain and determinate
# way. The demonstration is evident from 1p15 and 1def5.
'1p25c': """
% Paraphrasing as: each particular thing is nothing but an affection of an
% attribute of God and this affection is a mode that expresses an attribute
% of God.
all t (
is_particular_thing(t) ->
exists attribute exists affection (
is_nothing_but(t, affection) &
is_affection_of(affection, attribute) &
is_attribute_of(attribute, God) &
is_mode(affection) &
expresses_in_a_certain_and_determinate_way(affection, attribute)
)
).
""",
# 1p26: A thing which has been determined to produce an effect has necessarily
# been determined in this way by God; and one which has not been determined by
# God cannot determine itself to produce an effect.
'1p26': """
% Simplification: doesn't subselect things in particular
all t all e (
determined_to_produce(t, e)
->
x_determines_y_to_produce_z(God, t, e)
).
""",
# 1p28: Every singular thing, or any thing which is finite and has a
# determinate existence, can neither exist nor be determined to produce an
# effect unless it is determined to exist and produce an effect by another
# cause, which is also finite and has a determinate existence; and again, this
# cause also can neither exist nor be determined to produce an effect unless it
# is determined to exist and produce an effect by another, which is also finite
# and has a determinate existence, and so on, to infinity.
'1p28': """
%all x (is_finite(x) -> exists y (causes(y,x))).
%all x all y ((causes(x,y) & infinite(x)) -> infinite(y)).
all x (infinite(x)).
% For simplicity, ignoring "exists" and just handling "determined to exist"
%all y (
% % Every singular thing, or any thing which is finite and has a
% % determinate existence,
% (
% %is_singular(y) &
% is_finite(y) &
% %has_determinate_existence(y) &
% %determined_to_produce_effect(y)
% )
% % can neither exist nor be determined to produce an
% % effect unless
% ->
% % it is determined to exist and produce an effect by another cause,
% % which is also finite and has a determinate existence;
% (
% exists x ( %exists z
% is_infinite(x) %determines_to_exist(x, y) &
% %x_determines_y_to_produce_z(x, y, z) &
% %is_finite(x) %&
% %has_determinate_existence(x)
% )
% )
%).
""",
# intermediary step on the way to 1p28
'1p28-i': """
all x (
is_finite(x) -> -causes(God, x)
).
""",
'1p2': """
% 1p2: Two substances having different attributes have nothing in common with
% one another.
%
% Dem.: This is also evident from 1def3. For each must be in itself and be
% conceived through itself, or the concept of the one does not involve the
% concept of the other.
all s1 all s2 (
% If there are two substances with different attributes...
(
( is_substance(s1) & is_substance(s2) & -(s1=s2) )
& -exists a ( is_attribute_of(a, s1) & is_attribute_of(a, s2) )
)
% ...then...
->
% ...there is no thing that each has.
-exists t ( has(s1, t) & has(s2, t) )
).
""",
'1p3': """
% 1p3: If things have nothing in common with one another, one of them cannot be
% the cause of the other.
%
% Dem.: If they have nothing in common with one another, then (by 1a5) they
% cannot be understood through one another, and so (by 1a4) one cannot be the
% cause of the other, q.e.d.
% Used in 1p6
% Given two things,
all a all b (
% If there is no thing that they both have, ...
-(exists t ( has(a, t) & has(b, t) ))
% ...then...
->
% ...neither is understood through nor causes the other.
(
neither_is_understood_through_the_other(a, b)
&
neither_causes_the_other(a, b)
)
).
% Note that "they cannot be understood through one another" must mean "neither
% can be understood through the other", although strictly speaking it could
% mean that it is impossible to (simultaneously?) understand each through the
% other.
""",
'1p4': """
% 1p4: Two or more distinct things are distinguished from one another, either
% by a difference in the attributes of the substances or by a difference in
% their affections.
%
% Dem.: Whatever is, is either in itself or in another (by 1a1), i.e. (by 1def3
% and 1def5), outside the intellect there is nothing except substances and
% their affections. Therefore, there is nothing outside the intellect through
% which a number of things can be distinguished from one another except
% substances, or what is the same (by 1def4), their attributes, and their
% affections, q.e.d.
% For simplicity, I will just stick to two things.
% all a | |
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import re
import requests
import responses
from ibm_cloud_networking_services.firewall_access_rules_v1 import *
crn = 'testString'
service = FirewallAccessRulesV1(
authenticator=NoAuthAuthenticator(),
crn=crn
)
base_url = 'https://api.cis.cloud.ibm.com'
service.set_service_url(base_url)
##############################################################################
# Start of Service: InstanceLevelFirewallAccessRules
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_all_account_access_rules
#-----------------------------------------------------------------------------
class TestListAllAccountAccessRules():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
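# URLs that do not end in '/' are returned verbatim; URLs with trailing
# slashes become a compiled regex tolerating any number of trailing
# slashes, so the mock matches however the client normalizes the path.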
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# list_all_account_access_rules()
#--------------------------------------------------------
@responses.activate
def test_list_all_account_access_rules_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": [{"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
notes = 'testString'
mode = 'block'
configuration_target = 'ip'
configuration_value = '1.2.3.4'
page = 38
per_page = 5
order = 'target'
direction = 'asc'
match = 'any'
# Invoke method
response = service.list_all_account_access_rules(
notes=notes,
mode=mode,
configuration_target=configuration_target,
configuration_value=configuration_value,
page=page,
per_page=per_page,
order=order,
direction=direction,
match=match,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = requests.utils.unquote(query_string)
assert 'notes={}'.format(notes) in query_string
assert 'mode={}'.format(mode) in query_string
assert 'configuration.target={}'.format(configuration_target) in query_string
assert 'configuration.value={}'.format(configuration_value) in query_string
assert 'page={}'.format(page) in query_string
assert 'per_page={}'.format(per_page) in query_string
assert 'order={}'.format(order) in query_string
assert 'direction={}'.format(direction) in query_string
assert 'match={}'.format(match) in query_string
#--------------------------------------------------------
# test_list_all_account_access_rules_required_params()
#--------------------------------------------------------
@responses.activate
def test_list_all_account_access_rules_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": [{"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_all_account_access_rules()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_list_all_account_access_rules_value_error()
#--------------------------------------------------------
@responses.activate
def test_list_all_account_access_rules_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": [{"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Pass in all but one required param and check for a ValueError
req_param_dict = {
}
for param in req_param_dict.keys():
req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.list_all_account_access_rules(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for create_account_access_rule
#-----------------------------------------------------------------------------
class TestCreateAccountAccessRule():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# create_account_access_rule()
#--------------------------------------------------------
@responses.activate
def test_create_account_access_rule_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a AccountAccessRuleInputConfiguration model
account_access_rule_input_configuration_model = {}
account_access_rule_input_configuration_model['target'] = 'ip'
account_access_rule_input_configuration_model['value'] = 'ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ'
# Set up parameter values
mode = 'block'
notes = 'This rule is added because of event X that occurred on date xyz'
configuration = account_access_rule_input_configuration_model
# Invoke method
response = service.create_account_access_rule(
mode=mode,
notes=notes,
configuration=configuration,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['mode'] == 'block'
assert req_body['notes'] == 'This rule is added because of event X that occurred on date xyz'
assert req_body['configuration'] == account_access_rule_input_configuration_model
#--------------------------------------------------------
# test_create_account_access_rule_required_params()
#--------------------------------------------------------
@responses.activate
def test_create_account_access_rule_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.create_account_access_rule()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_create_account_access_rule_value_error()
#--------------------------------------------------------
@responses.activate
def test_create_account_access_rule_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Pass in all but one required param and check for a ValueError
req_param_dict = {
}
for param in req_param_dict.keys():
req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.create_account_access_rule(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_account_access_rule
#-----------------------------------------------------------------------------
class TestDeleteAccountAccessRule():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# delete_account_access_rule()
#--------------------------------------------------------
@responses.activate
def test_delete_account_access_rule_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules/testString')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "f1aba936b94213e5b8dca0c0dbf1f9cc"}}'
responses.add(responses.DELETE,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
accessrule_identifier = 'testString'
# Invoke method
response = service.delete_account_access_rule(
accessrule_identifier,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_delete_account_access_rule_value_error()
#--------------------------------------------------------
@responses.activate
def test_delete_account_access_rule_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules/testString')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "f1aba936b94213e5b8dca0c0dbf1f9cc"}}'
responses.add(responses.DELETE,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
accessrule_identifier = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"accessrule_identifier": accessrule_identifier,
}
for param in req_param_dict.keys():
req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.delete_account_access_rule(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_account_access_rule
#-----------------------------------------------------------------------------
class TestGetAccountAccessRule():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# get_account_access_rule()
#--------------------------------------------------------
@responses.activate
def test_get_account_access_rule_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules/testString')
mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
| |
PrintStatement(StringLiteral("Casting Spell temp2")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio temp2.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell temp3")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio temp3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp3")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="il1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill1")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio ill1.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ill2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill2")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio ill2.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ill3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill3")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio ill3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill3")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench1")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio ench1.wav"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench2")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio ench2.wav"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench3")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio ench3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench3")),
]),
),
# Take away charge
SetAttrStatement(
obj="__INPUT__",
name="charged",
rvalue=IntegerLiteral(0),
),
]),
# -> Wand is empty
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Wand is not charged")),
OutputStatement(
output=4,
resource=StringLiteral("Playaudio failed.wav"),
),
]),
),
])
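# A possible deduplication sketch (not part of the original ASTs): the spell
# branches above differ only in the attribute name, the audio resource, and
# the "previously cast" label, so a helper can build each IfElseStatement.
# `output` is the wand's output channel (4, 6, and 7 in the ASTs here).
# Usage: make_spell_branch("temp2", "temp2.mp3", "temporal2", output=4)
def make_spell_branch(attr, audio, label, output):
    return IfElseStatement(
        condition=BinaryOp(
            left=GetAttrExpression(obj="__INPUT__", name=attr),
            operator=nodes.Operator.EQ,
            right=IntegerLiteral(2),
        ),
        # -> Spell is charged: play spell, reset charge, record the cast
        if_body=CompoundStatement(statements=[
            PrintStatement(StringLiteral("Casting Spell " + attr)),
            OutputStatement(output=output,
                            resource=StringLiteral("Playaudio " + audio)),
            SetAttrStatement(obj="__INPUT__", name=attr,
                             rvalue=IntegerLiteral(1)),
            SetAttrStatement(obj="__INPUT__", name="previousCast",
                             rvalue=StringLiteral("Previously cast " + label)),
        ]),
        # -> Not this spell
        else_body=CompoundStatement(statements=[
            PrintStatement(StringLiteral("Not " + attr)),
        ]),
    )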
wand_cast_castle_ast = CompoundStatement(statements=[
# if charged
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="charged",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(1),
),
# -> Wand is charged play spell
if_body=CompoundStatement(statements=[
# if spell = 2, output sound and set spell 1
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio temp1.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell temp2")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio temp2.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell temp3")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio temp3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp3")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="il1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill1")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio ill1.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ill2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill2")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio ill2.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ill3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill3")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio ill3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill3")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench1")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio ench1.wav"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench2")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio ench2.wav"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench3")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio ench3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench3")),
]),
),
# Take away charge
SetAttrStatement(
obj="__INPUT__",
name="charged",
rvalue=IntegerLiteral(0),
),
]),
# -> Wand is empty
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Wand is not charged")),
OutputStatement(
output=6,
resource=StringLiteral("Playaudio failed.wav"),
),
]),
),
])
wand_cast_train_ast = CompoundStatement(statements=[
# if charged
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="charged",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(1),
),
# -> Wand is charged play spell
if_body=CompoundStatement(statements=[
# if spell = 2, output sound and set spell 1
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio temp1.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell temp2")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio temp2.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="temp3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell temp3")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio temp3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="temp3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast temporal3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not temp3")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="il1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill1")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio ill1.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ill2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill2")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio ill2.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill2",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion2"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill2")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ill3",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ill3")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio ill3.mp3"),
),
SetAttrStatement(
obj="__INPUT__",
name="ill3",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast illusion3"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ill3")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench1",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench1")),
OutputStatement(
output=7,
resource=StringLiteral("Playaudio ench1.wav"),
),
SetAttrStatement(
obj="__INPUT__",
name="ench1",
rvalue=IntegerLiteral(1),
),
SetAttrStatement(
obj="__INPUT__",
name="previousCast",
rvalue=StringLiteral("Previously cast enchantment1"),
),
]),
# -> Not this spell
else_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Not ench1")),
]),
),
IfElseStatement(
condition=BinaryOp(
left=GetAttrExpression(
obj="__INPUT__",
name="ench2",
),
operator=nodes.Operator.EQ,
right=IntegerLiteral(2),
),
# -> Spell is charged play spell
if_body=CompoundStatement(statements=[
PrintStatement(StringLiteral("Casting Spell ench2")),
| |
import dataclasses
from abc import ABC, abstractmethod
from typing import Optional, Any, Dict, Union
import torch
from falkon.sparse import SparseTensor
from falkon.mmv_ops.fmm import fmm
from falkon.mmv_ops.fmmv import fdmmv, fmmv
from falkon.utils.helpers import check_same_dtype, check_sparse, check_same_device
from falkon.options import FalkonOptions
class Kernel(ABC):
"""Abstract kernel class. Kernels should inherit from this class, overriding appropriate methods.
To extend Falkon with new kernels, you should read the documentation of this class
carefully, and take a look at the existing implementation of :class:`~falkon.kernels.GaussianKernel`
or :class:`~falkon.kernels.LinearKernel`.
There are several abstract methods which should be implemented, depending on which kinds of
operations are supported by the implementing kernel.
The :meth:`compute` method should compute the kernel matrix, without concerns for differentiability;
:meth:`compute_diff` instead should compute the kernel matrix in such a way that the output
is differentiable with respect to the inputs and to the kernel parameters. Finally, the
:meth:`compute_sparse` method is used to compute the kernel for sparse input matrices. It need
not be differentiable.
Kernels may have several parameters, for example the length-scale of the Gaussian kernel, the
exponent of the polynomial kernel, etc. The kernel should be differentiable with respect to
some such parameters (the aforementioned length-scale, for example), but not with respect to
others (for example the nu parameter of Matern kernels). Each concrete kernel class must
specify the differentiable parameters with the :meth:`diff_params` method, and other parameters
with the :meth:`nondiff_params` method.
Additionally, kernels which implement the :meth:`compute_diff` method should also implement
the :meth:`detach` method, which returns a new instance of the kernel with its parameters
detached from the computation graph.
To provide a KeOps implementation, you will have to inherit also from the
:class:`~falkon.kernels.keops_helpers.KeopsKernelMixin` class, and implement its abstract methods.
In case a KeOps implementation is provided, you should make sure to override the
:meth:`_decide_mmv_impl` and :meth:`_decide_dmmv_impl` so that the KeOps implementation is
effectively used. Have a look at the :class:`~falkon.kernels.PolynomialKernel` class for
an example of how to integrate KeOps in the kernel.
Parameters
----------
name
A short name for the kernel (e.g. "Gaussian")
kernel_type
A short string describing the type of kernel. This may be used to create specialized
functions in :mod:`falkon.mmv_ops` which optimize for a specific kernel type.
opt
Base set of options to be used for operations involving this kernel.
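Examples
--------
A minimal sketch of a concrete, dense-only kernel. The class name and the
``compute`` signature used here are illustrative assumptions rather than
part of the interface shown above::

    class MyLinearKernel(Kernel):
        def __init__(self, opt=None):
            super().__init__("MyLinear", "linear", opt)

        def compute(self, X1, X2, out):
            # K = X1 @ X2.T, written into the pre-allocated `out` buffer
            return torch.mm(X1, X2.transpose(0, 1), out=out)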
"""
def __init__(self, name: str, kernel_type: str, opt: Optional[FalkonOptions]):
self.name = name
self.kernel_type = kernel_type
if opt is None:
opt = FalkonOptions()
self.params: FalkonOptions = opt
@staticmethod
def _check_dmmv_dimensions(X1: torch.Tensor, X2: torch.Tensor, v: Optional[torch.Tensor],
w: Optional[torch.Tensor], out: Optional[torch.Tensor]):
# Parameter validation
if v is None and w is None:
raise ValueError("One of v and w must be specified to run fdMMV.")
if X1.dim() != 2:
raise ValueError("Matrix X1 must be 2D.")
if X2.dim() != 2:
raise ValueError("Matrix X2 must be 2D.")
if v is not None and v.dim() == 1:
v = v.reshape((-1, 1))
if v is not None and v.dim() != 2:
raise ValueError(
f"v must be a vector or a 2D matrix. Found {len(v.shape)}D.")
if w is not None and w.dim() == 1:
w = w.reshape((-1, 1))
if w is not None and w.dim() != 2:
raise ValueError(
f"w must be a vector or a 2D matrix. Found {len(w.shape)}D.")
# noinspection PyUnresolvedReferences
T = v.size(1) if v is not None else w.size(1)
M = X2.size(0)
if out is not None and out.shape != (M, T):
raise ValueError(
f"Output dimension is incorrect. "
f"Expected ({M}, {T}) found {out.shape}")
if v is not None and v.shape != (X2.size(0), T):
raise ValueError(
f"Dimensions of matrix v are incorrect: "
f"Expected ({M}, {T}) found {v.shape}")
if w is not None and w.shape != (X1.size(0), T):
raise ValueError(
f"Dimensions of matrix w are incorrect: "
f"Expected ({X1.size(0)}, {T}) found {w.shape}")
if not check_same_dtype(X1, X2, v, w, out):
raise TypeError("Data types of input matrices must be equal.")
return X1, X2, v, w, out
@staticmethod
def _check_mmv_dimensions(X1: torch.Tensor, X2: torch.Tensor, v: torch.Tensor,
out: Optional[torch.Tensor]):
# Parameter validation
if X1.dim() != 2:
raise ValueError("Matrix X1 must be 2D.")
if X2.dim() != 2:
raise ValueError("Matrix X2 must be 2D.")
if v.dim() == 1:
v = v.reshape((-1, 1))
if v.dim() != 2:
raise ValueError(
f"v must be a vector or a 2D matrix. Found {len(v.shape)}D.")
if out is not None and out.shape != (X1.size(0), v.size(1)):
raise ValueError(
f"Output dimension is incorrect. "
f"Expected ({X1.size(0)}, {v.size(1)}) found {out.shape}")
if v.shape != (X2.size(0), v.size(1)):
raise ValueError(
f"Dimensions of matrix v are incorrect: "
f"Expected ({X2.size(0)}, {v.size(1)}) found {v.shape}")
if not check_same_dtype(X1, X2, v, out):
raise TypeError("Data types of input matrices must be equal.")
return X1, X2, v, out
@staticmethod
def _check_mm_dimensions(X1: torch.Tensor, X2: torch.Tensor, out: Optional[torch.Tensor]):
# Parameter validation
if X1.dim() != 2:
raise ValueError("Matrix X1 must be 2D.")
if X2.dim() != 2:
raise ValueError("Matrix X2 must be 2D.")
N = X1.size(0)
M = X2.size(0)
if out is not None and out.shape != (N, M):
raise ValueError(
f"Output dimension is incorrect. "
f"Expected ({N}, {M}) found {out.shape}")
if not check_same_dtype(X1, X2, out):
raise TypeError("Data types of input matrices must be equal.")
return X1, X2, out
@staticmethod
def _check_device_properties(*args, fn_name: str, opt: FalkonOptions):
if not check_same_device(*args):
raise RuntimeError("All input arguments to %s must be on the same device" % (fn_name))
def __call__(self, X1: torch.Tensor, X2: torch.Tensor, out: Optional[torch.Tensor] = None,
opt: Optional[FalkonOptions] = None):
"""Compute the kernel matrix between ``X1`` and ``X2``
Parameters
----------
X1 : torch.Tensor
The first data-matrix for computing the kernel. Of shape (N x D):
N samples in D dimensions.
X2 : torch.Tensor
The second data-matrix for computing the kernel. Of shape (M x D):
M samples in D dimensions. Set ``X2 == X1`` to compute a symmetric kernel.
out : torch.Tensor or None
Optional tensor of shape (N x M) to hold the output. If not provided it will
be created.
opt : Optional[FalkonOptions]
Options to be used for computing the operation. Useful are the memory size options
and CUDA options.
Returns
-------
out : torch.Tensor
The kernel between ``X1`` and ``X2``.
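Examples
--------
A hypothetical usage sketch (``kernel`` is any concrete subclass instance)::

    X1 = torch.randn(100, 3)
    X2 = torch.randn(50, 3)
    K = kernel(X1, X2)  # dense (100 x 50) kernel matrix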
"""
X1, X2, out = self._check_mm_dimensions(X1, X2, out)
self._check_device_properties(X1, X2, out, fn_name="kernel", opt=opt)
params = self.params
if opt is not None:
params = dataclasses.replace(self.params, **dataclasses.asdict(opt))
mm_impl = self._decide_mm_impl(X1, X2, params)
return mm_impl(self, params, out, X1, X2)
def _decide_mm_impl(self, X1: torch.Tensor, X2: torch.Tensor, opt: FalkonOptions):
"""Choose which `mm` function to use for this data.
Note that `mm` functions compute the kernel itself so **KeOps may not be used**.
Parameters
----------
X1 : torch.Tensor
First data matrix, of shape (N x D)
X2 : torch.Tensor
Second data matrix, of shape (M x D)
opt : FalkonOptions
Falkon options. Options may be specified to force GPU or CPU usage.
Returns
-------
mm_fn
A function which allows to perform the `mm` operation.
Notes
-----
This function decides based on the inputs: if the inputs are sparse, it will choose
the sparse implementations; if CUDA is detected, it will choose the CUDA implementation;
otherwise it will simply choose the basic CPU implementation.
"""
sparsity = check_sparse(X1, X2)
if not all(sparsity) and any(sparsity):
raise ValueError("Either all or none of 'X1', 'X2' must be sparse.")
return fmm
def mmv(self,
X1: Union[torch.Tensor, SparseTensor],
X2: Union[torch.Tensor, SparseTensor],
v: torch.Tensor,
out: Optional[torch.Tensor] = None, opt: Optional[FalkonOptions] = None):
# noinspection PyShadowingNames
"""Compute matrix-vector multiplications where the matrix is the current kernel.
Parameters
----------
X1 : torch.Tensor
The first data-matrix for computing the kernel. Of shape (N x D):
N samples in D dimensions.
X2 : torch.Tensor
The second data-matrix for computing the kernel. Of shape (M x D):
M samples in D dimensions. Set `X2 == X1` to compute a symmetric kernel.
v : torch.Tensor
A vector to compute the matrix-vector product. This may also be a matrix of shape
(M x T), but if `T` is very large the operations will be much slower.
out : torch.Tensor or None
Optional tensor of shape (N x T) to hold the output. If | |
<gh_stars>0
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for the action_helper.py module """
from unittest.mock import patch
import yaml
from shipyard_airflow.control.helpers import action_helper
from shipyard_airflow.control.helpers.design_reference_helper import (
DesignRefHelper
)
def get_repeated_steps():
"""Returns a list of fake step dictionaries with repeated steps (tries)
For use in testing getting the latest of a step
Currently, for tests that use this, the only things that matter are the
task ID and the try number. If this function gets used by more/future
tests, more data may need to be added
task_A tries: 1, 2, 3
task_B tries: 1
task_C tries: 1, 2
task_D tries: 1
task_E tries: 1
:returns: A list of fake (and incomplete) step dictionaries, some of which
are repeated across multiple tries
:rtype: list
"""
return [
{
'task_id': 'task_A',
'try_number': 1
},
{
'task_id': 'task_A',
'try_number': 2
},
{
'task_id': 'task_A',
'try_number': 3
},
{
'task_id': 'task_B',
'try_number': 1
},
{
'task_id': 'task_C',
'try_number': 2
},
{
'task_id': 'task_C',
'try_number': 1
},
{
'task_id': 'task_D',
'try_number': 1
},
{
'task_id': 'task_E',
'try_number': 1
}
]
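# A minimal sketch (not from the module under test) of the "latest try"
# reduction these fixtures exercise: keep, per task_id, the step with the
# highest try_number.
def latest_steps(steps):
    latest = {}
    for step in steps:
        task = step['task_id']
        if task not in latest or step['try_number'] > latest[task]['try_number']:
            latest[task] = step
    return latest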
def get_fake_latest_step_dict_failed():
"""Make a fake dictionary of "latest" steps that represent a failed dag
The only key required by the tests calling this function is "state", so
the steps contained in the returned dict are incomplete
:returns: A dictionary of "latest" steps that represent a failed dag
:rtype: dict
"""
return {
'armada_build': {'state': 'failed'},
'arbitrary_step': {'state': 'success'},
'another_arbitrary_step': {'state': 'running'},
'upgrade_airflow': {'state': 'success'},
'concurrency_check': {'state': 'success'}
}
def get_fake_latest_step_dict_running():
"""Make a fake dictionary of "latest" steps that represent a running dag
The only key required by the tests calling this function is "state", so
the steps contained in the returned dict are incomplete
:returns: A dictionary of "latest" steps that represent a running dag
:rtype: dict
"""
return {
'armada_build': {'state': 'queued'},
'arbitrary_step': {'state': 'success'},
'another_arbitrary_step': {'state': 'running'},
'upgrade_airflow': {'state': 'running'},
'concurrency_check': {'state': 'success'}
}
def get_fake_latest_step_dict_successful():
"""Make a fake dictionary of "latest" steps that represent a successful dag
The only key required by the tests calling this function is "state", so
the steps contained in the returned dict are incomplete
:returns: A dictionary of "latest" steps that represent a successful dag
:rtype: dict
"""
return {
'armada_build': {'state': 'success'},
'arbitrary_step': {'state': 'success'},
'another_arbitrary_step': {'state': 'success'},
'upgrade_airflow': {'state': 'skipped'},
'concurrency_check': {'state': 'success'}
}
def get_fake_latest_step_dict_unknown():
"""Make a fake dictionary of "latest" steps that represent a dag of unknown
result
The only key required by the tests calling this function is "state", so
the steps contained in the returned dict are incomplete
:returns: A dictionary of "latest" steps that represent a dag of unknown
result
:rtype: dict
"""
return {
'armada_build': {'state': 'success'},
'arbitrary_step': {'state': 'what'},
'another_arbitrary_step': {'state': 'are'},
'upgrade_airflow': {'state': 'these'},
'concurrency_check': {'state': 'states?'}
}
def test_determine_lifecycle():
dag_statuses = [
{'input': 'queued', 'expected': 'Pending'},
{'input': 'ShUTdown', 'expected': 'Failed'},
{'input': 'RUNNING', 'expected': 'Processing'},
{'input': 'None', 'expected': 'Pending'},
{'input': None, 'expected': 'Pending'},
{'input': 'bogusBroken', 'expected': 'Unknown (bogusBroken)'},
]
for status_pair in dag_statuses:
assert(status_pair['expected'] ==
action_helper.determine_lifecycle(status_pair['input']))
def test_get_step():
# Set up actions helper
action_id = '01CPV581B0CM8C9CA0CFRNVPPY' # id in db
actions = yaml.safe_load("""
---
- id: 01CPV581B0CM8C9CA0CFRNVPPY
name: update_software
parameters: {}
dag_id: update_software
dag_execution_date: 2018-09-07T23:18:04
user: admin
datetime: 2018-09-07 23:18:04.38546+00
context_marker: 10447c79-b02c-4dfd-a9e8-1362842f029d
...
""")
action_helper.ActionsHelper._get_action_db = lambda \
self, action_id: actions[0]
tasks = yaml.safe_load("""
---
- task_id: armada_get_status
dag_id: update_software.armada_build
execution_date: 2018-09-07 23:18:04
start_date: 2018-09-07 23:18:55.950298
end_date: 2018-09-07 23:18:58.159597
duration: 2.209299
state: success
try_number: 1
hostname: airflow-worker-0.airflow-worker-discovery.ucp.svc.cluster.local
unixname: airflow
job_id: 11
pool:
queue: default
priority_weight: 3
operator: ArmadaGetStatusOperator
queued_dttm:
pid: 249
max_tries: 0
- task_id: armada_get_status
dag_id: update_software.armada_build
execution_date: 2018-09-07 23:18:04
start_date: 2018-09-07 23:18:55.950298
end_date: 2018-09-07 23:18:58.159597
duration: 2.209299
state: success
try_number: 2
hostname: airflow-worker-1.airflow-worker-discovery.ucp.svc.cluster.local
unixname: airflow
job_id: 12
pool:
queue: default
priority_weight: 3
operator: ArmadaGetStatusOperator
queued_dttm:
pid: 249
max_tries: 0
- task_id: armada_post_apply
dag_id: update_software.armada_build
execution_date: 2018-09-07 23:18:04
start_date: 2018-09-07 23:48:25.884615
end_date: 2018-09-07 23:48:50.552757
duration: 24.668142
state: success
try_number: 2
hostname: airflow-worker-0.airflow-worker-discovery.ucp.svc.cluster.local
unixname: airflow
job_id: 13
pool:
queue: default
priority_weight: 2
operator: ArmadaPostApplyOperator
queued_dttm:
pid: 329
max_tries: 3
- task_id: armada_get_releases
dag_id: update_software.armada_build
execution_date: 2018-09-07 23:18:04
start_date: 2018-09-07 23:48:59.024696
end_date: 2018-09-07 23:49:01.471963
duration: 2.447267
state: success
try_number: 1
hostname: airflow-worker-0.airflow-worker-discovery.ucp.svc.cluster.local
unixname: airflow
job_id: 14
pool:
queue: default
priority_weight: 1
operator: ArmadaGetReleasesOperator
queued_dttm:
pid: 365
max_tries: 0
- task_id: armada_build
dag_id: update_software
execution_date: 2018-09-07 23:18:04
start_date: 2018-09-07 23:18:47.447987
end_date: 2018-09-07 23:49:02.397515
duration: 1814.949528
state: success
try_number: 1
hostname: airflow-worker-0.airflow-worker-discovery.ucp.svc.cluster.local
unixname: airflow
job_id: 9
pool:
queue: default
priority_weight: 5
operator: SubDagOperator
queued_dttm: 2018-09-07 23:18:45.772501
pid: 221
max_tries: 0
...
""")
action_helper.ActionsHelper._get_tasks_db = lambda \
self, dag_id, execution_date: tasks
actions_helper = action_helper.ActionsHelper(action_id=action_id)
# Retrieve step
step_id = 'armada_get_status' # task_id in db
# test backward compatibility with no additional param
step = actions_helper.get_step(step_id)
assert(step['hostname'].startswith('airflow-worker-0'))
# test explicit None
try_number = None
step = actions_helper.get_step(step_id, try_number)
assert(step['hostname'].startswith('airflow-worker-0'))
# test try_number associated with worker 0
try_number = 1
step = actions_helper.get_step(step_id, try_number)
assert(step['hostname'].startswith('airflow-worker-0'))
# test try_number associated with worker 1
try_number = 2
step = actions_helper.get_step(step_id, try_number)
assert(step['hostname'].startswith('airflow-worker-1'))
@patch('shipyard_airflow.control.helpers.deckhand_client.DeckhandClient.'
'get_path')
@patch('shipyard_airflow.control.helpers.design_reference_helper.'
'DesignRefHelper.get_design_reference_href', return_value='href')
def test_get_deployment_status_no_action_helper_completed_failed(get_href,
get_path):
action = {
'committed_rev_id': 'rev_id',
'context_marker': 'markofcontext',
'dag_status': 'FAILED',
'id': 'action_id',
'timestamp': 'my_timestamp',
'user': 'cool-person'
}
expected_data = {
'status': 'completed',
'results': 'failed',
'context-marker': action['context_marker'],
'action': action['id'],
'document_url': 'href',
'user': action['user'],
'date': action['timestamp']
}
deployment_status = action_helper.get_deployment_status(action)
assert deployment_status['status'] == expected_data['status']
assert deployment_status['results'] == expected_data['results']
assert (deployment_status['context-marker'] ==
expected_data['context-marker'])
assert deployment_status['action'] == expected_data['action']
assert deployment_status['document_url'] == expected_data['document_url']
assert deployment_status['user'] == expected_data['user']
assert deployment_status['date'] == expected_data['date']
get_href.assert_called_once_with(action['committed_rev_id'])
assert get_path.called # This means we created a DesignRefHelper object
@patch('shipyard_airflow.control.helpers.deckhand_client.DeckhandClient.'
'get_path')
@patch('shipyard_airflow.control.helpers.design_reference_helper.'
'DesignRefHelper.get_design_reference_href', return_value='href')
def test_get_deployment_status_no_action_helper_completed_success(get_href,
get_path):
action = {
'committed_rev_id': 'rev_id',
'context_marker': 'markofcontext',
'dag_status': 'SUCCESS',
'id': 'action_id',
'timestamp': 'my_timestamp',
'user': 'cool-person'
}
expected_data = {
'status': 'completed',
'results': 'successful',
'context-marker': action['context_marker'],
'action': action['id'],
'document_url': 'href',
'user': action['user'],
'date': action['timestamp']
}
deployment_status = action_helper.get_deployment_status(action)
assert deployment_status['status'] == expected_data['status']
assert deployment_status['results'] == expected_data['results']
assert (deployment_status['context-marker'] ==
expected_data['context-marker'])
assert deployment_status['action'] == expected_data['action']
assert deployment_status['document_url'] == expected_data['document_url']
assert deployment_status['user'] == expected_data['user']
assert deployment_status['date'] == expected_data['date']
get_href.assert_called_once_with(action['committed_rev_id'])
assert get_path.called # This means we created a DesignRefHelper object
@patch.object(action_helper.ActionsHelper,
'get_result_from_dag_steps',
return_value='result')
@patch('shipyard_airflow.control.helpers.deckhand_client.DeckhandClient.'
'get_path')
@patch('shipyard_airflow.control.helpers.design_reference_helper.'
'DesignRefHelper.get_design_reference_href', return_value='href')
def test_get_deployment_status_use_action_helper(get_href,
get_path,
get_result):
action = {
'committed_rev_id': 'rev_id',
'context_marker': 'markofcontext',
'dag_status': 'ASDFJKL:',
'id': 'action_id',
'timestamp': 'my_timestamp',
'user': 'cool-person'
}
expected_data = {
'status': 'running',
'results': 'result',
'context-marker': action['context_marker'],
'action': action['id'],
'document_url': 'href',
'user': action['user'],
'date': action['timestamp']
}
deployment_status = action_helper.get_deployment_status(action)
assert deployment_status['status'] == expected_data['status']
assert deployment_status['results'] == expected_data['results']
assert (deployment_status['context-marker'] ==
expected_data['context-marker'])
assert deployment_status['action'] == expected_data['action']
assert deployment_status['document_url'] == expected_data['document_url']
assert deployment_status['user'] == expected_data['user']
assert deployment_status['date'] == expected_data['date']
get_href.assert_called_once_with(action['committed_rev_id'])
assert get_result.called
assert get_path.called # This means we created a DesignRefHelper object
@patch.object(action_helper.ActionsHelper,
'get_result_from_dag_steps',
return_value='result')
@patch('shipyard_airflow.control.helpers.deckhand_client.DeckhandClient.'
'get_path')
@patch('shipyard_airflow.control.helpers.design_reference_helper.'
'DesignRefHelper.get_design_reference_href', return_value='href')
def test_get_deployment_status_use_action_helper_force_completed(get_href,
get_path,
get_result):
action = {
'committed_rev_id': 'rev_id',
'context_marker': 'markofcontext',
'dag_status': 'ASDFJKL:',
'id': 'action_id',
'timestamp': 'my_timestamp',
'user': 'cool-person'
}
expected_data = {
'status': 'completed',
'results': 'result',
'context-marker': action['context_marker'],
'action': action['id'],
'document_url': 'href',
'user': action['user'],
'date': action['timestamp']
}
deployment_status = action_helper.get_deployment_status(action, True)
assert deployment_status['status'] == expected_data['status']
assert deployment_status['results'] == expected_data['results']
assert (deployment_status['context-marker'] ==
expected_data['context-marker'])
assert deployment_status['action'] == expected_data['action']
assert deployment_status['document_url'] == expected_data['document_url']
assert deployment_status['user'] == expected_data['user']
assert deployment_status['date'] == expected_data['date']
get_href.assert_called_once_with(action['committed_rev_id'])
assert get_result.called
assert get_path.called # This means we created a DesignRefHelper object
@patch.object(action_helper.ActionsHelper, '_get_action_info')
@patch.object(action_helper.ActionsHelper, '_get_all_steps',
return_value=get_repeated_steps())
def test__get_latest_steps(get_all_steps, get_action_info):
helper = action_helper.ActionsHelper(action_id='irrelevant')
latest_steps_dict = helper._get_latest_steps()
assert latest_steps_dict['task_A']['try_number'] == 3
assert latest_steps_dict['task_B']['try_number'] == 1
assert latest_steps_dict['task_C']['try_number'] == 2
assert latest_steps_dict['task_D']['try_number'] == 1
assert latest_steps_dict['task_E']['try_number'] == 1
assert get_all_steps.called
assert get_action_info.called
@patch.object(action_helper.ActionsHelper, '_get_action_info')
@patch.object(action_helper.ActionsHelper, '_get_latest_steps',
return_value=get_fake_latest_step_dict_successful())
def test_get_result_from_dag_steps_success(get_latest_steps, get_action_info):
helper = action_helper.ActionsHelper(action_id='irrelevant')
result = helper.get_result_from_dag_steps()
assert result == 'successful'
assert get_latest_steps.called
assert get_action_info.called
@patch.object(action_helper.ActionsHelper, '_get_action_info')
@patch.object(action_helper.ActionsHelper, '_get_latest_steps',
return_value=get_fake_latest_step_dict_failed())
def test_get_result_from_dag_steps_failed(get_latest_steps, get_action_info):
helper = action_helper.ActionsHelper(action_id='irrelevant')
result = helper.get_result_from_dag_steps()
assert result == 'failed'
assert get_latest_steps.called
assert get_action_info.called
@patch.object(action_helper.ActionsHelper, '_get_action_info')
@patch.object(action_helper.ActionsHelper, '_get_latest_steps',
Cont = False
while Cont == False:
index = 0
if DeckorCourse == "Deck":
for each in Deck_of_questions.Decknames:
if each.upper() == name.upper():
index+=1
else:
for each in Course.Coursenames:
if each.upper() == name.upper():
index+=1
if index >0:
name = input("There is already a "+DeckorCourse+" named that. Input a new name")
else:
return(name)
def CheckifCoureseempty(CourseAccessed):#Checks if a course is empty when doing the question test
EmptyDecks = 0
TotalDecks = 0
for each in CourseAccessed.Decks:
TotalDecks += 1
if each.Qlist == []:
EmptyDecks +=1
if TotalDecks == EmptyDecks:
return True
else:
return False
def Check_Repetition_number(number):#Used for when I ask for multiple decks to be made
cont = False
greaterthan0 = True
while cont == False:
if number.isdigit() == True:
if int(number) > 0:
cont = True
return number
else:
greaterthan0 = False
if greaterthan0 == False or number.isdigit() == False:
number = input("Sorry you have written the number incorrectly, it has to be a whole number > 0 ")
def DisplayCourses(Coursenames):#this was repeated a lot
print("Your courses available are.")
for each in Coursenames:
print(each)
def CheckFile(Filename,FileType):#I made this to stop errors happening if the file doesn't exist
cont = False
while cont == False:
try:
FileTest = open(Filename+FileType,"r")
FileTest.close()
return(Filename)
except OSError:
Filename = input("There are no files in the folder with this name, remember to add it. ")
def Ask_one_deck_questions(CourseAccessed,QtoA):
print("The Decks within "+CourseAccessed.name+" set are")
for each in CourseAccessed.Decks:
if each.Qlist != []:
print(each.name)
Deckname = input("Which Deck do you want to access? ")
DeckAccessed = CourseAccessed.GetDeck(Deckname)
Repeat_Deck = True
Questions = []
Answers = []
QandAData = DeckAccessed.AddTestQandAlist(Answers,Questions)
if QtoA == "1":
Questions = QandAData[1]
Answers = QandAData[0]
else:
Questions = QandAData[0]
Answers = QandAData[1]
Maxno = len(Questions)
while Repeat_Deck == True:
Answer = input("There are "+str(Maxno)+" Questions. How many would you like to be tested on? ")
Cont = Check_Num_Validity(Answer,Maxno)
while Cont[0] == False:
if Cont[1] != "notnum":
Answer = input("There are "+str(Maxno)+" Questions.Your number was "+Cont[1]+"er than any number between 1-"+str(Maxno)+" ")
elif Cont[1] == "notnum":
Answer = input("There are "+str(Maxno)+" Questions.Your number was not a valid number between 1-"+str(Maxno)+" ")
Cont = Check_Num_Validity(Answer,Maxno)
QandAinfo =TestQuestions(Questions,Answers,int(Answer))
empty = QandAinfo[2]
Repeat = input("""Would you like to test yourself again on
1) This Deck
2) This Deck but without the questions you did this round
3) Another Deck within this course
4) Another Course
5) Quit
""")
Repeat = Check_Options(Repeat,5)
while empty == True and Repeat == "2":
Repeat = input("You can not choose option two as you have answered all the questions choose another option ")
Repeat = Check_Options(Repeat,4)
print(Repeat)
if Repeat == "1":
Questions = []
Answers = []
for each in DeckAccessed.Qlist:
Questions.append(each)
for each in DeckAccessed.Alist:
Answers.append(each)
Maxno = len(Questions)
Repeat_Deck = True
elif Repeat == "2":
Repeat_Deck = True
Questions = QandAinfo[0]
Answers = QandAinfo[1]
Maxno = len(Questions)
elif Repeat == "3":
Repeat_Deck = False
Repeat_Course = True
backtostart = False
return (Repeat_Course,backtostart)
elif Repeat == "4":
Repeat_Deck = False
Repeat_Course = False
backtostart = False
return (Repeat_Course,backtostart)
elif Repeat == "5":
Repeat_Deck = False
Repeat_Course = False
backtostart = True
return (Repeat_Course,backtostart)
def main():
run = True
index = 0
Coursenames = GetCourse()
CourseObjects = []
for each in Coursenames:
CourseObjects.append(Course(each.strip()))
CourseObjects[index].InstantiateSavedSets()
index+=1
while run == True:
if Coursenames == []:
Answer = input("Welcome to Omars Flashcard thing. You currently have no Courses saved , press one to add one, 2 to quit")
if Answer == "1":
Coursename = Checkname("Course")
CreateNewCourse(Coursenames,CourseObjects,Coursename)
Repetitions = input("How many Decks would you lke to add to this Course(must be greater than 0)?")
Cont = False
Repetitions = Check_Repetition_number(Repetitions)
for i in range(int(Repetitions)):
Deckname = Checkname("Deck")
CourseObjects[-1].CreateNewDeck(Deckname)
else:
run = False
else:
print("""Hello, Welcome to Omars Flashcard file saver.""")
DisplayCourses(Coursenames)
option = input("""Would you like to:
1)Create a new Course or rename a Course
2)Access one of your current Courses and edit the Decks within or add new Decks
3)Open a deck to test yourself on Questions
4)Test yourself on multiple decks
5)Import a file of questions and answers to a new or old deck
6)Delete Files (does not work yet)
7)Quit
press the corresponding number.
""")
option = Check_Options(option,7)
if option == "1":
backtostart = False
while backtostart == False:
Answer = input("""Would you like to:
1)Add a course
2)Rename a Course
3)Quit
""")
if Answer == "1":
Coursename = Checkname("Course")# you can probablt put this in teh function below
CreateNewCourse(Coursenames,CourseObjects,Coursename)
Repetitions = input("How many Decks would you lke to add to this Course(must be greater than 0)?")
Cont = False
Repetitions = Check_Repetition_number(Repetitions)
for i in range(int(Repetitions)):
Deckname = Checkname("Deck")
CourseObjects[-1].CreateNewDeck(Deckname)
elif Answer == "2":
pass
else:
backtostart = True
if option == '2':
backtostart = False
while backtostart == False:
DisplayCourses(Coursenames)
AccessCourse = input("Which Course would you like to Access?")
CourseAccessed = Access_Course(AccessCourse,Coursenames,CourseObjects)
Samecourse = True
while Samecourse == True:
print("The Decks within "+CourseAccessed.name+" set are")
for each in CourseAccessed.Decks:#shows the Deck within the Course
print(each.name)
Answer = input("""Would you like to:
1) Add Decks
2) Edit a Decks questions and answers
3) Access a different course
4) Quit to main menu
""")
Answer = Check_Options(Answer,4)
if Answer == '1':
Repetitions = input("How many Decks would you like to add?")
Repetitions = Check_Repetition_number(Repetitions)
for i in range(int(Repetitions)):
Deckname = Checkname("Deck")
CourseAccessed.CreateNewDeck(Deckname)
elif Answer == '2':
backtooptionmenu = False
while backtooptionmenu == False:
Deckname = input("Which Deck do you want to access?")
DeckAccessed = CourseAccessed.GetDeck(Deckname)
if DeckAccessed.Alist == []:
Repetitions = input("There are no questions in this Deck, how many do you want to add?(must be >0")
Repetitions = Check_Repetition_number(Repetitions)
for i in range(int(Repetitions)):
Question = input("What is the question")
Answr = input("What is the Answer")
DeckAccessed.AddQandA(Question,Answr)
DeckAccessed.SaveQandA()
SameDeck = True
while SameDeck == True:
print("The questions and answers within " +DeckAccessed.name +" are:")
index = 0
for each in DeckAccessed.Qlist:
print( str(index+1)+")"+each+" "+DeckAccessed.Alist[index])
index += 1
Options = input("""Would you like to:
1)Edit a question
2)Edit an answer
3)Add a question and answer
4)Delete a Question and Answer
5)Go back to course menu to edit a different Deck or course
""")
Options = Check_Options(Options,5)
if Options == "1":
Editno = input("which question number would you like to edit? ")
Editno = Get_Answer_or_Question_num(DeckAccessed,Editno)
Edit = input("What would you like to change "+DeckAccessed.Qlist[Editno]+" to? ")
DeckAccessed.Qlist[Editno] = Edit
DeckAccessed.SaveQandA()
elif Options == "2":
Editno = input("which answer number would you like to edit? ")
Editno = Get_Answer_or_Question_num(DeckAccessed,Editno)
Edit = input("What would you like to change "+DeckAccessed.Alist[Editno]+" to? ")
DeckAccessed.Alist[Editno] = Edit
DeckAccessed.SaveQandA()
elif Options == "3":
repetitions= int(input("How many questions and answers would you like to add?"))
for i in range(repetitions):
Question = input("What is the question ")
Answer = input("What is the Answer ")
DeckAccessed.AddQandA(Question,Answer)
DeckAccessed.SaveQandA()
elif Options == "4":
IndextoDelete = input("Which Question number do you want to delete? ")
IndextoDelete = Get_Answer_or_Question_num(DeckAccessed,IndextoDelete)
DeckAccessed.DeleteQandA(int(IndextoDelete))
elif Options == "6":
SameDeck = False
backtooptionmenu = True
elif Answer == '3':
Samecourse = False
else:
Samecourse = False
backtostart = True
backtooptionmenu = True
if option == "3":
empty = False
backtostart = False
while backtostart == False:
DisplayCourses(Coursenames)
AccessCourse = input("Which Course would you like to Access? ")
CourseAccessed = Access_Course(AccessCourse,Coursenames,CourseObjects)
Repeat_Course = True
Empty = CheckifCoureseempty(CourseAccessed)
if Empty == True:
Repeat_Course = False
Goback = input("""That Course is empty. Would you like to
1)Choose a different course
2)Quit
""")
Goback = Check_Options(Goback,2)
if Goback == "2":
backtostart = True
QtoA = input("""Would you like to test yourself on:
1)Questions to Answers
2)Answers to Questions
""")
QtoA = Check_Options(QtoA,2)
while Repeat_Course == True:
RepeatFactors = Ask_one_deck_questions(CourseAccessed,QtoA)
Repeat_Course = RepeatFactors[0]
backtostart = RepeatFactors[1]
elif option =="4":
backtostart = False
AccessedCourse = False
while backtostart == False:
while AccessedCourse == False:#This was added because of the Checkempty as I want the program to come back here
# if the course is empty. If I did not have the while loop I could not make RepeatDeck False as it would continue the program to there (Can you tell I wrote
at arbitrarily spaced time intervals
given by "time"
Python conversion of dfourt.pro http://www.arm.ac.uk/~csj/idl/CLEAN/
:param time: numpy array or list, input time(independent) vector
:param data: numpy array or list, input dependent vector
:param df: float, frequency increment for the FT (default: 1/T)
:param fmax: float, max frequency in the FT (default: 1/min(dt))
:param ppb: float, points per restoring beam (default: 4)
:param log: boolean, if True prints progress to standard output
if False silent
The frequency grid, "freq", on which the spectral window function "wfn"
and "dft" are computed, controlled by "df", "fmax" and "ppb".
Note that this implementation is completely general, and therefore slow,
since it cannot make use of the timing enhancements of the FFT.
The IDL implementation of the DFT is based on a suite of FORTRAN routines
developed by Roberts et al. For more information concerning the algorithm,
please refer to:
<NAME>., <NAME>., & <NAME>. 1987, AJ, 93, 968
"Time Series Analysis with CLEAN. I. Derivation of a Spectrum"
Note 1: The frequency resolution element "df" is oversampled by "ppb" to
ensure accurate determination of the location of peaks in the
Fourier Transform.
Note 2: T = total time spanned = max(time) - min(time)
Note 3: dt = 2. * [minimum time separation]
:return freq: numpy array of floats, frequency vector
:return wfn: numpy array of complex numbers, spectral window function
:return dft: numpy array of complex numbers, "dirty" discrete Fourier
transform
---------------------------------------------------------------------------
clean:
---------------------------------------------------------------------------
This function returns an estimate for the component ALPHA, which produces
the DFT at frequency index L via the relation below
Python conversion of cl_alpha.pro http://www.arm.ac.uk/~csj/idl/CLEAN/
:param wfn: numpy array of complex numbers, spectral window function
:param dft: numpy array of complex numbers, "dirty" discrete Fourier
transform
:param l: int, the current maximum in dft
:param err: float, the allowed error in "wnorm"
:return alpha: complex number, the amplitude "alpha" in
equations below
Relation:
dft(l) = alpha*wfn(0) + conj(alpha) * wfn(2*l)
alpha is given by:
dft(l) - conj(dft(l))*wfn(2*l)
alpha = ------------------------------
wnorm
where:
wnorm = 1 - abs(wfn(2*l))^2
See Section III b) [especially equation (24)] of Roberts et al.
(1987, AJ, 93, 968).
HISTORY:
Jan. 91: translated from FORTRAN code by Roberts et al. [AWF, Bartol]
Apr. 96: recoded for efficiency and added documentation [AWF, USM]
"""
# -------------------------------------------------------------------------
# Deal with keyword arguments
for fname in ['frequency', 'freqs', 'freq']:
if fname in kwargs:
kwargs['freq'] = kwargs[fname]
freq = kwargs.get('freq', None)
df = kwargs.get('df', None)
fmax = kwargs.get('fmax', None)
ppb = kwargs.get('ppb', 4)
gain = kwargs.get('gain', 0.5)
ncl = kwargs.get('ncl', 100)
log = kwargs.get('log', False)
full = kwargs.get('full', False)
use = kwargs.get('use', USE)
# -------------------------------------------------------------------------
# Use the default frequency parameters to describe the frequency grid.
# The defaults are selected by leaving df, fmax and ppb out of the function
if freq is None:
freq = dfourt(time, data, df, fmax, ppb, log)
# input frequencies must be in order from low to high!
else:
freq = np.sort(freq)
# -------------------------------------------------------------------------
# Compute the "dirty" discrete Fourier transform.
start1 = 0.0
if log:
print('\n Computing "dirty" discrete Fourier transform...')
start1 = tt.time()
wfn, dft = run_discrete_fourier_transform(freq, time, data, log, use)
if log:
end1 = tt.time()
print('\n\t Took {0} s'.format(end1 - start1))
# -------------------------------------------------------------------------
# Clean the DFT. For this demonstration, use a gain of 0.5 and continue
# for 100 iterations
start2 = 0.0
if log:
print('\n Computing clean periodogram...')
start2 = tt.time()
cdft = clean(freq, wfn, dft, gain, ncl, log)
if log:
end2 = tt.time()
print('\n\t Took {0} s'.format(end2 - start2))
# -------------------------------------------------------------------------
# If full return frequency, the spectral window function, the dirty DFT
# and the CLEANed DFT
if full:
return freq, wfn, dft, cdft
# else return only the CLEANed DFT
else:
return cdft
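# The component amplitude described in the docstring above can be written as
# a standalone helper. This is a hedged sketch of the documented relation
# (eq. 24 of Roberts et al. 1987), not necessarily the exact body of the
# clean() routine used here; the helper name is hypothetical.
def _alpha_sketch(wfn, dft, l, err=1e-4):
    """Estimate the CLEAN component amplitude at frequency index l."""
    wnorm = 1.0 - abs(wfn[2 * l]) ** 2
    # floor the normalisation so a (near-)degenerate window cannot blow up
    if wnorm < err:
        wnorm = err
    return (dft[l] - np.conj(dft[l]) * wfn[2 * l]) / wnorm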
def plot_test_graph(time, data, freq, cdft, logged=True):
"""
Plots a matplotlib test plot of the raw data and the CLEANed periodogram
compares it to a lombscargle periodogram using the same frequencies
:param time: numpy array or list, input time(independent) vector
:param data: numpy array or list, input dependent vector
:param freq: frequency vector
:param cdft: numpy array of complex numbers, the CLEANed periodogram
:param logged: boolean, whether to log the x axis on frequency and time
graphs
:return:
"""
# Calculate lombscargle
from astropy.stats import LombScargle
power = LombScargle(time, data).power(freq)
# plot
import matplotlib.pyplot as plt
plt.close()
fig, frame = plt.subplots(ncols=1, nrows=3)
# plot data
frame[0].scatter(time, data, s=5)
# plot lombscargle
frame[1].plot(freq, power/np.nanmax(power),
color='red', label='LombScargle')
# plot clean periodogram
amps = np.array(2.0*abs(cdft))
frame[1].plot(freq[0: len(cdft)], amps/np.nanmax(amps),
color='b', label='CLEAN periodogram')
# plot lombscargle
frame[2].plot(1.0/freq, power/np.nanmax(power),
color='red', label='LombScargle')
# plot clean periodogram
amps = np.array(2.0*abs(cdft))
frame[2].plot(1.0/freq[0: len(cdft)], amps/np.nanmax(amps),
color='b', label='CLEAN periodogram')
# finalise graph
frame[0].set_xlabel('Time')
frame[0].set_ylabel('Flux')
frame[1].set_xlabel('Frequency')
frame[1].set_ylabel('Normalised Amplitude/Power Spectrum')
frame[1].legend(loc=1, numpoints=1, scatterpoints=1)
frame[2].set_xlabel('Time / days')
frame[2].set_ylabel('Normalised Amplitude/Power Spectrum')
frame[2].legend(loc=1, numpoints=1, scatterpoints=1)
if logged:
frame[1].set_xscale('log')
frame[2].set_xscale('log')
plt.show()
plt.close()
def __tqdmlog__(x_input, log):
"""
Private function for dealing with logging
:param x_input: any iterable object
:param log: bool, if True and module tqdm exists use logging
:return:
"""
# deal with importing tqdm
try:
from tqdm import tqdm
except ModuleNotFoundError:
tqdm = (lambda x: x)
# deal with logging
if log:
rr = tqdm(x_input)
else:
rr = x_input
return rr
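# Minimal usage sketch for __tqdmlog__: the wrapper is transparent, so the
# loop body stays the same whether or not a progress bar is available.
#
#   for row in __tqdmlog__(range(1000), log=True):
#       pass  # shows a tqdm progress bar only if tqdm is installed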
def quantile_1D(data, weights, quantile):
"""
Compute the weighted quantile of a 1D numpy array.
Taken from:
https://github.com/nudomarinero/wquantiles/blob/master/wquantiles.py
Parameters
----------
data : ndarray
Input array (one dimension).
weights : ndarray
Array with the weights of the same size of `data`.
quantile : float
Quantile to compute. It must have a value between 0 and 1.
Returns
-------
quantile_1D : float
The output value.
"""
# Check the data
if not isinstance(data, np.matrix):
data = np.asarray(data)
if not isinstance(weights, np.matrix):
weights = np.asarray(weights)
nd = data.ndim
if nd != 1:
raise TypeError("data must be a one dimensional array")
ndw = weights.ndim
if ndw != 1:
raise TypeError("weights must be a one dimensional array")
if data.shape != weights.shape:
raise TypeError("the length of data and weights must be the same")
if ((quantile > 1.) or (quantile < 0.)):
raise ValueError("quantile must have a value between 0. and 1.")
# Sort the data
ind_sorted = np.argsort(data)
sorted_data = data[ind_sorted]
sorted_weights = weights[ind_sorted]
# Compute the auxiliary arrays
Sn = np.cumsum(sorted_weights)
# TODO: Check that the weights do not sum zero
#assert Sn != 0, "The sum of the weights must not be zero"
Pn = (Sn-0.5*sorted_weights)/np.sum(sorted_weights)
# Get the value of the weighted median
return np.interp(quantile, Pn, sorted_data)
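# A minimal usage sketch for quantile_1D: with uniform weights the 0.5
# quantile reduces to the ordinary interpolated median (the helper name is
# hypothetical, for illustration only).
def _weighted_median_example():
    data = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    weights = np.ones_like(data)
    # Pn = (cumsum(w) - 0.5*w) / sum(w) = [0.1, 0.3, 0.5, 0.7, 0.9], so
    # interpolating at 0.5 lands exactly on the middle sample.
    return quantile_1D(data, weights, 0.5)  # -> 3.0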
def bin_data(time, data, edata=None, binsize=None, log=False):
"""
Bin time and data vectors by binsize (using a median combine of points in
each bin (weight median if edata is not None).
:param time: numpy array or list, input time(independent) vector
:param data: numpy array or list, input dependent vector
:param edata: None or numpy array, uncertainties associated with "data"
:param binsize: float, size of each bin (in units of "time")
:param log: boolean, if True prints progress to standard output
if False silent
:return binnedtime: numpy array, binned "time" array
:return binneddata: numpy array, binned "data" array
"""
# Deal with bin size, if None, rebin to 1000 elements or don't bin
# if len(time) is less than 1000
if binsize is None:
maxbins = min(len(time), 1000)
bins = np.linspace(min(time), max(time), maxbins)
# the bin mask below needs a width, so use the linspace grid spacing
binsize = bins[1] - bins[0]
else:
bins = np.arange(min(time), max(time), binsize)
# remove nans
nanmask = np.isfinite(time) & np.isfinite(data)
time, data = time[nanmask], data[nanmask]
# Now bin the data
binnedtime = []
binneddata = []
# Loop round each bin and median the time and the data for all values
# within that bin
if log:
print('\n\t Binning data...')
for bin in __tqdmlog__(bins, log):
# mask values within this iteration bin
mask = (time >= bin) & (time < bin+binsize)
# if there are no values in this bin do not bin it
if np.sum(mask) == 0:
continue
# if there are values in this bin
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
'name': "LoopTools",
'author': "<NAME>",
'version': (3, 2, 0),
'blender': (2, 5, 7),
'api': 35979,
'location': "View3D > Toolbar and View3D > Specials (W-key)",
'warning': "",
'description': "Mesh modelling toolkit. Several tools to aid modelling",
'wiki_url': "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
"Scripts/Modeling/LoopTools",
'tracker_url': "http://projects.blender.org/tracker/index.php?"\
"func=detail&aid=26189",
'category': 'Mesh'}
import bpy
import mathutils
import math
##########################################
####### General functions ################
##########################################
# used by all tools to improve speed on reruns
looptools_cache = {}
# force a full recalculation next time
def cache_delete(tool):
if tool in looptools_cache:
del looptools_cache[tool]
# check cache for stored information
def cache_read(tool, object, mesh, input_method, boundaries):
# current tool not cached yet
if tool not in looptools_cache:
return(False, False, False, False, False)
# check if selected object didn't change
if object.name != looptools_cache[tool]["object"]:
return(False, False, False, False, False)
# check if input didn't change
if input_method != looptools_cache[tool]["input_method"]:
return(False, False, False, False, False)
if boundaries != looptools_cache[tool]["boundaries"]:
return(False, False, False, False, False)
modifiers = [mod.name for mod in object.modifiers if mod.show_viewport \
and mod.type == 'MIRROR']
if modifiers != looptools_cache[tool]["modifiers"]:
return(False, False, False, False, False)
input = [v.index for v in mesh.vertices if v.select and not v.hide]
if input != looptools_cache[tool]["input"]:
return(False, False, False, False, False)
# reading values
single_loops = looptools_cache[tool]["single_loops"]
loops = looptools_cache[tool]["loops"]
derived = looptools_cache[tool]["derived"]
mapping = looptools_cache[tool]["mapping"]
return(True, single_loops, loops, derived, mapping)
# store information in the cache
def cache_write(tool, object, mesh, input_method, boundaries, single_loops,
loops, derived, mapping):
# clear cache of current tool
if tool in looptools_cache:
del looptools_cache[tool]
# prepare values to be saved to cache
input = [v.index for v in mesh.vertices if v.select and not v.hide]
modifiers = [mod.name for mod in object.modifiers if mod.show_viewport \
and mod.type == 'MIRROR']
# update cache
looptools_cache[tool] = {"input": input, "object": object.name,
"input_method": input_method, "boundaries": boundaries,
"single_loops": single_loops, "loops": loops,
"derived": derived, "mapping": mapping, "modifiers": modifiers}
# calculates natural cubic splines through all given knots
def calculate_cubic_splines(mesh_mod, tknots, knots):
# hack for circular loops
if knots[0] == knots[-1] and len(knots) > 1:
circular = True
k_new1 = []
for k in range(-1, -5, -1):
if k - 1 < -len(knots):
k += len(knots)
k_new1.append(knots[k-1])
k_new2 = []
for k in range(4):
if k + 1 > len(knots) - 1:
k -= len(knots)
k_new2.append(knots[k+1])
for k in k_new1:
knots.insert(0, k)
for k in k_new2:
knots.append(k)
t_new1 = []
total1 = 0
for t in range(-1, -5, -1):
if t - 1 < -len(tknots):
t += len(tknots)
total1 += tknots[t] - tknots[t-1]
t_new1.append(tknots[0] - total1)
t_new2 = []
total2 = 0
for t in range(4):
if t + 1 > len(tknots) - 1:
t -= len(tknots)
total2 += tknots[t+1] - tknots[t]
t_new2.append(tknots[-1] + total2)
for t in t_new1:
tknots.insert(0, t)
for t in t_new2:
tknots.append(t)
else:
circular = False
# end of hack
n = len(knots)
if n < 2:
return False
x = tknots[:]
locs = [mesh_mod.vertices[k].co[:] for k in knots]
result = []
for j in range(3):
a = []
for i in locs:
a.append(i[j])
h = []
for i in range(n-1):
if x[i+1] - x[i] == 0:
h.append(1e-8)
else:
h.append(x[i+1] - x[i])
q = [False]
for i in range(1, n-1):
q.append(3/h[i]*(a[i+1]-a[i]) - 3/h[i-1]*(a[i]-a[i-1]))
l = [1.0]
u = [0.0]
z = [0.0]
for i in range(1, n-1):
l.append(2*(x[i+1]-x[i-1]) - h[i-1]*u[i-1])
if l[i] == 0:
l[i] = 1e-8
u.append(h[i] / l[i])
z.append((q[i] - h[i-1] * z[i-1]) / l[i])
l.append(1.0)
z.append(0.0)
b = [False for i in range(n-1)]
c = [False for i in range(n)]
d = [False for i in range(n-1)]
c[n-1] = 0.0
for i in range(n-2, -1, -1):
c[i] = z[i] - u[i]*c[i+1]
b[i] = (a[i+1]-a[i])/h[i] - h[i]*(c[i+1]+2*c[i])/3
d[i] = (c[i+1]-c[i]) / (3*h[i])
for i in range(n-1):
result.append([a[i], b[i], c[i], d[i], x[i]])
splines = []
for i in range(len(knots)-1):
splines.append([result[i], result[i+n-1], result[i+(n-1)*2]])
if circular: # cleaning up after hack
knots = knots[4:-4]
tknots = tknots[4:-4]
return(splines)
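# Hedged sketch of evaluating one per-axis segment returned above: each
# entry is [a, b, c, d, x0], the standard natural-cubic-spline form
# a + b*(t-x0) + c*(t-x0)**2 + d*(t-x0)**3 on its knot interval (the helper
# name is hypothetical and unused by the addon itself).
def _eval_cubic_segment(segment, t):
    a, b, c, d, x0 = segment
    h = t - x0
    return a + b * h + c * h ** 2 + d * h ** 3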
# calculates linear splines through all given knots
def calculate_linear_splines(mesh_mod, tknots, knots):
splines = []
for i in range(len(knots)-1):
a = mesh_mod.vertices[knots[i]].co
b = mesh_mod.vertices[knots[i+1]].co
d = b-a
t = tknots[i]
u = tknots[i+1]-t
splines.append([a, d, t, u]) # [locStart, locDif, tStart, tDif]
return(splines)
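# Correspondingly, a linear segment [a, d, t0, u] from above evaluates at
# parameter t as a + d * (t - t0) / u, i.e. plain linear interpolation
# between the two knot locations.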
# calculate a best-fit plane to the given vertices
def calculate_plane(mesh_mod, loop, method="best_fit", object=False):
# getting the vertex locations
locs = [mesh_mod.vertices[v].co.copy() for v in loop[0]]
# calculating the center of mass
com = mathutils.Vector()
for loc in locs:
com += loc
com /= len(locs)
x, y, z = com
if method == 'best_fit':
# creating the covariance matrix
mat = mathutils.Matrix(((0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
))
for loc in locs:
mat[0][0] += (loc[0]-x)**2
mat[0][1] += (loc[0]-x)*(loc[1]-y)
mat[0][2] += (loc[0]-x)*(loc[2]-z)
mat[1][0] += (loc[1]-y)*(loc[0]-x)
mat[1][1] += (loc[1]-y)**2
mat[1][2] += (loc[1]-y)*(loc[2]-z)
mat[2][0] += (loc[2]-z)*(loc[0]-x)
mat[2][1] += (loc[2]-z)*(loc[1]-y)
mat[2][2] += (loc[2]-z)**2
# calculating the normal to the plane
normal = False
try:
mat.invert()
except:
if sum(mat[0]) == 0.0:
normal = mathutils.Vector((1.0, 0.0, 0.0))
elif sum(mat[1]) == 0.0:
normal = mathutils.Vector((0.0, 1.0, 0.0))
elif sum(mat[2]) == 0.0:
normal = mathutils.Vector((0.0, 0.0, 1.0))
if not normal:
itermax = 500
iter = 0
vec = mathutils.Vector((1.0, 1.0, 1.0))
vec2 = (mat * vec)/(mat * vec).length
while vec != vec2 and iter<itermax:
iter += 1
vec = vec2
vec2 = (mat * vec)/(mat * vec).length
normal = vec2
elif method == 'normal':
# averaging the vertex normals
v_normals = [mesh_mod.vertices[v].normal for v in loop[0]]
normal = mathutils.Vector()
for v_normal in v_normals:
normal += v_normal
normal /= len(v_normals)
normal.normalize()
elif method == 'view':
# calculate view normal
rotation = bpy.context.space_data.region_3d.view_matrix.to_3x3().\
inverted()
normal = rotation * mathutils.Vector((0.0, 0.0, 1.0))
if object:
normal = object.matrix_world.inverted().to_euler().to_matrix() * \
normal
return(com, normal)
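# Note on the 'best_fit' branch above: power iteration on the inverted
# covariance matrix converges to the eigenvector of the smallest covariance
# eigenvalue, which is the best-fit plane normal. A hedged numpy equivalent
# (illustrative only, not used by this addon):
#
#   cov = numpy.cov(numpy.array(locs).T)
#   eigvals, eigvecs = numpy.linalg.eigh(cov)
#   normal = eigvecs[:, 0]  # eigenvector of the smallest eigenvalue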
# calculate splines based on given interpolation method (controller function)
def calculate_splines(interpolation, mesh_mod, tknots, knots):
if interpolation == 'cubic':
splines = calculate_cubic_splines(mesh_mod, tknots, knots[:])
else: # interpolation == 'linear'
splines = calculate_linear_splines(mesh_mod, tknots, knots[:])
return(splines)
# check loops and only return valid ones
def check_loops(loops, mapping, mesh_mod):
valid_loops = []
for loop, circular in loops:
# loop needs to have at least 3 vertices
if len(loop) < 3:
continue
# loop needs at least 1 vertex in the original, non-mirrored mesh
if mapping:
all_virtual = True
for vert in loop:
if mapping[vert] > -1:
all_virtual = False
break
if all_virtual:
continue
# vertices can not all be at the same location
stacked = True
for i in range(len(loop) - 1):
if (mesh_mod.vertices[loop[i]].co - \
mesh_mod.vertices[loop[i+1]].co).length > 1e-6:
stacked = False
break
if stacked:
continue
# passed all tests, loop is valid
valid_loops.append([loop, circular])
return(valid_loops)
# input: mesh, output: dict with the edge-key as key and face-index as value
def dict_edge_faces(mesh):
edge_faces = dict([[edge.key, []] for edge in mesh.edges if not edge.hide])
for face in mesh.faces:
if face.hide:
continue
for key in face.edge_keys:
edge_faces[key].append(face.index)
return(edge_faces)
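# For example, a mesh whose only face (index 0) uses vertices (0, 1, 2, 3)
# yields {(0, 1): [0], (1, 2): [0], (2, 3): [0], (0, 3): [0]}: edge keys are
# sorted vertex-index pairs and the values list the adjacent face indices.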
# input: mesh (edge-faces optional), output: dict with face-face connections
def dict_face_faces(mesh, edge_faces=False):
if not edge_faces:
edge_faces = dict_edge_faces(mesh)
connected_faces = dict([[face.index, []] for face in mesh.faces if \
not face.hide])
for face in mesh.faces:
if face.hide:
continue
for edge_key in face.edge_keys:
for connected_face in edge_faces[edge_key]:
if connected_face == face.index:
continue
connected_faces[face.index].append(connected_face)
return(connected_faces)
# input: mesh, output: dict with the vert index as
# coding=utf-8
import logging
import os
import random
import re
import time
import urllib
from datetime import datetime
from flask import (abort, flash, g, jsonify, redirect, render_template,
request, send_from_directory, session, url_for)
from flask_babel import gettext
from flask_login import current_user, login_required, login_user, logout_user
from flask_sqlalchemy import get_debug_queries
from guess_language import guessLanguage
from sqlalchemy.sql import or_
from sqlalchemy import desc
from werkzeug.utils import secure_filename
from app import app, babel, db, lm
from config import (DATABASE_QUERY_TIMEOUT, LANGUAGES, MAX_SEARCH_RESULTS,
PC_GEETEST_ID, PC_GEETEST_KEY, POSTS_PER_PAGE)
from data import *
from geetest import GeetestLib
from .emails import captcha_email, follower_notification
from .forms import (ContactForm, DatasetForm, EditForm, EmailCaptcha,
GetbackPasswd, LoginForm, PostForm, RegistrationForm,
SearchForm, SmsCaptcha, EditDatasetForm, flash_form_errors,
GroupForm, InvitationCode)
from .models import Category, Contact, Dataset, DatasetHistory, Post, User, Application,Group,GroupApplication,EventLog,GroupInvitation
from .sms import rand_sms_captcha, send_sms
from .translate import microsoft_translate
from event_log import EventLogs,get_now_str,get_all_logs,get_dataset_logs
from . import options
# json is the only module here not already imported at the top of the file
import json
@lm.token_loader
def get_auth_token(id):
return User.query.get(int(id))
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(LANGUAGES.keys())
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
g.search_form = SearchForm()
g.locale = get_locale()
@app.after_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= DATABASE_QUERY_TIMEOUT:
app.logger.warning(
"SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" %
(query.statement, query.parameters, query.duration,
query.context))
return response
@app.errorhandler(403)
def not_allow_error(error):
return render_template('403.html'), 403
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
def home():
EventLogs.view_home()
return render_template('dmp-index.html')
@app.route('/core', methods=['GET', 'POST'])
def core():
return render_template('home.html')
@app.route('/dmp')
def dmp():
return render_template('dmp-index.html')
@app.route('/dmp/features')
def dmp_features():
return render_template('dmp-features.html')
@app.route('/admin')
def admin():
return render_template('base-admin.html')
@app.route('/bcp')
def bcp():
return render_template('bcp.html')
@app.route('/dataset')
@app.route('/datasets')
def dataset():
return redirect('/datasets/search')
@app.route('/datasets/search')
@app.route('/datasets/search', methods=['GET', 'POST'])
@app.route('/datasets/search/<int:page>', methods=['GET', 'POST'])
def dataset_search(page=1):
parent_cates = Category.query.filter(
Category.parent_id == 0).order_by(Category.type).all()
cates = Category.query.filter(
Category.parent_id > 0).order_by(desc(Category.sort)).all()
cates_map = {}
category_count(cates)
for c in cates:
cates_map[str(c.id)] = c.name
cate_url = request.args.get('cate', '')
cate_args = cate_url.split(',') if cate_url else []
cate_parent_map = {
10001: u'authority',
11001: u'subject',
12001: u'datatype',
13001: u'expert',
14001: u'kind',
15001: u'org'
}
cate_obj = {}
cate_parent_ids = []
for a in cate_args:
if a:
p, c = a.split(':')
cate_obj[cate_parent_map[int(p)]] = c
cate_parent_ids.append(int(p))
if cate_parent_ids:
parent_cates = [p for p in parent_cates if p.id not in cate_parent_ids]
search = request.args.get('search', '').encode('utf-8')
sort = request.args.get('sort', 'updated')
datasets = Dataset.query.filter(Dataset.title.like('%'+ search.decode('utf-8') + '%'))
for k in cate_obj:
if cate_obj[k]:
datasets = datasets.filter(
getattr(Dataset, k + '_id') == cate_obj[k])
datasets = datasets.order_by(desc(
sort if sort in ['published', 'updated'] else 'updated'))
pagination = datasets.paginate(page, POSTS_PER_PAGE, False)
datasets = pagination
EventLogs.view_dataset_list(page_num=page,sort_type=sort,cate=cate_args,keywords=search,dataset_num=1)
return render_template('dataset-search.html',
cates=cates,
parent_cates=parent_cates,
datasets=datasets,
cate_obj=cate_obj,
cate_url=cate_url,
cate_args=cate_args,
search=search,
sort=sort,
cates_map=cates_map,
pagination=pagination)
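# Hedged sketch of the "cate" query-string format parsed above: entries are
# "<parent_id>:<category_id>" pairs, comma separated, so e.g.
# "10001:3,11001:7" filters on authority_id == '3' and subject_id == '7'
# (the concrete ids and the helper name are illustrative only).
def _parse_cate_example(cate_url):
    mapping = {'10001': u'authority', '11001': u'subject',
               '12001': u'datatype', '13001': u'expert',
               '14001': u'kind', '15001': u'org'}
    result = {}
    for item in cate_url.split(','):
        if item:
            parent, child = item.split(':')
            result[mapping[parent]] = child
    return result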
def category_count(category_list):
for l in category_list:
cate = Category.query.filter(Category.id == l.id).first()
if str(l.parent_id) == '10001':
cate.sort=Dataset.query.filter(Dataset.authority_id == l.id).count()
db.session.add(cate)
elif str(l.parent_id) == '11001':
cate.sort = Dataset.query.filter(Dataset.subject_id == l.id).count()
elif str(l.parent_id) == '12001':
cate.sort = Dataset.query.filter(Dataset.datatype_id == l.id).count()
elif str(l.parent_id) == '13001':
cate.sort = Dataset.query.filter(Dataset.expert_id == l.id).count()
elif str(l.parent_id) == '14001':
cate.sort = Dataset.query.filter(Dataset.kind_id == l.id).count()
elif str(l.parent_id) == '15001':
cate.sort = Dataset.query.filter(Dataset.org_id == l.id).count()
db.session.commit()
@app.route('/datasets/item/<path:id>')
@app.route('/datasets/item/<path:id>/meta')
def dataset_item_meta(id):
dataset = Dataset.query.filter_by(udi=id).first()
# count dataset page views
if dataset.views is None:
dataset.views = 0
dataset.views += 1
if dataset.signature is None:
file_path = os.path.join(
app.config['UPLOAD_FOLDER'], dataset.datasource
)
dataset.signature = ds_sha2(file_path)
db.session.add(dataset)
db.session.commit()
EventLogs.view_dataset_detail(dataset_id=id,sub_uri='meta')
fileFull = dataset.datasource.split('/').pop()
fileName = dataset.datasource.split('/').pop().split('.').pop(0)
fileExt = dataset.datasource.split('/').pop().split('.').pop()
ext = app.config['DATASET_FILE_EXTENSION']
return render_template('dataset-meta.html', dataset=dataset, fileFull=fileFull, fileName=fileName, fileExt=fileExt, ext=ext)
@app.route('/datasets/item/<path:id>/notify')
def dataset_item_notify(id):
dataset = Dataset.query.filter_by(udi=id).first()
# EventLogs.view_dataset_detail(dataset_id=id, sub_uri='notify')
fileFull = dataset.datasource.split('/').pop()
fileName = dataset.datasource.split('/').pop().split('.').pop(0)
fileExt = dataset.datasource.split('/').pop().split('.').pop()
ext = app.config['DATASET_FILE_EXTENSION']
return render_template('dataset-notify.html', dataset=dataset, fileFull=fileFull, fileName=fileName, fileExt=fileExt, ext=ext)
@app.route('/datasets/item/<path:id>/preview')
def dataset_item_preview(id):
dataset = Dataset.query.filter_by(udi=id).first()
EventLogs.view_dataset_detail(dataset_id=id, sub_uri='preview')
fileFull = dataset.datasource.split('/').pop()
fileName = dataset.datasource.split('/').pop().split('.').pop(0)
fileExt = dataset.datasource.split('/').pop().split('.').pop()
ext = app.config['DATASET_FILE_EXTENSION']
return render_template('dataset-preview.html', dataset=dataset, fileFull=fileFull, fileName=fileName, fileExt=fileExt, ext=ext)
@app.route('/datasets/item/<path:id>/analysis')
def dataset_item_analysis(id):
dataset = Dataset.query.filter_by(udi=id).first()
EventLogs.view_dataset_detail(dataset_id=id, sub_uri='analysis')
return render_template('dataset-analysis.html', dataset=dataset)
@app.route('/datasets/item/<path:id>/download')
def dataset_item_download(id):
dataset = Dataset.query.filter_by(udi=id).first()
dataset_his = DatasetHistory.query.filter_by(
udi=id).order_by(DatasetHistory.his_id.desc()).all()
EventLogs.view_dataset_detail(dataset_id=id, sub_uri='download')
fileFull = dataset.datasource.split('/').pop()
fileName = dataset.datasource.split('/').pop().split('.').pop(0)
fileExt = dataset.datasource.split('/').pop().split('.').pop()
logs_data = get_dataset_logs(dataset_id=id)
ext = app.config['DATASET_FILE_EXTENSION']
return render_template('dataset-download.html', dataset=dataset, dataset_his=dataset_his, fileFull=fileFull, fileName=fileName, fileExt=fileExt, ext=ext, logs_data=logs_data)
@app.route('/datasets/item/<path:id>/rq')
def dataset_item_rq_anay(id):
dataset = Dataset.query.filter_by(udi=id).first()
uploads = os.path.join(
app.config['UPLOAD_FOLDER'], dataset.datasource
)
EventLogs.view_dataset_detail(dataset_id=id,sub_uri='rq')
return redirect('/demo/raqsoft/guide/jsp/analyse.jsp?dfxParams=csvFile='+uploads)
@app.route('/datasets/item/<path:id>/auth')
@login_required
def dataset_item_auth(id):
dataset = Dataset.query.filter_by(udi=id).first()
apply_waiting=dataset.get_waiting_applications()
applied=dataset.get_applied_applications()
EventLogs.view_dataset_detail(dataset_id=id, sub_uri='auth')
return render_template('dataset-auth.html',dataset=dataset,applied=applied,apply_waiting=apply_waiting)
@app.route('/datasets/item/<path:id>/download/<string:path>/<string:prefix>/<string:surfix>/<string:timestamp>/<string:filename>/version/<int:version>', methods=['GET', 'POST'])
@app.route('/datasets/item/<path:id>/download/<string:path>/<string:prefix>/<string:surfix>/<string:timestamp>/<string:filename>', methods=['GET', 'POST'])
@login_required
def dataset_download(id, path, prefix, surfix, timestamp, filename, version=None):
dataset = Dataset.query.filter_by(udi=id).first()
if not dataset or not dataset.is_authorized(g.user):
return abort(403)
if version:
dataset_his = DatasetHistory.query.filter(
DatasetHistory.udi == id, DatasetHistory.his_version == version).first()
if not dataset_his:
return abort(404)
uploads = os.path.join(
app.config['UPLOAD_FOLDER'], path, prefix, surfix, timestamp
)
EventLogs.view_dataset_detail(dataset_id=id, sub_uri='downlaoded',is_downloaded=True,uid_for_dataset=dataset.author_id)
return send_from_directory(directory=uploads, filename=filename,as_attachment=True)
@app.route('/raw')
def raw_index():
return render_template('raw/index.html')
@app.route('/raw/colors')
def raw_colors_template():
return render_template('raw/colors.html')
@app.route('/raw/dimensions')
def raw_dimensions_template():
return render_template('raw/dimensions.html')
@app.route('/register/set/password', methods=['GET', 'POST'])
def register_set_password():
form = RegistrationForm()
cellphone = session['cellphone']
if request.method == 'POST':
nickname = form.email.data.split('@')[0]
return validate_register(dict(nickname=nickname, email=form.email.data, password=form.password.data, cellphone=cellphone, confirm=form.confirm.data))
EventLogs.user_register(is_succeed=False)
return render_template('register.html', form=form,cellphone=cellphone)
@app.route('/register/success', methods=['GET'])
def register_success():
return render_template('register-success.html')
@app.route('/rest/get/code', methods=["GET"])
def get_code():
gt = GeetestLib(PC_GEETEST_ID, PC_GEETEST_KEY)
status = gt.pre_process()
session[gt.GT_STATUS_SESSION_KEY] = status
response_str = gt.get_response_str()
return response_str
def validate_register(resp):
form = SmsCaptcha()
if resp['password'] is None or resp['password'] == "":
flash(gettext(u'密码不能为空.'))
return redirect(url_for('register_set_password'))
if not User.make_valid_cellphone(resp['cellphone']):
flash(gettext(u'手机号不能为空或手机号已被注册'))
return redirect(url_for('register_set_password'))
if not User.make_valid_email(email=resp['email']):
flash(gettext(u'邮箱不能为空或邮箱已被注册'))
return redirect(url_for('register_set_password'))
if resp['password'] != resp['confirm']:
flash(gettext(u'两次密码输入不一致'))
return redirect(url_for('register_set_password'))
if not re.match(r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$', resp['email']):
flash(gettext(u'您的电子邮件格式不正确!'))
return redirect(url_for('register_set_password'))
if not re.match(r'(13\d|14[57]|15[^4,\D]|17[678]|18\d)\d{8}|170[059]\d{7}', resp['cellphone']) or len(resp['cellphone']) != 11:
flash(gettext(u'您的手机号格式不正确!'))
return redirect(url_for('register_set_password'))
user = User(nickname=resp['nickname'], cellphone=resp['cellphone'], email=resp['email'], password=resp['password'])
db.session.add(user)
db.session.commit()
login_user(user)
EventLogs.user_register(is_succeed=True, cellphone=resp['cellphone'], email=resp['email'], register_time=get_now_str())
EventLogs.user_login(is_succeed=True, email=resp['email'], cellphone=resp['cellphone'])
return redirect(url_for('register_success'))
@app.route('/register/captcha/sms', methods=['GET', 'POST'])
def register_send_captcha_sms():
form = SmsCaptcha()
if request.method == 'POST':
cellphone = request.form['cellphone']
if cellphone == '':
flash(u'手机号不能为空!')
return render_template('register_send_sms.html', form=form)
if not User.make_valid_cellphone(cellphone):
flash(u'你的手机号已注册,请登录')
return redirect(url_for('login'))
# newly added
session['cellphone'] = cellphone
return render_template('register_validate_sms.html', form=form)
# newly added
return render_template('register_send_sms.html', form=form)
# captcha verification and SMS-captcha sending steps disabled below
# captcha = rand_sms_captcha()
# gt = GeetestLib(PC_GEETEST_ID, PC_GEETEST_KEY)
# challenge = request.form[gt.FN_CHALLENGE]
# validate = request.form[gt.FN_VALIDATE]
# seccode = request.form[gt.FN_SECCODE]
# status = session[gt.GT_STATUS_SESSION_KEY]
# if status:
# result = gt.success_validate(challenge, validate, seccode)
# else:
# result = gt.failback_validate(challenge, validate, seccode)
# if result:
# send_sms(cellphone, captcha)
# session['captcha'] = captcha
# session['cellphone'] = cellphone
# return render_template('register_validate_sms.html', form=form)
# else:
# flash(gettext(u'验证码不正确!'))
# return render_template('register_send_sms.html', form=form)
# return render_template('register_send_sms.html', form=form)
@app.route('/register/captcha/sms/validate', methods=['GET', 'POST'])
def register_validate_captcha_sms():
form = SmsCaptcha()
if request.method == 'POST':
# newly added
return redirect(url_for('register_set_password'))
# captcha verification step disabled
# if str(session['captcha']) == str(form.captcha.data):
# return redirect(url_for('register_set_password'))
# else:
# flash(u'验证码错误')
# return render_template('register_validate_sms.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
if g.user is not None and g.user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user_id = form.user_id.data
password = form.password.data
return validate_login({
"user_id": user_id,
"email": user_id,
"cellphone": user_id,
"password": password,
"remember_me": form.remember_me.data
})
EventLogs.user_login()
return render_template('login.html',
title=u'请登录',
form=form)
def validate_login(resp):
if resp['user_id'] is None or resp['user_id'] == "" or resp['password'] is None or resp['password'] == "":
flash(gettext('Invalid login. Please try again.'))
return redirect(url_for('login'))
user = User.query.filter(
or_(User.email == resp['email'], User.cellphone == resp['cellphone'])).first()
if user is None:
flash(gettext(u"账号不存在!请检查重试!"))
return redirect(url_for('login'))
user = User.query.filter(or_(User.email == resp['email'], User.cellphone == resp[
'cellphone']), User.password == resp['password']).first()
if user is None:
flash(gettext(u'密码错误!'))
return redirect(url_for('login'))
remember_me = False
if 'remember_me' in resp:
remember_me = resp['remember_me']
login_user(user, remember=remember_me)
EventLogs.user_login(is_succeed=True,email=resp['email'],cellphone=resp['cellphone'])
return redirect(request.args.get('next') or url_for('dataset'))
@app.route('/logout')
@login_required
def logout():
EventLogs.user_logout()
logout_user()
return redirect(url_for('dataset'))
@app.route('/forget')
def forget():
return render_template('forget.html')
@app.route('/about')
def about():
EventLogs.view_about()
return render_template('about-core.html')
@app.route('/about/lab')
def about_lab():
EventLogs.view_about(sub_uri='lab')
return render_template('about-lab.html')
@app.route('/about/center')
def about_center():
EventLogs.view_about(sub_uri='center')
return render_template('about-center.html')
@app.route('/about/team')
def about_team():
EventLogs.view_about(sub_uri='team')
return render_template('about-team.html')
@app.route('/join')
def join():
EventLogs.view_join()
return render_template('join.html')
def generate_udi():
return app.config["CORE_PREFIX_ID"] + '/' + str(int((time.time() - 3600 * 24 * 365 * 44) * 100))
@app.route('/datasets/add', methods=['GET', 'POST'])
@login_required
def dataset_add():
user = User.query.filter_by(id=g.user.id).first()
if not user.invitation_active:
return redirect(url_for('invitation_code_query'))
form = DatasetForm()
if form.validate_on_submit():
udi = generate_udi()
f = form.datasource.data
filedir = os.path.join(
app.config['UPLOAD_FOLDER'], 'files', udi, str(time.time()))
if not os.path.exists(filedir):
os.makedirs(filedir)
filename = filedir.split(
app.config['UPLOAD_FOLDER'])[-1] + '/' + secure_filename(f.filename)
file_path = os.path.join(
app.config['UPLOAD_FOLDER'], filename
)
f.save(file_path)
hash_obj = ds_sha2(file_path)
dataset = Dataset(
udi=udi,
title=request.form['title'],
datasource=filename,
author_id=g.user.id,
contact='',
desc=request.form['desc'],
signature = hash_obj,
authority_id=request.form['authority'],
kind_id=request.form['kind'],
expert_id=request.form['expert'],
subject_id=request.form['subject'],
datatype_id=request.form['datatype'],
org_id=request.form['org']
)
db.session.add(dataset)
db.session.commit()
EventLogs.create_dataset(dataset_id=udi,is_created=True)
CoreAPI.create_dataset(udi.split("/")[1],hash_obj)
flash(gettext('数据集已成功提交保存'))
return redirect(url_for('dataset_item_meta',id=udi))
elif request.method == "POST":
flash_form_errors(form)
EventLogs.create_dataset()
return render_template('dataset-add.html', form=form, action="Add")
@app.route('/invitation/code/query',methods=['GET','POST'])
@login_required
def invitation_code_query():
form = InvitationCode()
user = User.query.filter_by(id=g.user.id).first()
if request.method == 'POST':
invitation_code = request.form['invitation_code']
gt = GeetestLib(PC_GEETEST_ID, PC_GEETEST_KEY)
challenge = request.form[gt.FN_CHALLENGE]
validate = request.form[gt.FN_VALIDATE]
seccode = request.form[gt.FN_SECCODE]
status = session[gt.GT_STATUS_SESSION_KEY]
if status:
result = gt.success_validate(challenge, validate, seccode)
else:
result = gt.failback_validate(challenge, validate, seccode)
if result:
if invitation_code == app.config['INVITATION_CODE']:
user.invitation_active = True
db.session.add(user)
db.session.commit()
return redirect(url_for('dataset_add'))
else:
flash(u'你的邀请码无效')
return render_template('invitation_code_quert.html', form=form)
else:
flash(gettext(u'验证码不正确!'))
return render_template('invitation_code_quert.html', form=form)
return render_template('invitation_code_quert.html', form=form)
@app.route('/datasets/edit/<path:id>', methods=['GET', 'POST'])
@login_required
def dataset_edit(id):
dataset = Dataset.query.filter(
Dataset.udi == id, Dataset.author_id == g.user.id).first()
if not dataset or not dataset.is_editable(g.user):
return abort(403)
form = EditDatasetForm(obj=dataset)
if form.validate_on_submit():
udi = dataset.udi
f = form.datasource.data
if f:
filedir = os.path.join(
app.config['UPLOAD_FOLDER'], 'files', udi, str(time.time()))
if not os.path.exists(filedir):
os.makedirs(filedir)
filename = filedir.split(
app.config['UPLOAD_FOLDER'])[-1] + '/' + secure_filename(f.filename)
file_path = os.path.join(
app.config['UPLOAD_FOLDER'], filename
)
f.save(file_path)
hash_obj = ds_sha2(file_path)
else:
            filename =
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import copy
import lzma
import math
import os
import pickle
import struct
import time
from collections import namedtuple
from collections.abc import Sequence
from datetime import datetime
from enum import Enum
from typing import Dict, List, Union
import fasteners
from .targets import sos_targets
from .utils import (DelayedAction, env, expand_size, expand_time,
format_duration, format_HHMMSS, linecount_of_file,
pretty_size, sample_lines, short_repr, tail_of_file)
monitor_interval = 5
resource_monitor_interval = 60
class TaskParams(object):
"""A parameter object that encaptulates parameters sending to
task executors. This would makes the output of workers, especially
in the web interface much cleaner (issue #259)"""
def __init__(self, name, global_def, task, sos_dict, tags):
self.name = name
self.global_def = global_def
self.task = task
self.sos_dict = sos_dict
self.tags = tags
# remove builtins that could be saved in a dictionary
if "CONFIG" in self.sos_dict and "__builtins__" in self.sos_dict[
"CONFIG"]:
self.sos_dict["CONFIG"].pop("__builtins__")
def __repr__(self):
return self.name
class MasterTaskParams(TaskParams):
def __init__(self, num_workers=None):
self.ID = "t0"
self.name = self.ID
self.global_def = None
self.task = ""
self.sos_dict = {
"_runtime": {
"num_workers": num_workers
},
"_input": sos_targets(),
"_output": sos_targets(),
"_depends": sos_targets(),
"step_input": sos_targets(),
"step_output": sos_targets(),
"step_depends": sos_targets(),
"step_name": "",
"_index": 0,
}
self.tags = []
# a collection of tasks that will be executed by the master task
self.task_stack = []
def _parse_num_workers(self, num_workers):
# return number of nodes and workers
if isinstance(num_workers, Sequence) and len(num_workers) >= 1:
val = str(num_workers[0])
n_workers = val.rsplit(":", 1)[-1] if ":" in val else val
n_nodes = len(num_workers)
elif isinstance(num_workers, str):
n_workers = (
num_workers.rsplit(":", 1)[-1]
if ":" in num_workers else num_workers)
n_nodes = 1
elif isinstance(num_workers, int):
n_workers = num_workers
n_nodes = 1
elif num_workers is None:
n_workers = 1
n_nodes = 1
else:
raise RuntimeError(
f"Unacceptable value for parameter trunk_workers {num_workers}")
try:
n_workers = int(n_workers)
except Exception as e:
raise ValueError(
f"Unacceptable value for option trunk_workers {num_workers}") from e
if n_workers <= 0:
raise ValueError(
f"Unacceptable value for option trunk_workers {num_workers}")
return n_nodes, n_workers
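    # Illustrative examples of accepted trunk_workers values and the
    # (n_nodes, n_workers) pairs returned (hypothetical host names):
    #   8                  -> (1, 8)
    #   "node1:4"          -> (1, 4)   (the host prefix is stripped)
    #   ["n1:2", "n2:2"]   -> (2, 2)   (one list entry per node)
    #   None               -> (1, 1)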
def num_tasks(self):
return len(self.task_stack)
def push(self, task_id, params):
        # update walltime, cores, and mem
        # right now we require all tasks to have the same resource requirement,
        # which is quite natural because they are from the same step
#
# update input, output, and depends
#
# walltime etc
n_nodes, n_workers = self._parse_num_workers(
self.sos_dict["_runtime"]["num_workers"])
if not self.task_stack:
for key in (
"walltime",
"max_walltime",
"cores",
"nodes",
"max_cores",
"mem",
"max_mem",
"name",
"workdir",
"verbosity",
"sig_mode",
"run_mode",
):
if (key in params.sos_dict["_runtime"] and
params.sos_dict["_runtime"][key] is not None):
self.sos_dict["_runtime"][key] = params.sos_dict[
"_runtime"][key]
self.sos_dict["step_name"] = params.sos_dict["step_name"]
self.tags = params.tags
else:
for key in (
"walltime",
"max_walltime",
"cores",
"max_cores",
"mem",
"max_mem",
"name",
"workdir",
):
val0 = self.task_stack[0][1].sos_dict["_runtime"].get(key, None)
val = params.sos_dict["_runtime"].get(key, None)
if val0 != val:
raise ValueError(
f"All tasks should have the same resource {key}")
if val0 is None:
continue
# If there are multiple nodes and multiple workers, there are
# n_workers * n_nodes workers at the same time, so the jobs
# will be completed in n_batches
n_batches = math.ceil(
(len(self.task_stack) + 1) / (n_workers * n_nodes))
if key == "walltime":
# the real walltime would be the total time on one node
self.sos_dict["_runtime"]["walltime"] = format_HHMMSS(
n_batches * expand_time(val0))
elif key == "mem":
                    # memory for each task times the number of concurrent workers
self.sos_dict["_runtime"]["mem"] = n_workers * expand_size(
val0)
elif key == "cores":
self.sos_dict["_runtime"]["cores"] = n_workers * val0
elif key == "name":
self.sos_dict["_runtime"][
"name"] = f"{val0}_{len(self.task_stack) + 1}"
self.tags.extend(params.tags)
# if cores is unspecified but there are more than one workers
if ("cores" not in self.sos_dict["_runtime"] and
n_workers is not None and n_workers > 1):
self.sos_dict["_runtime"]["cores"] = n_workers
#
# input, output, preserved vars etc
for key in ["_input", "_output", "_depends"]:
if key in params.sos_dict and isinstance(params.sos_dict[key],
sos_targets):
if key == "__builtins__":
continue
# do not extend duplicated input etc
self.sos_dict[key].extend(params.sos_dict[key])
#
self.task_stack.append([task_id, params])
self.tags = sorted(list(set(self.tags)))
#
id_prefix = f't{len(self.task_stack)}'
self.ID = f"{id_prefix}{self.task_stack[0][0][:-(len(id_prefix))]}"
self.name = self.ID
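        # Note (added): the master ID keeps the length of the first subtask id
        # by trimming as many trailing characters as the "t<count>" prefix adds,
        # e.g. (hypothetical) first id "9f86d081884c" with 3 stacked subtasks
        # yields ID "t39f86d08188".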
def finalize(self):
if not self.task_stack:
return
common_dict = None
common_keys = set()
for _, params in self.task_stack:
if common_dict is None:
common_dict = params.sos_dict
common_keys = set(params.sos_dict.keys())
else:
common_keys = {
key for key in common_keys if key in params.sos_dict and
common_dict[key] == params.sos_dict[key]
}
if not common_keys:
break
        # if there is only one subtask, _output would be moved out of the
        # subtasks, which makes the retrieval of outputs difficult.
common_keys.discard("_output")
self.common_dict = {x: common_dict[x] for x in common_keys}
for _, params in self.task_stack:
params.sos_dict = {
k: v for k, v in params.sos_dict.items() if k not in common_keys
}
#
n_nodes = self._parse_num_workers(
self.sos_dict["_runtime"]["num_workers"])[0]
# trunk_workers and cores cannot be specified together, so if n_nodes > 1,
# nodes should not have been specified.
if n_nodes is not None and n_nodes > 1:
self.sos_dict["_runtime"]["nodes"] = n_nodes
return self
def combine_results(task_id, results):
# now we collect result
all_res = {
"ret_code": 0,
"output": None,
"subtasks": {},
"shared": {},
"skipped": 0,
"signature": {},
}
for res in results:
tid = res["task"]
all_res["subtasks"][tid] = res
if "exception" in res:
all_res["exception"] = res["exception"]
all_res["ret_code"] += 1
continue
all_res["ret_code"] += res["ret_code"]
if all_res["output"] is None:
all_res["output"] = copy.deepcopy(res["output"])
else:
try:
all_res["output"].extend(res["output"], keep_groups=True)
except Exception:
env.logger.warning(
f"Failed to extend output {all_res['output']} with {res['output']}"
)
all_res["shared"].update(res["shared"])
        # count skipped subtasks, whether some or all of them were skipped.
all_res["skipped"] += res.get("skipped", 0)
if "signature" in res:
all_res["signature"].update(res["signature"])
if all_res["ret_code"] != 0:
if all_res["ret_code"] == len(results):
if env.config["run_mode"] == "run":
env.logger.info(
f"All {len(results)} tasks in {task_id} ``failed``")
else:
env.logger.debug(
f"All {len(results)} tasks in {task_id} ``failed``")
else:
if env.config["run_mode"] == "run":
env.logger.info(
f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
)
else:
env.logger.debug(
f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
)
            # if some tasks failed and some were skipped, do not report the run as skipped
if "skipped" in all_res:
all_res.pop("skipped")
elif all_res["skipped"]:
if all_res["skipped"] == len(results):
if env.config["run_mode"] == "run":
env.logger.info(
f"All {len(results)} tasks in {task_id} ``ignored`` or skipped"
)
else:
env.logger.debug(
f"All {len(results)} tasks in {task_id} ``ignored`` or skipped"
)
else:
# if only partial skip, we still save signature and result etc
if env.config["run_mode"] == "run":
env.logger.info(
f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
)
else:
env.logger.debug(
f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
)
all_res.pop("skipped")
else:
if env.config["run_mode"] == "run":
env.logger.info(
f"All {len(results)} tasks in {task_id} ``completed``")
else:
env.logger.debug(
f"All {len(results)} tasks in {task_id} ``completed``")
return all_res
class TaskStatus(Enum):
new = 0
pending = 1
submitted = 2
running = 3
aborted = 4
failed = 5
completed = 6
class TaskFile(object):
"""
The task file has the following format:
1. A binary header with the information of the structure of the file
with field defined by TaskHeader
2. compressed pickled param of task
3. compressed pulse file
4. compressed pickled result
5. compressed stdout
6. compressed stderr
7. compressed pickled signatures
"""
TaskHeader_v1 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader_v2 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size shell_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader_v3 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size runtime_size shell_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader = TaskHeader_v3
header_fmt_v1 = "!2h 8d 6i 128s"
header_fmt_v2 = "!2h 8d 7i 124s"
header_fmt_v3 = "!2h 8d 8i 120s"
header_fmt = header_fmt_v3
header_size = 220 # struct.calcsize(header_fmt)
tags_offset = [92, 96, 100] # struct.calcsize(status_fmt + '6i')
tags_size = [128, 124, 120]
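    # Sanity check of the hard-coded sizes above (verifiable with
    # struct.calcsize): "!2h 8d" occupies 2*2 + 8*8 = 68 bytes, so the tags
    # offsets are 68 + {6, 7, 8}*4 = 92/96/100 for v1/v2/v3, and the v3 header
    # totals 68 + 8*4 + 120 = 220 bytes, matching header_size.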
def __init__(self, task_id: str):
self.task_id = task_id
self.task_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task_id + ".task")
def save(self, params):
if os.path.isfile(self.task_file):
if self.status == "running":
env.logger.debug(
f"Task {self.task_id} is running and is not updated")
return
# keep original stuff but update params, which could contain
# new runtime info
self.params = params
return
# updating job_file will not change timestamp because it will be Only
        # the update
or ``False``, then this is equivalent to
calling :func:`set_savefig` with that value.
:param cell_offset: is a vertical offset for this set of spikes in the
spike raster. By default this is ``0``. However, if the spike
raster contains stacked cells of different types (i.e. multiple
spike sets each with a different label), then the offset values
specifies the vertical offset for these cells.
"""
# Make sure this is a valid set of spikes
        if not isinstance(spikes, (list, numpy.ndarray)):
errstr = 'spikes needs to be a list of lists or 2d numpy array'
raise TypeError(errstr)
# If there are no spikes to draw, just return
if len(spikes)==0:
return
# Remove any pre-existing lines
self._remove_lines(label)
# Get the minimum and maximum values of these spikes
try:
spikes_min_x, spikes_max_x = spiketrain.get_spike_bounds(spikes)
except (TypeError, listutil.ListEmptyError) as ex:
print ex
return
# If refresh is True, then we completely redraw the figure.
if self._fig is None:
# Make a new figure
self._fig = pyplot.figure(figsize=self._figsize)
self._raster_axes = None
if self._raster_axes is None:
# Make the spike axes. Set to fill the figure and adjust later if
# other axes are drawn.
self._raster_axes = self._fig.add_subplot(111)
self._raster_axes.tick_params(direction='out', length=4,
width=.75, color='black')
# Update the spike_params dictionary with these spikes.
try:
self._spike_params.pop(label)
except KeyError:
pass # Ignore if not present
if cell_offset > 0:
new_height = len(spikes) + cell_offset
for spike_params in self._spike_params.itervalues():
msize = self._calculate_markersize(new_height,
spike_params.markerscale)
for line in spike_params.raster_lines:
line.set_markersize(msize)
spike_params = spikeparams.SpikeParams(spikes=spikes, label=label, \
data_xlim = [spikes_min_x, spikes_max_x], \
cell_offset=cell_offset, \
marker=self._marker, markercolor=self._markercolor, \
markerscale=self._markerscale, \
markeredgewidth=self._markeredgewidth, \
linestyle=self._linestyle, linewidth=self._linewidth, \
sth_redraw=self._sth.redraw, \
sth_style=self._sth._style, sth_dt=self._sth._dt, \
sth_kernel=self._sth._kernel, sth_origin=self._sth._origin, \
sum_linewidth=self._sum._linewidth, \
sum_redraw=self._sum.redraw, \
sum_style=self._sum._style)
self._spike_params[label] = spike_params
# Update the complete data bounds
self._calculate_data_xlim()
# Update raster_axes ylim
height = self._calculate_ylim()
# Get the x data length
range_x = self._data_xlim[1] - self._data_xlim[0] + 1
# Pad the visible axis range a bit.
self._axes_lim[0] = numpy.rint(self._data_xlim[0] - \
(range_x*self._data_pad))
self._axes_lim[1] = max( \
numpy.rint(self._data_xlim[1] + (range_x*self._data_pad)), \
self._data_xlim[1] + 1)
self._raster_axes.set_xlim(self._axes_lim)
# markersize to be spike axes height / (num_cells+1)
m_size = self._calculate_markersize(height)
raster_lines = []
# Actually draw the spikes.
for i in xrange(len(spikes)):
row = numpy.ones_like(spikes[i])*(i + 1 + cell_offset)
line = lines.Line2D(spikes[i], row, \
linestyle=self._linestyle, linewidth=self._linewidth, \
color=self._markercolor, \
marker=self._marker, markeredgecolor=self._markercolor, \
markerfacecolor=self._markercolor, markersize=m_size, \
markeredgewidth=self._markeredgewidth)
self._raster_axes.add_line(line)
raster_lines.append(line)
self._spike_params[label].raster_lines = raster_lines
# If there is a SpikeTimeHistogram object, create or update it.
if self._sth_ratio > 0.:
if self._sth._axes is None:
self.set_sth_ratio(self._sth_ratio)
self._sth.plot(spike_params)
# If there is a SpikeSum object, create or update it.
if self._sum_ratio > 0.:
if self._sum._axes is None:
self.set_sum_ratio(self._sum_ratio)
self._sum.plot(spike_params)
# If we are supposed to draw, go ahead.
self._do_draw(draw)
# If we are supposed to save the figure, do so.
self._do_savefig(savefig)
def _remove_lines(self, label):
"""Remove the drawn spikes with this label from the raster axes."""
if self._raster_axes is not None:
try:
spike_params = self._spike_params.pop(label)
for line in spike_params.raster_lines:
self._raster_axes.lines.remove(line)
except KeyError:
pass # Ignore if the lines do not already exist
def _calculate_data_xlim(self):
"""Calculate the earliest and latest spike times out of all spikes
in the spikes dictionary."""
min_x = sys.maxint
max_x = 0.
for spike_param in self._spike_params.itervalues():
xlim = spike_param.data_xlim
if xlim[0] < min_x:
min_x = xlim[0]
if xlim[1] > max_x:
max_x = xlim[1]
self._data_xlim = [min_x, max_x]
def _calculate_ylim(self):
"""Calculate the total number of rows to draw. """
total_cells = 0
for spike_param in self._spike_params.itervalues():
num_cells = spike_param.cell_offset + len(spike_param.spikes)
if num_cells > total_cells:
total_cells = num_cells
ylim = float(total_cells + 1)
self._raster_axes.set_ylim(0.5, ylim-.5)
return total_cells
def _do_draw(self, draw=None):
"""Actually draw the figure. """
if draw is not None and (draw is True or draw is False):
self._draw = draw
if self._draw:
pyplot.draw()
pyplot.show()
def _do_savefig(self, savefig=None):
"""Actually save the figure to a file. """
if savefig is not None and vartest.isbool(savefig, 'savefig'):
self._savefig = savefig
if self._savefig:
if self._fig_name.endswith('.pdf'):
pdf = PdfPages(self._fig_name)
pdf.savefig(self._fig)
pdf.close()
else:
pyplot.savefig(self._fig_name)
def update_xlim(self, xlim):
"""Update the xlim of the raster axes and propagate this info to
the other axes if they are visible, redrawing and re-writing a
figure if necessary.
:param xlim: is a tuple of length 2 specifying the new axes bounds.
"""
if type(xlim) is tuple and len(xlim)==2:
self._raster_axes.set_xlim(xlim)
if self._sth is not None:
self._sth.update_xlim()
if self._sum is not None:
self._sum.update_xlim()
self._do_draw() # Draw if necessary
self._do_savefig() # Save the figure, if necessary
def _calculate_markersize(self, num_cells, markerscale=None):
"""Given the markerscale and the size of the raster axes, determine
the size of the marker to draw.
"""
pos = self._raster_axes.get_position()
if markerscale is None:
markerscale = self._markerscale
return float(self._raster_axes.bbox.size[1])/ \
float(num_cells) * (pos.height + pos.ymin) * markerscale
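        # Illustrative sizing (hypothetical numbers): with a raster bbox 400 px
        # tall, 50 cells, pos.height + pos.ymin = 0.9 and markerscale = 1.0,
        # this returns 400 / 50 * 0.9 = 7.2.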
def get_savefig(self):
"""Returns the flag of whether a file is saved upon plotting."""
return self._savefig
def get_marker(self):
"""Returns the marker type for raster tick marks."""
return self._marker
def get_markercolor(self):
"""Returns the marker and line color."""
return self._markercolor
def get_markerscale(self):
"""Returns the relative size of the marker in the raster plot."""
return self._markerscale
def get_markeredgewidth(self):
"""Returns the size of the tick mark edge. In the case of a vertical
bar, this is the linewidth of that bar."""
return self._markeredgewidth
def get_linestyle(self):
"""Returns the linestyle of the raster plot."""
return self._linestyle
def get_linewidth(self):
"""Returns the line width of the raster plot if the linestyle is not
'none'."""
return self._linewidth
def get_fig_name(self):
"""Returns the name of the file that will be written when plotted
with the *savefig* variable set to True."""
return self._fig_name
def get_fig(self):
"""Returns a handle to the Figure."""
return self._fig
def get_raster_axes(self):
"""Returns a handle to the raster axes."""
return self._raster_axes
def get_figsize(self):
"""Returns the size of the figure in inches."""
return self._figsize
def get_sth(self):
"""Returns the instance of the SpikeTimeHistogram object."""
return self._sth
def get_sum(self):
"""Returns the instance of the SpikeSum object."""
return self._sum
def get_axes_pad(self):
"""Returns the amount of padding between the raster axes and the
spike time histogram and/or spike sum axes if they are drawn."""
return self._axes_pad
def set_axes_pad(self, pad):
"""
Set the amount of padding between the spike axes and
the spike time histogram and cumulative spike plot if
they are shown.
:param pad: amount to pad. This value is in figure
coordinates and needs to be between 0 and 1, and
probably very close to 0. A value of 0 means that
the axes touch each other.
:type pad: float; default 0.01
"""
vartest.inrange(pad, 0., 1., 'pad')
self._data_pad = pad
def get_data_pad(self):
"""Returns the amount of padding in terms of the data width
to draw a full-screen image. The data width is the last
spike in the data minus the first spike in the data.
The pad ensures that the first and last spike will appear
in the figure and not be blocked by the vertical axis."""
return self._data_pad
def set_data_pad(self, pad):
"""
Set the amount of padding in terms of the data width
to draw a full-screen image. The data width is the last
spike in the data minus the first spike in the data.
The pad ensures that the first and last spike will appear
in the figure and not be blocked by the vertical axis.
:param pad: amount to pad. The default value of 0.025 means
that the maximum x-axis dimensions will be
(first_spike - 0.025*data_width, last_spike + 0.025*data_width).
:type pad: float; default 0.025
"""
vartest.greater_than_or_equal(pad, 0., 'pad')
self._axes_pad = pad
def set_marker(self, marker):
"""
Sets the tick mark symbol. Valid markers are defined by
`Matplotlib.lines.Line2D markers \
<http://matplotlib.sourceforge.net/
api/artist_api.html#matplotlib.lines.Line2D.set_marker>`_.
"""
try:
self._line2d.set_marker(marker)
self._marker = marker
except Exception:
print "Invalid marker parameter:", | |
return psi_hat, u_hat, v_hat, w_hat, b_hat, p_hat
def __forcing_poly(self, P, Q):
""" If background fields are non-uniform, finds a forcing polynomial to transform unforced ODE with
inhomogeneous boundary conditions to a forced ODE with homogeneous boundary conditions.
Called by __fourier_solve"""
F = np.zeros_like(Q, dtype=complex)
R = np.zeros_like(Q, dtype=complex)
nk = Q.shape[0]
H = self.H
z = self.z
for i in range(nk):
a = P[i, 0]
b = Q[i, 0]
c = P[i, -1]
mat = np.array([[2, a - 2 / H], [-4 - c * H, -2 / H - c]])
v = np.array([a / H - b, c / H])
sol = np.dot(np.linalg.inv(mat), v)
A = sol[0]
B = sol[1]
F[i, :] = (1 - z / H) * (A * z ** 2 + B * z + 1)
R[i, :] = -((1 - z / H) * 2 * A - 2 / H * (2 * A * z + B) + P[i, :] *
((1 - z / H) * (2 * A * z + B) - 1 / H * (A * z ** 2 + B * z + 1)) + Q[i, :] * F[i, :])
return F, R
def __galerkin_sol(self, P, Q, R, F):
""" For non-uniform background flow, solves the forced ODE using Galerkin methods. Called by __fourier_solve."""
nz = Q.shape[1]
nk = Q.shape[0]
nm = self.nm
nfft = nz - 1
H = self.H
m0 = pi / H
mb = m0 * np.arange(1, nm + 1)
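        # Basis functions (added note): phi_m(z) = sin(m*pi*z/H) for m = 1..nm,
        # each satisfying the homogeneous conditions phi_m(0) = phi_m(H) = 0.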
phi = np.zeros_like(Q, dtype=complex)
for ik in range(nk):
q = fft.dct(Q[ik, :], type=1)
q[0] /= 2 * nfft
q[1:] /= nfft
p = fft.dst(P[ik, 1:-1], type=1)
p = np.insert(p, 0, 0)
p = np.append(p, 0)
p /= nfft
r = fft.dst(R[ik, 1:-1], type=1)
r = np.insert(r, 0, 0)
r = np.append(r, 0)
r /= nfft
# Initialise matrix
A = np.zeros((nm, nm), dtype=complex)
for m in range(1, nm + 1):
for n in range(1, nm + 1):
if n == m:
A[n - 1, m - 1] = q[0] - (m0 * n) ** 2
elif n > m:
A[n - 1, m - 1] = 0.5 * m * m0 * p[n - m] + 0.5 * q[n - m]
else:
A[n - 1, m - 1] = -0.5 * m * m0 * p[m - n] + 0.5 * q[m - n]
if n + m < nfft:
A[n - 1, m - 1] = A[n - 1, m - 1] + 0.5 * m * m0 * p[n + m] - 0.5 * q[n + m]
r = r[1:nm + 1]
a = np.dot(np.linalg.inv(A), r)
mz = np.outer(self.z, mb)
phi[ik, :] = np.dot(np.sin(mz), a)
eta_hat = phi + F
return eta_hat
def __pad(self, field_hat, k_full, k_trunc):
""" Pads the truncated spectrum with zeros to a full spectrum that FFT can invert.
Called by __fourier_solve(). """
trunc_inds = np.zeros_like(k_trunc).astype(int)
nz = len(self.z)
nk_full = len(k_full)
for ik, k in enumerate(k_trunc):
trunc_inds[ik] = np.where(k_full == k)[0]
field_hat_pad = np.zeros((nk_full, nz), dtype=complex)
for iz in range(nz):
field_hat_pad[trunc_inds, iz] = field_hat[:, iz]
return field_hat_pad
def __inverse_transform(self, field_hat):
""" Takes the inverse Fourier transform (FFT) of the spectral variables. Called by solve()."""
nz = len(self.z)
field = np.zeros_like(field_hat, dtype=float)
# Loop through z values
for iz in range(nz):
field[:, iz] = 1 / self.dx * np.real(fft.fftshift(fft.ifft(fft.ifftshift(field_hat[:, iz]))))
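        # Note (added; an assumption about the transform convention): the 1/dx
        # factor presumably undoes a dx weighting applied in the forward
        # transform, while fftshift/ifftshift keep the k = 0 component centred.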
return field
def __make_wave_fields_dataset(self, k, psi, u, v, w, b, p, h_topo_hat, psi_hat, u_hat, v_hat, w_hat, b_hat, p_hat,
U_2D, B_2D, N2_2D):
""" Turns wave fields into an xarray dataset."""
ds = xr.Dataset(
data_vars=dict(
psi=(["x", "z"], psi),
u=(["x", "z"], u),
v=(["x", "z"], v),
w=(["x", "z"], w),
b=(["x", "z"], b),
p=(["x", "z"], p),
U_2D=(["x", "z"], U_2D),
B_2D=(["x", "z"], B_2D),
N2_2D=(["x", "z"], N2_2D),
h_topo=(["x"], self.h_topo),
psi_hat=(["k", "z"], psi_hat),
u_hat=(["k", "z"], u_hat),
v_hat=(["k", "z"], v_hat),
w_hat=(["k", "z"], w_hat),
b_hat=(["k", "z"], b_hat),
p_hat=(["k", "z"], p_hat),
h_topo_hat=(["k"], h_topo_hat),
),
coords=dict(
x=(["x"], self.x),
k=(["k"], k),
z=(["z"], self.z),
),
attrs=dict(description="Lee wave solver output fields")
)
ds.h_topo.attrs["long_name"] = "Topographic height"
ds.psi.attrs["long_name"] = "Perturbation streamfunction"
ds.u.attrs["long_name"] = "Perturbation velocity u"
ds.v.attrs["long_name"] = "Perturbation velocity v"
ds.w.attrs["long_name"] = "Perturbation velocity w"
ds.b.attrs["long_name"] = "Perturbation velocity b"
ds.p.attrs["long_name"] = "Perturbation pressure p"
ds.b.attrs["long_name"] = "Perturbation buoyancy b"
ds.B_2D.attrs["long_name"] = "Background buoyancy b"
ds.U_2D.attrs["long_name"] = "Background velocity U"
ds.N2_2D.attrs["long_name"] = "Background stratification N^2"
ds.h_topo.attrs["units"] = "m"
ds.psi.attrs["units"] = "m^2/s"
ds.u.attrs["units"] = "m/s"
ds.v.attrs["units"] = "m/s"
ds.w.attrs["units"] = "m/s"
ds.w.attrs["units"] = "m/s"
ds.b.attrs["units"] = "m/s^2"
ds.p.attrs["units"] = "kg/m/s^2"
ds.U_2D.attrs["units"] = "m/s"
ds.B_2D.attrs["units"] = "m/s^2"
ds.N2_2D.attrs["units"] = "1/s"
ds.h_topo_hat.attrs["long_name"] = "Horizontal Fourier transform of topographic height h_topo"
ds.psi_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation streamfunction"
ds.u_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation velocity u"
ds.v_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation velocity v"
ds.w_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation velocity w"
ds.b_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation velocity b"
ds.p_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation pressure p"
ds.b_hat.attrs["long_name"] = "Horizontal Fourier transform of perturbation buoyancy b"
ds.x.attrs["long_name"] = "Horizontal distance"
ds.z.attrs["long_name"] = "Height above bottom"
ds.k.attrs["long_name"] = "Horizontal wavenumber"
ds.h_topo_hat.attrs["units"] = "m^2"
ds.psi_hat.attrs["units"] = "m^3/s"
ds.u_hat.attrs["units"] = "m^2/s"
ds.v_hat.attrs["units"] = "m^2/s"
ds.w_hat.attrs["units"] = "m^2/s"
ds.w_hat.attrs["units"] = "m^2/s"
ds.b_hat.attrs["units"] = "m^2/s^2"
ds.p_hat.attrs["units"] = "kg/s^2"
ds.x.attrs["units"] = "m"
ds.z.attrs["units"] = "m"
ds.k.attrs["units"] = "rad/m"
return ds
def __make_diags_dataset(self):
""" Calculates energy diagnostics from wave fields and puts them in an xarray dataset."""
ds = self.wave_fields
if self.hydrostatic:
alpha = 0
else:
alpha = 1
# Horizontal averages: use Parseval. Prefactor of sums is (1/2/L)*(1/2/pi)*dk = 1/4/L^2
prefac = 1 / 4 / self.L ** 2
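        # (added step) dk = 2*pi/(2*L) = pi/L, hence
        # (1/(2*L)) * (1/(2*pi)) * dk = 1/(4*L**2).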
E_flux_1D = prefac * np.real(np.sum(ds.p_hat * np.conj(ds.w_hat), axis=0))
E_kinetic_2D = 0.5 * self.rho_0 * (ds.u ** 2 + ds.v ** 2 + alpha * ds.w ** 2)
E_potential_2D = 0.5 * self.rho_0 * (ds.b ** 2 / ds.N2_2D)
E_2D = E_kinetic_2D + E_potential_2D
E_kinetic_1D = np.sum(E_kinetic_2D, axis=0) * self.dx / 2 / self.L
E_potential_1D = np.sum(E_potential_2D, axis=0) * self.dx / 2 / self.L
E_1D = E_kinetic_1D + E_potential_1D
u_x = ds.u.differentiate('x')
v_x = ds.v.differentiate('x')
w_x = ds.w.differentiate('x')
b_x = ds.b.differentiate('x')
diss_rate_2D = self.Ah * (u_x ** 2 + v_x ** 2 + alpha * w_x ** 2)
mixing_2D = self.Dh * (b_x ** 2 / ds.N2_2D)
D_2D = diss_rate_2D + mixing_2D
diss_rate_1D = np.sum(diss_rate_2D, axis=0) * self.dx / 2 / self.L
mixing_1D = np.sum(mixing_2D, axis=0) * self.dx / 2 / self.L
D_1D = np.sum(D_2D, axis=0) * self.dx / 2 / self.L
EP_flux = prefac * np.real(np.sum(ds.u_hat * np.conj(ds.w_hat), axis=0) -
np.sum(self.f * ds.v_hat * np.conj(ds.b_hat), axis=0) / self.N ** 2)
EP_flux_z = EP_flux.differentiate('z')
drag = E_flux_1D[0] / self.U[0]
w_rms = np.sqrt(prefac * np.sum(np.abs(ds.w_hat) ** 2, axis=0))
ds2 = xr.Dataset(
data_vars=dict(
E_flux_1D=(["z"], E_flux_1D.values),
E_kinetic_1D=(["z"], E_kinetic_1D.values),
E_potential_1D=(["z"], E_potential_1D.values),
E_1D=(["z"], E_1D.values),
diss_rate_1D=(["z"], diss_rate_1D.values),
mixing_1D=(["z"], mixing_1D.values),
D_1D=(["z"], D_1D.values),
EP_flux=(["z"], EP_flux.values),
EP_flux_z=(["z"], EP_flux_z.values),
drag=([], drag.values),
w_rms=(["z"], w_rms.values),
E_kinetic_2D=(["x", "z"], E_kinetic_2D.values),
E_potential_2D=(["x", "z"], E_potential_2D.values),
E_2D=(["x", "z"], E_2D.values),
diss_rate_2D=(["x", "z"], diss_rate_2D.values),
mixing_2D=(["x", "z"], mixing_2D.values),
D_2D=(["x", "z"], D_2D.values),
),
coords=dict(
x=(["x"], self.x),
z=(["z"], self.z),
),
attrs=dict(description="Lee wave solver diagnostics")
)
ds2.E_flux_1D.attrs["long_name"] = "Horizontally averaged vertical energy flux"
ds2.E_kinetic_1D.attrs["long_name"] = "Horizontally averaged kinetic energy density"
ds2.E_potential_1D.attrs["long_name"] = "Horizontally averaged potential energy density"
ds2.E_1D.attrs["long_name"] = "Horizontally averaged energy density"
ds2.diss_rate_1D.attrs["long_name"] = "Horizontally averaged dissipation rate"
ds2.mixing_1D.attrs["long_name"] = "Horizontally averaged mixing"
ds2.D_1D.attrs["long_name"] = "Horizontally averaged energy loss"
ds2.EP_flux.attrs["long_name"] = "Horizontally averaged Eliassen-Palm flux"
ds2.EP_flux_z.attrs["long_name"] = "Vertical gradient of horizontally averaged Eliassen-Palm flux"
ds2.drag.attrs["long_name"] = "Horizontally averaged wave drag"
ds2.w_rms.attrs["long_name"] = "RMS vertical velocity (horizontally averaged)"
ds2.E_kinetic_2D.attrs["long_name"] = "Kinetic energy density"
ds2.E_potential_2D.attrs["long_name"] = "Potential energy density"
ds2.E_2D.attrs["long_name"] = "Energy density"
ds2.diss_rate_2D.attrs["long_name"] = "Dissipation rate"
ds2.mixing_2D.attrs["long_name"] = "Mixing"
ds2.D_2D.attrs["long_name"] = "Energy loss"
ds2.x.attrs["long_name"] = "Horizontal distance"
ds2.z.attrs["long_name"] = "Height above bottom"
ds2.E_flux_1D.attrs["units"] = "kg/s^3"
ds2.E_kinetic_1D.attrs["units"] = "kg/m/s^2"
ds2.E_potential_1D.attrs["units"] = "kg/m/s^2"
ds2.E_1D.attrs["units"] = "kg/m/s^2"
ds2.diss_rate_1D.attrs["units"] = "m^2/s^3"
ds2.mixing_1D.attrs["units"] = "m^2/s^3"
ds2.D_1D.attrs["units"] = "m^2/s^3"
ds2.EP_flux.attrs["units"] = "m^2/s^2"
ds2.EP_flux_z.attrs["units"] = "m/s^2"
ds2.drag.attrs["units"] = "kg/m/s^2"
ds2.w_rms.attrs["units"] = "m/s"
ds2.E_kinetic_2D.attrs["units"] = "kg/m/s^2"
ds2.E_potential_2D.attrs["units"] = | |
<filename>sbysrc/sby_core.py
#
# SymbiYosys (sby) -- Front-end for Yosys-based formal verification flows
#
# Copyright (C) 2016 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import os, re, sys, signal, platform
if os.name == "posix":
import resource, fcntl
import subprocess
from shutil import copyfile, copytree, rmtree
from select import select
from time import time, localtime, sleep, strftime
from sby_design import SbyProperty, SbyModule, design_hierarchy
all_procs_running = []
def force_shutdown(signum, frame):
print("SBY ---- Keyboard interrupt or external termination signal ----", flush=True)
for proc in list(all_procs_running):
proc.terminate()
sys.exit(1)
if os.name == "posix":
signal.signal(signal.SIGHUP, force_shutdown)
signal.signal(signal.SIGINT, force_shutdown)
signal.signal(signal.SIGTERM, force_shutdown)
def process_filename(filename):
if filename.startswith("~/"):
filename = os.environ['HOME'] + filename[1:]
filename = os.path.expandvars(filename)
return filename
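# Illustrative behaviour (hypothetical values): with HOME=/home/user and
# TOP=cpu, process_filename("~/designs/$TOP.sby") returns
# "/home/user/designs/cpu.sby".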
class SbyProc:
def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, silent=False):
self.running = False
self.finished = False
self.terminated = False
self.checkretcode = False
self.task = task
self.info = info
self.deps = deps
if os.name == "posix":
self.cmdline = cmdline
else:
            # Windows command interpreter equivalents for sequential
            # commands (; => &) and command grouping ({} => ()).
replacements = {
";" : "&",
"{" : "(",
"}" : ")",
}
parts = cmdline.split("'")
for i in range(len(parts)):
if i % 2 == 0:
cmdline_copy = parts[i]
for u, w in replacements.items():
cmdline_copy = cmdline_copy.replace(u, w)
parts[i] = cmdline_copy
self.cmdline = '"'.join(parts)
self.logfile = logfile
self.noprintregex = None
self.notify = []
self.linebuffer = ""
self.logstderr = logstderr
self.silent = silent
self.task.procs_pending.append(self)
for dep in self.deps:
dep.register_dep(self)
self.output_callback = None
self.exit_callback = None
def register_dep(self, next_proc):
if self.finished:
next_proc.poll()
else:
self.notify.append(next_proc)
def log(self, line):
if line is not None and (self.noprintregex is None or not self.noprintregex.match(line)):
if self.logfile is not None:
print(line, file=self.logfile)
self.task.log(f"{self.info}: {line}")
def handle_output(self, line):
if self.terminated or len(line) == 0:
return
if self.output_callback is not None:
line = self.output_callback(line)
self.log(line)
def handle_exit(self, retcode):
if self.terminated:
return
if self.logfile is not None:
self.logfile.close()
if self.exit_callback is not None:
self.exit_callback(retcode)
def terminate(self, timeout=False):
if self.task.opt_wait and not timeout:
return
if self.running:
if not self.silent:
self.task.log(f"{self.info}: terminating process")
if os.name == "posix":
try:
os.killpg(self.p.pid, signal.SIGTERM)
except PermissionError:
pass
self.p.terminate()
self.task.procs_running.remove(self)
all_procs_running.remove(self)
self.terminated = True
def poll(self):
if self.finished or self.terminated:
return
if not self.running:
for dep in self.deps:
if not dep.finished:
return
if not self.silent:
self.task.log(f"{self.info}: starting process \"{self.cmdline}\"")
if os.name == "posix":
def preexec_fn():
signal.signal(signal.SIGINT, signal.SIG_IGN)
os.setpgrp()
self.p = subprocess.Popen(["/usr/bin/env", "bash", "-c", self.cmdline], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
stderr=(subprocess.STDOUT if self.logstderr else None), preexec_fn=preexec_fn)
fl = fcntl.fcntl(self.p.stdout, fcntl.F_GETFL)
fcntl.fcntl(self.p.stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
else:
self.p = subprocess.Popen(self.cmdline, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
stderr=(subprocess.STDOUT if self.logstderr else None))
self.task.procs_pending.remove(self)
self.task.procs_running.append(self)
all_procs_running.append(self)
self.running = True
return
while True:
outs = self.p.stdout.readline().decode("utf-8")
if len(outs) == 0: break
if outs[-1] != '\n':
self.linebuffer += outs
break
outs = (self.linebuffer + outs).strip()
self.linebuffer = ""
self.handle_output(outs)
if self.p.poll() is not None:
if not self.silent:
self.task.log(f"{self.info}: finished (returncode={self.p.returncode})")
self.task.procs_running.remove(self)
all_procs_running.remove(self)
self.running = False
if self.p.returncode == 127:
self.task.status = "ERROR"
if not self.silent:
self.task.log(f"{self.info}: COMMAND NOT FOUND. ERROR.")
self.terminated = True
self.task.terminate()
return
self.handle_exit(self.p.returncode)
if self.checkretcode and self.p.returncode != 0:
self.task.status = "ERROR"
if not self.silent:
self.task.log(f"{self.info}: task failed. ERROR.")
self.terminated = True
self.task.terminate()
return
self.finished = True
for next_proc in self.notify:
next_proc.poll()
return
class SbyAbort(BaseException):
pass
class SbyConfig:
def __init__(self):
self.options = dict()
self.engines = list()
self.script = list()
self.files = dict()
self.verbatim_files = dict()
pass
def parse_config(self, f):
mode = None
for line in f:
raw_line = line
if mode in ["options", "engines", "files"]:
line = re.sub(r"\s*(\s#.*)?$", "", line)
if line == "" or line[0] == "#":
continue
else:
line = line.rstrip()
# print(line)
if mode is None and (len(line) == 0 or line[0] == "#"):
continue
match = re.match(r"^\s*\[(.*)\]\s*$", line)
if match:
entries = match.group(1).split()
if len(entries) == 0:
self.error(f"sby file syntax error: {line}")
if entries[0] == "options":
mode = "options"
if len(self.options) != 0 or len(entries) != 1:
self.error(f"sby file syntax error: {line}")
continue
if entries[0] == "engines":
mode = "engines"
if len(self.engines) != 0 or len(entries) != 1:
self.error(f"sby file syntax error: {line}")
continue
if entries[0] == "script":
mode = "script"
if len(self.script) != 0 or len(entries) != 1:
self.error(f"sby file syntax error: {line}")
continue
if entries[0] == "file":
mode = "file"
if len(entries) != 2:
self.error(f"sby file syntax error: {line}")
current_verbatim_file = entries[1]
if current_verbatim_file in self.verbatim_files:
self.error(f"duplicate file: {entries[1]}")
self.verbatim_files[current_verbatim_file] = list()
continue
if entries[0] == "files":
mode = "files"
if len(entries) != 1:
self.error(f"sby file syntax error: {line}")
continue
self.error(f"sby file syntax error: {line}")
if mode == "options":
entries = line.split()
if len(entries) != 2:
self.error(f"sby file syntax error: {line}")
self.options[entries[0]] = entries[1]
continue
if mode == "engines":
entries = line.split()
self.engines.append(entries)
continue
if mode == "script":
self.script.append(line)
continue
if mode == "files":
entries = line.split()
if len(entries) == 1:
self.files[os.path.basename(entries[0])] = entries[0]
elif len(entries) == 2:
self.files[entries[0]] = entries[1]
else:
self.error(f"sby file syntax error: {line}")
continue
if mode == "file":
self.verbatim_files[current_verbatim_file].append(raw_line)
continue
self.error(f"sby file syntax error: {line}")
def error(self, logmessage):
raise SbyAbort(logmessage)
class SbyTask(SbyConfig):
def __init__(self, sbyconfig, workdir, early_logs, reusedir):
super().__init__()
self.used_options = set()
self.models = dict()
self.workdir = workdir
self.reusedir = reusedir
self.status = "UNKNOWN"
self.total_time = 0
self.expect = list()
self.design_hierarchy = None
self.precise_prop_status = False
yosys_program_prefix = "" ##yosys-program-prefix##
self.exe_paths = {
"yosys": os.getenv("YOSYS", yosys_program_prefix + "yosys"),
"abc": os.getenv("ABC", yosys_program_prefix + "yosys-abc"),
"smtbmc": os.getenv("SMTBMC", yosys_program_prefix + "yosys-smtbmc"),
"suprove": os.getenv("SUPROVE", "suprove"),
"aigbmc": os.getenv("AIGBMC", "aigbmc"),
"avy": os.getenv("AVY", "avy"),
"btormc": os.getenv("BTORMC", "btormc"),
"pono": os.getenv("PONO", "pono"),
}
self.procs_running = []
self.procs_pending = []
self.start_clock_time = time()
if os.name == "posix":
ru = resource.getrusage(resource.RUSAGE_CHILDREN)
self.start_process_time = ru.ru_utime + ru.ru_stime
self.summary = list()
self.logfile = open(f"{workdir}/logfile.txt", "a")
for line in early_logs:
print(line, file=self.logfile, flush=True)
if not reusedir:
with open(f"{workdir}/config.sby", "w") as f:
for line in sbyconfig:
print(line, file=f)
def taskloop(self):
for proc in self.procs_pending:
proc.poll()
while len(self.procs_running):
fds = []
for proc in self.procs_running:
if proc.running:
fds.append(proc.p.stdout)
if os.name == "posix":
try:
select(fds, [], [], 1.0) == ([], [], [])
except InterruptedError:
pass
else:
sleep(0.1)
for proc in self.procs_running:
proc.poll()
for proc in self.procs_pending:
proc.poll()
if self.opt_timeout is not None:
total_clock_time = int(time() - self.start_clock_time)
if total_clock_time > self.opt_timeout:
self.log(f"Reached TIMEOUT ({self.opt_timeout} seconds). Terminating all subprocesses.")
self.status = "TIMEOUT"
self.terminate(timeout=True)
def log(self, logmessage):
tm = localtime()
print("SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), flush=True)
print("SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), file=self.logfile, flush=True)
def error(self, logmessage):
tm = localtime()
print("SBY {:2d}:{:02d}:{:02d} [{}] ERROR: {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), flush=True)
print("SBY {:2d}:{:02d}:{:02d} [{}] ERROR: {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), file=self.logfile, flush=True)
self.status = "ERROR"
if "ERROR" not in self.expect:
self.retcode = 16
else:
self.retcode = 0
self.terminate()
with open(f"{self.workdir}/{self.status}", "w") as f:
print(f"ERROR: {logmessage}", file=f)
raise SbyAbort(logmessage)
def makedirs(self, path):
if self.reusedir and os.path.isdir(path):
rmtree(path, ignore_errors=True)
os.makedirs(path)
def copy_src(self):
os.makedirs(self.workdir + "/src")
for dstfile, lines in self.verbatim_files.items():
dstfile = self.workdir + "/src/" + dstfile
self.log(f"Writing '{dstfile}'.")
with open(dstfile, "w") as f:
for line in lines:
f.write(line)
for dstfile, srcfile in self.files.items():
if dstfile.startswith("/") or dstfile.startswith("../") or ("/../" in dstfile):
self.error(f"destination filename must be a relative path without /../: {dstfile}")
dstfile = self.workdir + "/src/" + dstfile
srcfile = process_filename(srcfile)
basedir = os.path.dirname(dstfile)
if basedir != "" and not os.path.exists(basedir):
os.makedirs(basedir)
self.log(f"Copy '{os.path.abspath(srcfile)}' to '{os.path.abspath(dstfile)}'.")
if os.path.isdir(srcfile):
copytree(srcfile, dstfile, dirs_exist_ok=True)
else:
copyfile(srcfile, dstfile)
def handle_str_option(self, option_name, default_value):
if option_name in self.options:
self.__dict__["opt_" + option_name] = self.options[option_name]
self.used_options.add(option_name)
else:
self.__dict__["opt_" + option_name] = default_value
def handle_int_option(self, option_name, default_value):
if option_name in self.options:
self.__dict__["opt_" | |
string
}
tags:
- table_favorite_color
- name: fav_number
description: "The user's favorite number"
tests:
- accepted_values:
values: [3.14159265]
quote: false
tags: # tags can be a list of strings
- favorite_number_is_pi
- name: table_summary
description: "The summary table"
columns:
- name: favorite_color_copy
description: "The favorite color"
tests:
- not_null
- unique
- accepted_values: { values: ['blue', 'green'] }
- relationships: { field: favorite_color, to: ref('table_copy') }
tags:
- table_favorite_color
- name: count
description: "The number of responses for this favorite color"
tests:
- not_null
# all of these constraints will fail
- name: table_failure_copy
description: "The table copy that does not comply with the schema"
columns:
- name: id
description: "The user ID"
tests:
- not_null
- unique
tags:
- xfail
- name: favorite_color
description: "The user's favorite color"
tests:
- accepted_values: { values: ['blue', 'green'] }
tags:
- xfail
# all of these constraints will fail
- name: table_failure_summary
description: "The table summary that does not comply with the schema"
columns:
- name: favorite_color
description: "The favorite color"
tests:
- accepted_values: { values: ['red'] }
- relationships: { field: favorite_color, to: ref('table_copy') }
tags:
- xfail
# this table is disabled so these tests should be ignored
- name: table_disabled
description: "A disabled table"
columns:
- name: favorite_color
description: "The favorite color"
tests:
- accepted_values: { values: ['red'] }
- relationships: { field: favorite_color, to: ref('table_copy') }
# all of these constraints will fail
- name: table_failure_null_relation
description: "A table with a null value where it should be a foreign key"
columns:
- name: id
description: "The user ID"
tests:
- relationships: { field: id, to: ref('table_failure_copy') }
tags:
- xfail
"""
models_v2__models__table_summary_sql = """
{{
config(
materialized='table'
)
}}
select favorite_color as favorite_color_copy, count(*) as count
from {{ ref('table_copy') }}
group by 1
"""
models_v2__models__table_failure_summary_sql = """
{{
config(
materialized='table'
)
}}
-- force a foreign key constraint failure here
select 'purple' as favorite_color, count(*) as count
from {{ ref('table_failure_copy') }}
group by 1
"""
models_v2__models__table_disabled_sql = """
{{
config(
enabled=False
)
}}
-- force a foreign key constraint failure here
select 'purple' as favorite_color, count(*) as count
from {{ ref('table_failure_copy') }}
group by 1
"""
models_v2__models__table_failure_null_relation_sql = """
{{
config(
materialized='table'
)
}}
-- force a foreign key constraint failure here
select 105 as id, count(*) as count
from {{ ref('table_failure_copy') }}
group by 1
"""
models_v2__models__table_failure_copy_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ this.schema }}.seed_failure
"""
models_v2__models__table_copy_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ this.schema }}.seed
"""
models_v2__malformed__schema_yml = """
version: 2
models:
# this whole model should fail and not run
- name: table_copy
description: "A copy of the table"
columns:
- name: id
description: "The ID"
tests:
- not_null
- unique
- name: favorite_color
tests:
# this is missing a "-" and is malformed
accepted_values: { values: ['blue', 'green'] }
# this whole model should pass and run
- name: table_summary
description: "The summary table"
columns:
- name: favorite_color
description: "The favorite color"
tests:
- not_null
- unique
- accepted_values: { values: ['blue', 'green'] }
- relationships: { field: favorite_color, to: ref('table_copy') }
- name: count
description: "The number of responses for this favorite color"
tests:
- not_null
"""
models_v2__malformed__table_summary_sql = """
{{
config(
materialized='table'
)
}}
select favorite_color, count(*) as count
from {{ ref('table_copy') }}
group by 1
"""
models_v2__malformed__table_copy_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ this.schema }}.seed
"""
models_v2__override_get_test_models_fail__schema_yml = """
version: 2
models:
- name: my_model
description: "The table has 1 null values, and we're not okay with that."
columns:
- name: id
description: "The number of responses for this favorite color - purple will be null"
tests:
- not_null
"""
models_v2__override_get_test_models_fail__my_model_sql = """
select 1 as id
UNION ALL
select null as id
"""
models_v2__custom_configs__schema_yml = """
version: 2
models:
- name: table_copy
description: "A copy of the table"
# passes
tests:
- where
- error_if
- warn_if
- limit
- fail_calc
columns:
- name: id
tests:
# relationships with where
- relationships:
to: ref('table_copy') # itself
field: id
where: 1=1
- name: table_copy_another_one
tests:
- where: # test override + weird quoting
config:
where: "\\"favorite_color\\" = 'red'"
- name: "table.copy.with.dots"
description: "A copy of the table with a gross name"
# passes, see https://github.com/dbt-labs/dbt-core/issues/3857
tests:
- where
"""
models_v2__custom_configs__table_copy_another_one_sql = """
select * from {{ ref('table_copy') }}
"""
models_v2__custom_configs__table_copy_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ this.schema }}.seed
"""
models_v2__custom_configs__table_copy_with_dots_sql = """
select * from {{ ref('table_copy') }}
"""
models_v2__render_test_configured_arg_models__schema_yml = """
version: 2
models:
- name: model
tests:
- equivalent:
value: "{{ var('myvar', 'baz') }}-bar"
"""
models_v2__render_test_configured_arg_models__model_sql = """
select 1 as id
"""
models_v2__custom__schema_yml = """
version: 2
models:
- name: table_copy
description: "A copy of the table"
columns:
- name: email
tests:
- not_null
- name: id
description: "The ID"
tests:
- unique
- name: favorite_color
tests:
- every_value_is_blue
- rejected_values: { values: ['orange', 'purple'] }
# passes
tests:
- local_dep.equality: { compare_model: ref('table_copy') }
"""
models_v2__custom__table_copy_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ this.schema }}.seed
"""
models_v2__limit_null__schema_yml = """
version: 2
models:
- name: table_limit_null
description: "The table has 1 null values, and we're okay with that, until it's more than 1."
columns:
- name: favorite_color_full_list
description: "The favorite color"
- name: count
description: "The number of responses for this favorite color - purple will be null"
tests:
- not_null:
error_if: '>1'
warn_if: '>1'
- name: table_warning_limit_null
description: "The table has 1 null value, and we're okay with 1, but want to know of any."
columns:
- name: favorite_color_full_list
description: "The favorite color"
- name: count
description: "The number of responses for this favorite color - purple will be null"
tests:
- not_null:
error_if: '>1'
- name: table_failure_limit_null
description: "The table has some 2 null values, and that's not ok. Warn and error."
columns:
- name: favorite_color_full_list
description: "The favorite color"
- name: count
description: "The number of responses for this favorite color - purple will be null"
tests:
- not_null:
error_if: '>1'
"""
models_v2__limit_null__table_warning_limit_null_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ref('table_limit_null')}}
"""
models_v2__limit_null__table_limit_null_sql = """
{{
config(
materialized='table'
)
}}
select favorite_color as favorite_color_full_list, count(*) as count
from {{ this.schema }}.seed
group by 1
UNION ALL
select 'purple' as favorite_color_full_list, null as count
"""
models_v2__limit_null__table_failure_limit_null_sql = """
{{
config(
materialized='table'
)
}}
select * from {{ref('table_limit_null')}}
UNION ALL
select 'magenta' as favorite_color_full_list, null as count
"""
local_utils__dbt_project_yml = """
name: 'local_utils'
version: '1.0'
config-version: 2
profile: 'default'
macro-paths: ["macros"]
"""
local_utils__macros__datediff_sql = """
{% macro datediff(first_date, second_date, datepart) %}
{{ return(adapter.dispatch('datediff', 'local_utils')(first_date, second_date, datepart)) }}
{% endmacro %}
{% macro default__datediff(first_date, second_date, datepart) %}
datediff(
{{ datepart }},
{{ first_date }},
{{ second_date }}
)
{% endmacro %}
{% macro postgres__datediff(first_date, second_date, datepart) %}
{% if datepart == 'year' %}
(date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))
{% elif datepart == 'quarter' %}
({{ adapter.dispatch('datediff', 'local_utils')(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))
{% else %}
( 1000 )
{% endif %}
{% endmacro %}
"""
local_utils__macros__current_timestamp_sql = """
{% macro current_timestamp() -%}
{{ return(adapter.dispatch('current_timestamp')) }}
{%- endmacro %}
{% macro default__current_timestamp() -%}
now()
{%- endmacro %}
"""
local_utils__macros__custom_test_sql = """
{% macro test_dispatch(model) -%}
{{ return(adapter.dispatch('test_dispatch', macro_namespace = 'local_utils')()) }}
{%- endmacro %}
{% macro default__test_dispatch(model) %}
select {{ adapter.dispatch('current_timestamp', macro_namespace = 'local_utils')() }}
{% endmacro %}
"""
ephemeral__schema_yml = """
version: 2
models:
- name: ephemeral
columns:
- name: id
tests:
- unique
"""
ephemeral__ephemeral_sql = """
{{ config(materialized='ephemeral') }}
select 1 as id
"""
quote_required_models__schema_yml = """
version: 2
models:
- name: model
columns:
- name: Id
quote: true
tests:
- unique
- not_null
- name: model_again
quote_columns: true
columns:
- name: Id
tests:
- unique
- not_null
- name: model_noquote
quote_columns: true
columns:
- name: Id
quote: false
tests:
- unique
- not_null
sources:
# this should result in column quoting = true
- name: my_source
schema: "{{ target.schema }}"
quoting:
column: true
tables:
- name: model
quoting:
column: false
columns:
- name: Id
quote: true
tests:
- unique
- name: my_source_2
schema: "{{ target.schema }}"
quoting:
column: false
tables:
# this should result in column quoting = true
- name: model
quoting:
column: true
columns:
- name: Id
tests:
- unique
# this should result in column quoting = false
- name: model_noquote
columns:
- name: Id
tests:
- unique
"""
quote_required_models__model_again_sql = """
select 1 as "Id"
"""
quote_required_models__model_noquote_sql = """
select 1 as id
"""
quote_required_models__model_sql = """
select 1 as "Id"
"""
@pytest.fixture(scope="class")
def wrong_specification_block():
return {"schema.yml": wrong_specification_block__schema_yml}
@pytest.fixture(scope="class")
def test_context_where_subq_models():
return {
"schema.yml": test_context_where_subq_models__schema_yml,
"model_a.sql": test_context_where_subq_models__model_a_sql,
}
@pytest.fixture(scope="class")
def test_utils():
return {
"dbt_project.yml": test_utils__dbt_project_yml,
"macros": {
"current_timestamp.sql": test_utils__macros__current_timestamp_sql,
"custom_test.sql": test_utils__macros__custom_test_sql,
},
}
@pytest.fixture(scope="class")
def local_dependency():
return {
"dbt_project.yml": local_dependency__dbt_project_yml,
"macros": {"equality.sql": local_dependency__macros__equality_sql},
}
@pytest.fixture(scope="class")
def case_sensitive_models():
return {
"schema.yml": case_sensitive_models__schema_yml,
"lowercase.sql": case_sensitive_models__lowercase_sql,
}
@pytest.fixture(scope="class")
def test_context_macros():
| |
"tree": None,
"filename": None,
"line": None,
"comment": "Pull-Request has been closed by pingou",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
},
{
"id": 2,
"commit": None,
"tree": None,
"filename": None,
"line": None,
"comment": "Pull-Request has been reopened by pingou",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
},
],
},
"agent": "pingou",
},
),
pagure_messages.PullRequestReopenedV1(
topic="pagure.pull-request.reopened",
body={
"pullrequest": {
"id": 1,
"uid": ANY,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"full_url": "http://localhost.localdomain/test",
"fullname": "test",
"url_path": "test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": None,
"status": "Open",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [
{
"id": 1,
"commit": None,
"tree": None,
"filename": None,
"line": None,
"comment": "Pull-Request has been closed by pingou",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
},
{
"id": 2,
"commit": None,
"tree": None,
"filename": None,
"line": None,
"comment": "Pull-Request has been reopened by pingou",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
},
],
},
"agent": "pingou",
},
),
):
output = self.app.post(
"/test/pull-request/1/reopen",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR from the feature branch - test\n - "
"Pagure</title>",
output_text,
)
self.assertIn(
'return window.confirm("Are you sure you want to reopen this requested pull?")',
output_text,
)
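# A minimal sketch of the fedora-messaging assertion pattern used throughout
# these tests: `testing.mock_sends(...)` is a context manager that fails the
# test unless exactly the listed messages (matching topic and body) are
# published inside the block. The topic/body below are illustrative only.
#
#     from fedora_messaging import api, testing
#
#     with testing.mock_sends(api.Message(topic="demo.topic", body={"ok": 1})):
#         api.publish(api.Message(topic="demo.topic", body={"ok": 1}))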
@patch.dict(
"pagure.config.config", {"FEDORA_MESSAGING_NOTIFICATIONS": True}
)
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_update_pull_requests_assign(self):
"""Test the update_pull_requests endpoint when assigning a PR."""
tests.create_projects(self.session)
tests.create_projects_git(
os.path.join(self.path, "requests"), bare=True
)
set_up_git_repo(
self.session, self.path, new_project=None, branch_from="feature"
)
user = tests.FakeUser()
user.username = "pingou"
with tests.user_set(self.app.application, user):
# No such project
output = self.app.post("/foo/pull-request/1/update")
self.assertEqual(output.status_code, 404)
output = self.app.post("/test/pull-request/100/update")
self.assertEqual(output.status_code, 404)
# Invalid input
output = self.app.post(
"/test/pull-request/1/update", follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR from the feature branch - test\n - "
"Pagure</title>",
output_text,
)
self.assertIn(
'<h4 class="ml-1">\n <div>\n '
'<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n '
'<span class="text-success '
'font-weight-bold">#1</span>\n '
'<span class="font-weight-bold">\n '
"PR from the feature branch\n",
output_text,
)
self.assertNotIn("Request assigned", output_text)
output = self.app.get("/test/pull-request/1")
self.assertEqual(output.status_code, 200)
csrf_token = self.get_csrf(output=output)
data = {"user": "pingou"}
# No CSRF
output = self.app.post(
"/test/pull-request/1/update", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR from the feature branch - test\n - "
"Pagure</title>",
output_text,
)
self.assertIn(
'<h4 class="ml-1">\n <div>\n '
'<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n '
'<span class="text-success '
'font-weight-bold">#1</span>\n '
'<span class="font-weight-bold">\n '
"PR from the feature branch\n",
output_text,
)
self.assertNotIn("Request assigned", output_text)
# Invalid assignee
data = {"csrf_token": csrf_token, "user": "bar"}
output = self.app.post(
"/test/pull-request/1/update", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR from the feature branch - test\n - "
"Pagure</title>",
output_text,
)
self.assertIn(
'<h4 class="ml-1">\n <div>\n '
'<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n '
'<span class="text-success '
'font-weight-bold">#1</span>\n '
'<span class="font-weight-bold">\n '
"PR from the feature branch\n",
output_text,
)
self.assertIn("No user "bar" found", output_text)
# Assign the PR
data = {"csrf_token": csrf_token, "user": "pingou"}
user.username = "foo"
with tests.user_set(self.app.application, user):
output = self.app.post(
"/test/pull-request/1/update", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 403)
user.username = "pingou"
with tests.user_set(self.app.application, user):
with testing.mock_sends(
api.Message(
topic="pagure.request.assigned.added",
body={
"request": {
"id": 1,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"uid": ANY,
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": None,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": {
"name": "pingou",
"fullname": "<NAME>",
"full_url": "http://localhost.localdomain/user/pingou",
"url_path": "user/pingou",
},
"status": "Open",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": None,
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [],
},
"pullrequest": {
"id": 1,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"uid": ANY,
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"full_url": "http://localhost.localdomain/test",
"url_path": "test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": None,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"status": "Open",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": None,
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [],
},
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"full_url": "http://localhost.localdomain/test",
"url_path": "test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"agent": "pingou",
},
),
pagure_messages.PullRequestAssignedAddedV1(
topic="pagure.pull-request.assigned.added",
body={
"pullrequest": {
"id": 1,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"uid": ANY,
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_messaging_facebook_app" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/conversations/messaging/facebook/app'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FacebookAppCredentials',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_messaging_integrations(self, **kwargs):
"""
Get a list of Integrations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_messaging_integrations(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:param str expand: Expand instructions for the return value.
:param str supported_content_id: Filter integrations returned based on the supported content ID
:return: MessagingIntegrationEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number', 'expand', 'supported_content_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_messaging_integrations" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/conversations/messaging/integrations'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'expand' in params:
query_params['expand'] = params['expand']
if 'supported_content_id' in params:
query_params['supportedContent.id'] = params['supported_content_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MessagingIntegrationEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
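# Usage sketch (hedged): with an authenticated ConversationsApi instance named
# `conversations_api`, the snake_case paging kwargs map onto the pageSize and
# pageNumber query params above; `entities` is the conventional attribute on
# PureCloud *EntityListing models (treat it as an assumption if your SDK
# version differs).
#
#     listing = conversations_api.get_conversations_messaging_integrations(
#         page_size=25, page_number=1)
#     for integration in listing.entities:
#         print(integration.id)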
def get_conversations_messaging_integrations_facebook(self, **kwargs):
"""
Get a list of Facebook Integrations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_messaging_integrations_facebook(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:param str expand: Expand instructions for the return value.
:param str supported_content_id: Filter integrations returned based on the supported content ID
:return: FacebookIntegrationEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number', 'expand', 'supported_content_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_messaging_integrations_facebook" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/conversations/messaging/integrations/facebook'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'expand' in params:
query_params['expand'] = params['expand']
if 'supported_content_id' in params:
query_params['supportedContent.id'] = params['supported_content_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FacebookIntegrationEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_messaging_integrations_facebook_integration_id(self, integration_id, **kwargs):
"""
Get a Facebook messaging integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_messaging_integrations_facebook_integration_id(integration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str integration_id: Integration ID (required)
:param str expand: Expand instructions for the return value.
:return: FacebookIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['integration_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_messaging_integrations_facebook_integration_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'integration_id' is set
if ('integration_id' not in params) or (params['integration_id'] is None):
raise ValueError("Missing the required parameter `integration_id` when calling `get_conversations_messaging_integrations_facebook_integration_id`")
resource_path = '/api/v2/conversations/messaging/integrations/facebook/{integrationId}'.replace('{format}', 'json')
path_params = {}
if 'integration_id' in params:
path_params['integrationId'] = params['integration_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FacebookIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_messaging_integrations_line(self, **kwargs):
"""
Get a list of LINE messenger Integrations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_messaging_integrations_line(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:param str expand: Expand instructions for the return value.
:param str supported_content_id: Filter integrations returned based on the supported content ID
:return: LineIntegrationEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number', 'expand', 'supported_content_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_messaging_integrations_line" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/conversations/messaging/integrations/line'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'expand' in params:
query_params['expand'] = params['expand']
if 'supported_content_id' in params:
query_params['supportedContent.id'] = params['supported_content_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LineIntegrationEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_messaging_integrations_line_integration_id(self, integration_id, **kwargs):
"""
Get a LINE messenger integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_conversations_messaging_integrations_line_integration_id(integration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str integration_id: Integration ID (required)
:param str expand: Expand instructions for the return value.
:return: LineIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['integration_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_conversations_messaging_integrations_line_integration_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'integration_id' is set
if ('integration_id' not in params) or (params['integration_id'] is None):
raise ValueError("Missing the required parameter `integration_id` when calling `get_conversations_messaging_integrations_line_integration_id`")
resource_path = '/api/v2/conversations/messaging/integrations/line/{integrationId}'.replace('{format}', 'json')
path_params = {}
if 'integration_id' in params:
path_params['integrationId'] = params['integration_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LineIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_conversations_messaging_integrations_open(self, **kwargs):
"""
from .editor_util import get_def_lines, get_defrange, excise_def_lines, overwrite_import
from .import_util import get_import_stmt_str
__all__ = [
"nix_surplus_imports",
"shorten_imports",
"receive_imports",
"copy_src_defs_to_dst",
"remove_copied_defs",
"transfer_mvdefs",
]
def nix_surplus_imports(self, record_removed_import_n=False):
"""
Remove imports marked in the agenda as "lose" (src/dst) or "move" (src only).
Bound as a method of `SrcFile`/`DstFile` classes, and used within `transfer_mvdefs`
in step 1 part 1 (dst: removed_import_n) & step 5 part 1 (src: no removed_import_n).
"""
# print("Step 1: Remove imports marked dst⠶lose")
# print("Step 5: Remove imports marked src⠶{move,lose}")
self.removed_import_n = []
for rm_i in self.rm_agenda: # sets .rm_agenda
# Remove rm_i (imported name marked "lose" for dst, or marked "move"/"lose" for
# src) from the destination or source file using the line numbers of `self.trunk`,
# computed as `dst.imports` by `get_imports`
# (a destructive operation, so line numbers of `self.trunk` no longer valid),
# if the removal of the imported name leaves no other imports on a line,
# otherwise shorten that line by removing the import alias(es) marked "lose"
info = self.rm_agenda.get(rm_i)
imp_src_ending = info.get("import").split(".")[-1]
# Retrieve the index of the line in import list
rm_i_n = info.get("n")
rm_i_linecount = self.import_counts[rm_i_n]
if rm_i_linecount > 1:
# This means there is ≥1 other import alias in the import statement
# for this name, so remove just this name from it (i.e. "shorten" the statement).
info["shorten"] = info.get("n_i")
else:
# This means this name is the only alias in its import statement, so the
# entire import statement must be removed (i.e. delete its entire line range)
if record_removed_import_n:
# self is DstFile
self.removed_import_n.append(rm_i_n)
else:
# self is SrcFile
info["shorten"] = None
imp_startline = self.imports[rm_i_n].first_token.start[0]
imp_endline = self.imports[rm_i_n].last_token.end[0]
imp_linerange = [imp_startline - 1, imp_endline]
for i in range(*imp_linerange):
self.lines[i] = None
info["shorten"] = None
def shorten_imports(self, record_removed_import_n=False):
"""
Shorten the imports based on the annotations set by `nix_surplus_imports`
(potentially removing an import statement entirely if its list of imported
names becomes shortened to 0).
Bound as a method of `SrcFile`/`DstFile` classes, and used within `transfer_mvdefs`
in step 1 part 2 (dst: removed_import_n) & step 5 part 2 (src: no removed_import_n).
"""
self.to_shorten = {}
for rm_i in self.rm_agenda:
if self.rm_agenda.get(rm_i).get("shorten") is not None:
self.to_shorten.update({rm_i: self.rm_agenda.get(rm_i)})
self.n_to_short = set([self.to_shorten.get(x).get("n") for x in self.to_shorten])
# Group all names being shortened that are of a common import statement
for n in self.n_to_short:
names_to_short = [
x for x in self.to_shorten if self.to_shorten.get(x).get("n") == n
]
n_i_to_short = [self.to_shorten.get(a).get("n_i") for a in names_to_short]
# Rewrite `self.imports[n]` with all aliases except those in `names_to_short`
imp_module = self.modules[n]
pre_imp = self.imports[n]
shortened_alias_list = [(a.name, a.asname) for a in pre_imp.names]
# Proceed backwards from the end to the start, permitting deletions by index
for (name, asname) in shortened_alias_list[::-1]:
if asname is None and name not in names_to_short:
continue
elif asname is not None and asname not in names_to_short:
continue
del_i = shortened_alias_list.index((name, asname))
del shortened_alias_list[del_i]
if len(shortened_alias_list) == 0:
if record_removed_import_n:
# All dst import aliases were removed, so remove entire import statement
self.removed_import_n.append(n)
imp_startline = pre_imp.first_token.start[0]
imp_endline = pre_imp.last_token.end[0]
imp_linerange = [imp_startline - 1, imp_endline]
for i in range(*imp_linerange):
self.lines[i] = None
else:
imp_stmt_str = get_import_stmt_str(shortened_alias_list, imp_module)
overwrite_import(pre_imp, imp_stmt_str, self.lines)
def receive_imports(link):
"""
Receive imports marked in the `link.dst.rcv_agenda`.
Bound as a method of the `FileLink` class, and used in step 2 of `transfer_mvdefs`.
"""
# print("Step 2: Add imports marked dst⠶{move,copy}")
for rc_i in link.dst.rcv_agenda: # sets rcv_agenda
# Transfer mv_i into the destination file: receive "move" as "take"
# Transfer cp_i into the destination file: receive "copy" as "echo"
dst_info = link.dst.rcv_agenda.get(rc_i)
imp_src_ending = dst_info.get("import").split(".")[-1]
# Use name/asname to retrieve the index of the line in import list to get
# the module which is at the same index in the list of src modules:
rc_i_n = dst_info.get("n")
rc_i_module = link.src.modules[rc_i_n]
# Compare the imported name to the module if one exists
if rc_i_module is not None:
dst_module_set = set(link.dst.modules).difference({None})
if rc_i_module in dst_module_set:
# This means there is already ≥1 ast.ImportFrom statement (i.e. a
# line) which imports from the same module as the to-be-added import
# name does, so combine it with this existing line. Assume the 1st
# such ImportFrom is to be extended (ignoring other possible ones).
dst_info["extend"] = link.dst.modules.index(rc_i_module)
else:
# This means there is nothing from this module being imported yet,
# so must create a new ImportFrom statement (i.e. a new line)
dst_info["extend"] = None
else:
# This means `rc_i` is an ast.Import statement, not ImportFrom
# (PEP8 recommends separate imports, so do not extend another)
dst_info["extend"] = None
link.dst.to_extend = {}
for rc_i in link.dst.rcv_agenda:
if link.dst.rcv_agenda.get(rc_i).get("extend") is not None:
link.dst.to_extend.update({rc_i: link.dst.rcv_agenda.get(rc_i)})
link.dst.n_to_extend = set(
[link.dst.to_extend.get(x).get("n") for x in link.dst.to_extend]
)
# Group all names being added as extensions that are of a common import statement
for n in link.dst.n_to_extend:
names_to_extend = [
x for x in link.dst.to_extend if link.dst.to_extend.get(x).get("n") == n
]
# Rewrite `link.dst.imports[n]` to include the aliases in `names_to_extend`
imp_module = link.dst.modules[n]
pre_imp = link.dst.imports[n]
extended_alias_list = [(a.name, a.asname) for a in pre_imp.names]
for rc_i in names_to_extend:
dst_info = link.dst.to_extend.get(rc_i)
imp_src = dst_info.get("import")
imp_src_ending = imp_src.split(".")[-1]
if rc_i == imp_src_ending:
rc_i_name, rc_i_as = rc_i, None
elif imp_module is not None:
rc_i_name, rc_i_as = imp_src_ending, rc_i
else:
rc_i_name, rc_i_as = imp_src, rc_i
extended_alias_list.append((rc_i_name, rc_i_as))
imp_stmt_str = get_import_stmt_str(extended_alias_list, imp_module)
overwrite_import(pre_imp, imp_stmt_str, link.dst.lines)
# Next, put any import names marked "take" or "echo" that are not extensions
# of existing import statements into new lines (this breaks the line index).
#
# Firstly, find the insertion point for new import statements by re-processing
# the list of lines (default to start of file if it has no import statements)
link.dst.import_n = [
n for n, _ in enumerate(link.dst.imports) if n not in link.dst.removed_import_n
]
# sets .imports ⇢ sets .trunk
if len(link.dst.import_n) == 0:
# Place any new import statements at the start of the file, as none exist yet
link.dst.last_imp_end = (
0 # 1-based index logic: this means "before the first line"
)
else:
last_import = link.dst.imports[link.dst.import_n[-1]]
link.dst.last_imp_end = last_import.last_token.end[0] # Leave in 1-based index
# Collect import statements to insert after the last one
link.dst._ins_imp_stmts = []
link.dst._seen_multimodule_imports = set()
for rc_i in link.dst.rcv_agenda:
dst_info = link.dst.rcv_agenda.get(rc_i)
if (
rc_i in link.dst._seen_multimodule_imports
or dst_info.get("extend") is not None
):
continue
imp_src = dst_info.get("import")
imp_src_ending = imp_src.split(".")[-1]
rc_i_n = dst_info.get("n")
rc_i_module = link.src.modules[rc_i_n]
if rc_i == imp_src_ending:
rc_i_name, rc_i_as = rc_i, None
elif rc_i_module is not None:
rc_i_name, rc_i_as = imp_src_ending, rc_i
else:
rc_i_name, rc_i_as = imp_src, rc_i
alias_list = [(rc_i_name, rc_i_as)]
for r in link.dst.rcv_agenda:
r_src_module = link.src.modules[link.dst.rcv_agenda.get(r).get("n")]
if r == rc_i or None in [rc_i_module, r_src_module]:
continue
if r_src_module == rc_i_module:
link.dst._seen_multimodule_imports.add(r)
r_dst_info = link.dst.rcv_agenda.get(r)
r_imp_src = r_dst_info.get("import")
r_imp_src_ending = r_imp_src.split(".")[-1]
r_n = r_dst_info.get("n")
r_module = link.src.modules[r_n]
if r == r_imp_src_ending:
r_name, r_as = r, None
elif r_module is not None:
r_name, r_as = r_imp_src_ending, r
else:
r_name, r_as = r_dst_info.get("import"), r
alias_list.append((r_name, r_as))
# Create the Import or ImportFrom statement
imp_stmt_str = get_import_stmt_str(alias_list, rc_i_module)
link.dst._ins_imp_stmts.append(imp_stmt_str)
link.dst.lines = (
link.dst.lines[: link.dst.last_imp_end]
+ link.dst._ins_imp_stmts
+ link.dst.lines[link.dst.last_imp_end :]
)
# sets dst.lines
def trim_whitespace_lines_pre(lines, else_error=None):
it = iter(l.rstrip() for l in lines)
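# any() consumes the generator up to and including the first non-blank line,
# so the number of items left in `it` pins down that line's index below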
if any(it):
first_nonblank_i = len(lines) - len([*it]) - 1
for _ in range(first_nonblank_i):
lines.pop(0) # remove the whitespace prefix lines
elif else_error:
raise else_error
return lines
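# Worked example of the iterator-counting trick above:
#
#     trim_whitespace_lines_pre(["", "   ", "x = 1", ""])  ->  ["x = 1", ""]
#
# (the two leading whitespace-only lines are popped; trailing blanks remain)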
def copy_src_defs_to_dst(link):
"""
Transfer mvdef into the destination file i.e. 'receive mvdef', where mvdef is an
`ast.FunctionDef` node with start/end position annotations using the line
numbers of `link.src.trunk`, computed as `link.src.defs_to_move` by
`.ast_tokens.get_defs` (in the `hasattr` check block of the
`.transfer.SrcFile.defs_to_move` property itself). This is an append operation, so
line numbers from `link.src.trunk` remain valid.
Bound as a method of the `FileLink` class, and used in step 3 of `transfer_mvdefs`.
"""
# print("Step 3: copy function definitions {mvdefs} from src to dst")
# The following line sets .defs_to_move ⇢ sets .trunk ⇢ sets .lines
link.set_src_defs_to_move()
for mvdef in link.src.defs_to_move:
indent =
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import glob
import os
import sys
import time
import warnings
from abc import ABC, abstractmethod
from collections import namedtuple
import nemo
from nemo.utils import get_checkpoint_from_dir
try:
import wandb
_WANDB_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
_WANDB_AVAILABLE = False
logging = nemo.logging
class ActionCallback(ABC):
"""Abstract interface for callbacks.
"""
def __init__(self):
self._registered_tensors = {}
self._action = None
@property
def step(self):
return self.action.step
@property
def epoch_num(self):
return self.action.epoch_num
@property
def registered_tensors(self):
return self._registered_tensors
@property
def local_rank(self):
return self.action.local_rank
@property
def global_rank(self):
return self.action.global_rank
@property
def action(self):
return self._action
@action.setter
def action(self, action_obj):
self._action = action_obj
@property
def logger(self):
warnings.warn("This will be deprecated in future releases. Please use nemo.logging instead")
return nemo.logging
def on_action_start(self):
pass
def on_action_end(self):
pass
def on_epoch_start(self):
pass
def on_epoch_end(self):
pass
def on_iteration_start(self):
pass
def on_iteration_end(self):
pass
class ModuleSaverCallback(ActionCallback):
"""
For callback documentation: please see
https://nvidia.github.io/NeMo/tutorials/callbacks.html
"""
def __init__(
self, save_modules_list, step_freq=1000, folder=None, checkpoints_to_keep=4,
):
super().__init__()
self._save_modules_list = save_modules_list
self._folder = folder
self._step_freq = step_freq
self._ckpt2keep = checkpoints_to_keep
self._saved_ckpts = []
def on_iteration_end(self):
step = self.step
if (
self._step_freq > 0
and step % self._step_freq == 0
and step > 0
and (self.global_rank is None or self.global_rank == 0)
):
for m in self._save_modules_list:
class_name = m.__class__.__name__
uid = m.unique_instance_id
fn = f"{class_name}_{uid}-STEP-{step}.pt"
if self._folder is None:
file_name = fn
else:
file_name = os.path.join(self._folder, fn)
logging.info(f"Saving module {class_name} in {file_name}")
m.save_to(file_name)
logging.info("Saved.")
self._saved_ckpts.append(f'-{self.step}.pt')
if len(self._saved_ckpts) > self._ckpt2keep:
for end in self._saved_ckpts[: -self._ckpt2keep]:
for file in glob.glob(f'{self._folder}/*{end}'):
os.remove(file)
self._saved_ckpts = self._saved_ckpts[-self._ckpt2keep :]
def on_action_end(self):
step = self.step
if self.global_rank is None or self.global_rank == 0:
for m in self._save_modules_list:
class_name = m.__class__.__name__
uid = m.unique_instance_id
fn = f"{class_name}_{uid}-STEP-{step}.pt"
if self._folder is None:
file_name = fn
else:
file_name = os.path.join(self._folder, fn)
logging.info(f"Saving module {class_name} in {file_name}")
m.save_to(file_name)
logging.info("Saved.")
class SimpleLossLoggerCallback(ActionCallback):
"""
For callback documentation: please see
https://nvidia.github.io/NeMo/tutorials/callbacks.html
"""
def __init__(
self, tensors, print_func=None, get_tb_values=None, log_to_tb_func=None, step_freq=25, tb_writer=None,
):
super().__init__()
if not isinstance(tensors, list):
tensors = [tensors]
self._tensors = tensors
self._print_func = print_func
self._get_tb_values = get_tb_values
self._log_to_tb_func = log_to_tb_func
self._step_freq = step_freq
self._swriter = tb_writer
self._start_time = None
self._last_epoch_start = None
self._last_iter_start = None
@property
def tensors(self):
return self._tensors
def on_action_start(self):
if self.global_rank is None or self.global_rank == 0:
logging.info("Starting :) :).....")
self._start_time = time.time()
def on_action_end(self):
if self.global_rank is None or self.global_rank == 0:
if self._swriter is not None:
self._swriter.close()
logging.info(f"Done in {time.time() - self._start_time}")
def on_epoch_start(self):
if self.global_rank is None or self.global_rank == 0:
logging.info(f"Starting epoch {self.epoch_num}")
self._last_epoch_start = time.time()
def on_epoch_end(self):
if self.global_rank is None or self.global_rank == 0:
step = self.step
run_time = time.time() - self._last_epoch_start
logging.info(f"Finished epoch {self.epoch_num} in {run_time}")
if self._swriter is not None:
value = self.epoch_num
self._swriter.add_scalar('misc/epoch', value, step)
value = time.time() - self._last_epoch_start
self._swriter.add_scalar('misc/epoch_time', value, step)
def on_iteration_start(self):
if self.global_rank is None or self.global_rank == 0:
self._last_iter_start = time.time()
def on_iteration_end(self):
if self.global_rank is None or self.global_rank == 0:
step = self.step
if step % self._step_freq == 0:
tensor_values = [self.registered_tensors[t.unique_name] for t in self.tensors]
logging.info(f"Step: {step}")
if self._print_func:
self._print_func(tensor_values)
sys.stdout.flush()
if self._swriter is not None:
if self._get_tb_values:
tb_objects = self._get_tb_values(tensor_values)
for name, value in tb_objects:
value = value.item()
self._swriter.add_scalar(name, value, step)
if self._log_to_tb_func:
self._log_to_tb_func(self._swriter, tensor_values, step)
run_time = time.time() - self._last_iter_start
self._swriter.add_scalar('misc/step_time', run_time, step)
run_time = time.time() - self._last_iter_start
logging.info(f"Step time: {run_time} seconds")
class CheckpointCallback(ActionCallback):
"""
For callback documentation: please see
https://nvidia.github.io/NeMo/tutorials/callbacks.html
"""
def __init__(
self, folder, load_from_folder=None, step_freq=-1, epoch_freq=-1, checkpoints_to_keep=4, force_load=False,
):
super().__init__()
if step_freq == -1 and epoch_freq == -1:
logging.warning("No checkpoints will be saved because step_freq and epoch_freq are both -1.")
if step_freq > -1 and epoch_freq > -1:
logging.warning("You config the model to save by both steps and epochs. Please use one or the other")
epoch_freq = -1
self._step_freq = step_freq
self._epoch_freq = epoch_freq
self._folder = folder
self._load_from_folder = load_from_folder if load_from_folder else folder
self._ckpt2keep = checkpoints_to_keep
self._saved_ckpts = []
# If True, run will fail if we cannot load module weights
self._force_load = force_load
def __save_to(self, path):
if self.global_rank is not None and self.global_rank != 0:
return
if not os.path.isdir(path):
logging.info(f"Creating {path} folder")
os.makedirs(path, exist_ok=True)
unique_mod_names = set()
for module in self.action.modules:
if module.num_weights > 0:
if str(module) in unique_mod_names:
raise NotImplementedError(
"There were two instances of the same module. Please overwrite __str__() of one of the "
"modules."
)
unique_mod_names.add(str(module))
if self._step_freq > -1:
filename = f"{module}-STEP-{self.step}.pt"
else:
filename = f"{module}-EPOCH-{self.epoch_num}.pt"
module.save_to(os.path.join(path, filename))
if self._step_freq > -1:
filename = f"trainer-STEP-{self.step}.pt"
self.action.save_state_to(f'{path}/{filename}')
self._saved_ckpts.append(f'-{self.step}.pt')
else:
filename = f"trainer-EPOCH-{self.epoch_num}.pt"
self.action.save_state_to(f'{path}/{filename}')
self._saved_ckpts.append(f'-{self.epoch_num}.pt')
if len(self._saved_ckpts) > self._ckpt2keep:
for end in self._saved_ckpts[: -self._ckpt2keep]:
for file in glob.glob(f'{path}/*{end}'):
os.remove(file)
self._saved_ckpts = self._saved_ckpts[-self._ckpt2keep :]
logging.info(f'Saved checkpoint: {path}/{filename}')
torch.cuda.empty_cache()
logging.info('Releasing GPU memory after model save')
def __restore_from(self, path):
if not os.path.isdir(path):
if self._force_load:
raise ValueError("force_load was set to True for checkpoint callback but a checkpoint was not found.")
logging.warning(f"Checkpoint folder {path} not found!")
else:
logging.info(f"Found checkpoint folder {path}. Will attempt to restore checkpoints from it.")
modules_to_restore = []
modules_to_restore_name = []
for module in self.action.modules:
if module.num_weights > 0:
modules_to_restore.append(module)
modules_to_restore_name.append(str(module))
try:
module_checkpoints = get_checkpoint_from_dir(modules_to_restore_name, path)
for mod, checkpoint in zip(modules_to_restore, module_checkpoints):
mod.restore_from(checkpoint, self.local_rank)
except (BaseException, ValueError) as e:
if self._force_load:
raise ValueError(
"force_load was set to True for checkpoint callback but a checkpoint was not found."
)
logging.warning(e)
logging.warning(
f"Checkpoint folder {path} was present but nothing was restored. Continuing training from random "
"initialization."
)
return
try:
trainer_checkpoints = get_checkpoint_from_dir(["trainer"], path)
for tr, checkpoint in zip([self.action], trainer_checkpoints):
tr.restore_state_from(checkpoint)
except (BaseException, ValueError) as e:
logging.warning(e)
logging.warning(
"Trainer state such as optimizer state and current step/epoch was not restored. Pretrained weights"
" have still been restore and fine-tuning should continue fine."
)
return
def on_action_start(self):
num_parameters = 0
unique_mod_names = set()
for module in self.action.modules:
if module.num_weights > 0:
if str(module) in unique_mod_names:
raise NotImplementedError(
"There were two instances of the same module. Please overwrite __str__() of one of the "
"modules."
)
unique_mod_names.add(str(module))
num_parameters += module.num_weights
logging.info(f"Found {len(unique_mod_names)} modules with " f"weights:")
for name in unique_mod_names:
logging.info(f"{name}")
logging.info(f"Total model parameters: {num_parameters}")
self.__restore_from(path=self._load_from_folder)
def on_iteration_end(self):
step = self.step
if self._step_freq > 0 and step % self._step_freq == 0 and step > 0:
self.__save_to(path=self._folder)
def on_action_end(self):
if self._step_freq > 0 or self._epoch_freq > 0:
self.__save_to(path=self._folder)
def on_epoch_start(self):
self._last_epoch_start = time.time()
def on_epoch_end(self):
if self._epoch_freq > 0:
if self.global_rank is None or self.global_rank == 0:
run_time = time.time() - self._last_epoch_start
logging.info(f'Finished epoch {self.epoch_num} in {run_time}')
if (self.epoch_num + 1) % self._epoch_freq == 0:
self.__save_to(path=self._folder)
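# Hedged usage sketch for the callbacks above, following the NeMo 0.x training
# flow; the exact NeuralModuleFactory/train() argument names here are
# assumptions if your NeMo version differs:
#
#     nf = nemo.core.NeuralModuleFactory()
#     # ...build modules and wire up a `train_loss` tensor...
#     ckpt_cb = CheckpointCallback(folder="checkpoints", step_freq=1000)
#     log_cb = SimpleLossLoggerCallback(
#         tensors=[train_loss],
#         print_func=lambda x: logging.info(f"Loss: {x[0].item()}"),
#     )
#     nf.train(tensors_to_optimize=[train_loss],
#              callbacks=[log_cb, ckpt_cb],
#              optimizer="adam",
#              optimization_params={"num_epochs": 10, "lr": 1e-3})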
class EvaluatorCallback(ActionCallback):
"""
For callback documentation: please see
https://nvidia.github.io/NeMo/tutorials/callbacks.html
"""
def __init__(
self,
eval_tensors,
user_iter_callback,
user_epochs_done_callback,
tb_writer=None,
tb_writer_func=None,
eval_step=1,
eval_epoch=None,
wandb_name=None,
wandb_project=None,
):
# TODO: Eval_epoch currently does nothing
if eval_step is None and eval_epoch is None:
raise ValueError("Either eval_step or eval_epoch must be set. " f"But got: {eval_step} and {eval_epoch}")
if (eval_step is not None and eval_step <= 0) or (eval_epoch is not None and eval_epoch <= 0):
raise ValueError(f"Eval_step and eval_epoch must be > 0." f"But got: {eval_step} and {eval_epoch}")
super().__init__()
self._eval_tensors = eval_tensors
self._swriter = tb_writer
self._tb_writer_func = tb_writer_func
self._eval_frequency = eval_step
# will be passed to callbacks below
self._global_var_dict = {}
# Callbacks
self.user_iter_callback = user_iter_callback
self.user_done_callback = user_epochs_done_callback
# Weights and biases
self._wandb_project = wandb_project
self._wandb_name = wandb_name
@property
def eval_tensors(self):
return self._eval_tensors
@property
def tb_writer_func(self):
return self._tb_writer_func
@property
def swriter(self):
return self._swriter
def on_epoch_end(self):
pass
def on_iteration_end(self):
step = self.step
if step % self._eval_frequency == 0:
if self.global_rank == 0 or self.global_rank is None:
logging.info('Doing Evaluation ' + '.' * 30)
start_time = time.time()
"""
Source: https://modis-land.gsfc.nasa.gov/pdf/sn_bound_10deg.txt
"""
from typing import Tuple
from rio_tiler.errors import RioTilerError
class InvalidModlandGridID(RioTilerError):
"""Invalid MODLAND grid id."""
# Only non-fill tiles (460)
# format:
# horizontal_grid, vertical_grid, bbox(xmin, ymin, xmax, ymax)
MODLAND_GRID = [
("14", "00", (-180.0, 80.0, -172.7151, 80.4083)),
("15", "00", (-180.0, 80.0, -115.1274, 83.625)),
("16", "00", (-180.0, 80.0, -57.5397, 86.8167)),
("17", "00", (-180.0, 80.0, 57.2957, 90.0)),
("18", "00", (-0.004, 80.0, 180.0, 90.0)),
("19", "00", (57.5877, 80.0, 180.0, 86.8167)),
("20", "00", (115.1754, 80.0, 180.0, 83.625)),
("21", "00", (172.7631, 80.0, 180.0, 80.4083)),
("11", "01", (-180.0, 70.0, -175.4039, 70.5333)),
("12", "01", (-180.0, 70.0, -146.1659, 73.875)),
("13", "01", (-180.0, 70.0, -116.9278, 77.1667)),
("14", "01", (-180.0, 70.0, -87.6898, 80.0)),
("15", "01", (-172.7631, 70.0, -58.4517, 80.0)),
("16", "01", (-115.1754, 70.0, -29.2137, 80.0)),
("17", "01", (-57.5877, 70.0, 0.048, 80.0)),
("18", "01", (0.0, 70.0, 57.6357, 80.0)),
("19", "01", (29.238, 70.0, 115.2234, 80.0)),
("20", "01", (58.4761, 70.0, 172.8111, 80.0)),
("21", "01", (87.7141, 70.0, 180.0, 80.0)),
("22", "01", (116.9522, 70.0, 180.0, 77.1583)),
("23", "01", (146.1902, 70.0, 180.0, 73.875)),
("24", "01", (175.4283, 70.0, 180.0, 70.5333)),
("09", "02", (-180.0, 60.0, -159.9833, 63.6167)),
("10", "02", (-180.0, 60.0, -139.9833, 67.1167)),
("11", "02", (-180.0, 60.0, -119.9833, 70.0)),
("12", "02", (-175.4283, 60.0, -99.9833, 70.0)),
("13", "02", (-146.1902, 60.0, -79.9833, 70.0)),
("14", "02", (-116.9522, 60.0, -59.9833, 70.0)),
("15", "02", (-87.7141, 60.0, -39.9833, 70.0)),
("16", "02", (-58.4761, 60.0, -19.9833, 70.0)),
("17", "02", (-29.238, 60.0, 0.0244, 70.0)),
("18", "02", (0.0, 60.0, 29.2624, 70.0)),
("19", "02", (20.0, 60.0, 58.5005, 70.0)),
("20", "02", (40.0, 60.0, 87.7385, 70.0)),
("21", "02", (60.0, 60.0, 116.9765, 70.0)),
("22", "02", (80.0, 60.0, 146.2146, 70.0)),
("23", "02", (100.0, 60.0, 175.4526, 70.0)),
("24", "02", (120.0, 60.0, 180.0, 70.0)),
("25", "02", (140.0, 60.0, 180.0, 67.1167)),
("26", "02", (160.0, 60.0, 180.0, 63.6167)),
("06", "03", (-180.0, 50.0, -171.1167, 52.3333)),
("07", "03", (-180.0, 50.0, -155.5594, 56.2583)),
("08", "03", (-180.0, 50.0, -140.0022, 60.0)),
("09", "03", (-180.0, 50.0, -124.4449, 60.0)),
("10", "03", (-160.0, 50.0, -108.8877, 60.0)),
("11", "03", (-140.0, 50.0, -93.3305, 60.0)),
("12", "03", (-120.0, 50.0, -77.7732, 60.0)),
("13", "03", (-100.0, 50.0, -62.216, 60.0)),
("14", "03", (-80.0, 50.0, -46.6588, 60.0)),
("15", "03", (-60.0, 50.0, -31.1015, 60.0)),
("16", "03", (-40.0, 50.0, -15.5443, 60.0)),
("17", "03", (-20.0, 50.0, 0.0167, 60.0)),
("18", "03", (0.0, 50.0, 20.0167, 60.0)),
("19", "03", (15.5572, 50.0, 40.0167, 60.0)),
("20", "03", (31.1145, 50.0, 60.0167, 60.0)),
("21", "03", (46.6717, 50.0, 80.0167, 60.0)),
("22", "03", (62.229, 50.0, 100.0167, 60.0)),
("23", "03", (77.7862, 50.0, 120.0167, 60.0)),
("24", "03", (93.3434, 50.0, 140.0167, 60.0)),
("25", "03", (108.9007, 50.0, 160.0167, 60.0)),
("26", "03", (124.4579, 50.0, 180.0, 60.0)),
("27", "03", (140.0151, 50.0, 180.0, 60.0)),
("28", "03", (155.5724, 50.0, 180.0, 56.25)),
("29", "03", (171.1296, 50.0, 180.0, 52.3333)),
("04", "04", (-180.0, 40.0, -169.6921, 43.7667)),
("05", "04", (-180.0, 40.0, -156.638, 48.1917)),
("06", "04", (-180.0, 40.0, -143.5839, 50.0)),
("07", "04", (-171.1296, 40.0, -130.5299, 50.0)),
("08", "04", (-155.5724, 40.0, -117.4758, 50.0)),
("09", "04", (-140.0151, 40.0, -104.4217, 50.0)),
("10", "04", (-124.4579, 40.0, -91.3676, 50.0)),
("11", "04", (-108.9007, 40.0, -78.3136, 50.0)),
("12", "04", (-93.3434, 40.0, -65.2595, 50.0)),
("13", "04", (-77.7862, 40.0, -52.2054, 50.0)),
("14", "04", (-62.229, 40.0, -39.1513, 50.0)),
("15", "04", (-46.6717, 40.0, -26.0973, 50.0)),
("16", "04", (-31.1145, 40.0, -13.0432, 50.0)),
("17", "04", (-15.5572, 40.0, 0.013, 50.0)),
("18", "04", (0.0, 40.0, 15.5702, 50.0)),
("19", "04", (13.0541, 40.0, 31.1274, 50.0)),
("20", "04", (26.1081, 40.0, 46.6847, 50.0)),
("21", "04", (39.1622, 40.0, 62.2419, 50.0)),
("22", "04", (52.2163, 40.0, 77.7992, 50.0)),
("23", "04", (65.2704, 40.0, 93.3564, 50.0)),
("24", "04", (78.3244, 40.0, 108.9136, 50.0)),
("25", "04", (91.3785, 40.0, 124.4709, 50.0)),
("26", "04", (104.4326, 40.0, 140.0281, 50.0)),
("27", "04", (117.4867, 40.0, 155.5853, 50.0)),
("28", "04", (130.5407, 40.0, 171.1426, 50.0)),
("29", "04", (143.5948, 40.0, 180.0, 50.0)),
("30", "04", (156.6489, 40.0, 180.0, 48.1917)),
("31", "04", (169.7029, 40.0, 180.0, 43.7583)),
("02", "05", (-180.0, 30.0, -173.1955, 33.5583)),
("03", "05", (-180.0, 30.0, -161.6485, 38.95)),
("04", "05", (-180.0, 30.0, -150.1014, 40.0)),
("05", "05", (-169.7029, 30.0, -138.5544, 40.0)),
("06", "05", (-156.6489, 30.0, -127.0074, 40.0)),
("07", "05", (-143.5948, 30.0, -115.4604, 40.0)),
("08", "05", (-130.5407, 30.0, -103.9134, 40.0)),
("09", "05", (-117.4867, 30.0, -92.3664, 40.0)),
("10", "05", (-104.4326, 30.0, -80.8194, 40.0)),
("11", "05", (-91.3785, 30.0, -69.2724, 40.0)),
("12", "05", (-78.3244, 30.0, -57.7254, 40.0)),
("13", "05", (-65.2704, 30.0, -46.1784, 40.0)),
("14", "05", (-52.2163, 30.0, -34.6314, 40.0)),
("15", "05", (-39.1622, 30.0, -23.0844, 40.0)),
("16", "05", (-26.1081, 30.0, -11.5374, 40.0)),
("17", "05", (-13.0541, 30.0, 0.0109, 40.0)),
("18", "05", (0.0, 30.0, 13.065, 40.0)),
("19", "05", (11.547, 30.0, 26.119, 40.0)),
("20", "05", (23.094, 30.0, 39.1731, 40.0)),
("21", "05", (34.641, 30.0, 52.2272, 40.0)),
("22", "05", (46.188, 30.0, 65.2812, 40.0)),
("23", "05", (57.735, 30.0, 78.3353, 40.0)),
("24", "05", (69.282, 30.0, 91.3894, 40.0)),
("25", "05", (80.829, 30.0, 104.4435, 40.0)),
("26", "05", (92.376, 30.0, 117.4975, 40.0)),
("27", "05", (103.923, 30.0, 130.5516, 40.0)),
("28", "05", (115.4701, 30.0, 143.6057, 40.0)),
("29", "05", (127.0171, 30.0, 156.6598, 40.0)),
("30", "05", (138.5641, 30.0, 169.7138, 40.0)),
("31", "05", (150.1111, 30.0, 180.0, 40.0)),
("32", "05", (161.6581, 30.0, 180.0, 38.9417)),
("33", "05", (173.2051, 30.0, 180.0, 33.5583)),
("01", "06", (-180.0, 20.0, -170.2596, 27.2667)),
("02", "06", (-180.0, 20.0, -159.6178, 30.0)),
("03", "06", (-173.2051, 20.0, -148.976, 30.0)),
("04", "06", (-161.6581, 20.0, -138.3342, 30.0)),
("05", "06", (-150.1111, 20.0, -127.6925, 30.0)),
("06", "06", (-138.5641, 20.0, -117.0507, 30.0)),
("07", "06", (-127.0171, 20.0, -106.4089, 30.0)),
("08", "06", (-115.4701, 20.0, -95.7671, 30.0)),
("09", "06", (-103.923, 20.0, -85.1254, 30.0)),
("10", "06", (-92.376, 20.0, -74.4836, 30.0)),
("11", "06", (-80.829, 20.0, -63.8418, 30.0)),
("12", "06", (-69.282, 20.0, -53.2, 30.0)),
("13", "06", (-57.735, 20.0, -42.5582, 30.0)),
("14", "06", (-46.188, 20.0, -31.9165, 30.0)),
("15", "06", (-34.641, 20.0, -21.2747, 30.0)),
("16", "06", (-23.094, 20.0, -10.6329, 30.0)),
("17", "06", (-11.547, 20.0, 0.0096, 30.0)),
("18", "06", (0.0, 20.0, 11.5566, 30.0)),
("19", "06", (10.6418, 20.0, 23.1036, 30.0)),
("20", "06", (21.2836, 20.0, 34.6506, 30.0)),
("21", "06", (31.9253, 20.0, 46.1976, 30.0)),
("22", "06", (42.5671, 20.0, 57.7446, 30.0)),
("23", "06", (53.2089, 20.0, 69.2917, 30.0)),
("24", "06", (63.8507, 20.0, 80.8387, 30.0)),
("25", "06", (74.4924, 20.0, 92.3857, 30.0)),
("26", "06", (85.1342, 20.0, 103.9327, 30.0)),
("27", "06", (95.776, 20.0, 115.4797, 30.0)),
("28", "06", (106.4178, 20.0, 127.0267, 30.0)),
("29", "06", (117.0596, 20.0, 138.5737, 30.0)),
("30", "06", (127.7013, 20.0, 150.1207, 30.0)),
("31", "06", (138.3431, 20.0, 161.6677, 30.0)),
("32", "06", (148.9849, 20.0, 173.2147, 30.0)),
("33", "06", (159.6267, 20.0, 180.0, 30.0)),
("34", "06", (170.2684, 20.0, 180.0, 27.2667)),
("00", "07", (-180.0, 10.0, -172.6141, 19.1917)),
("01", "07", (-180.0, 10.0, -162.4598, 20.0)),
("02", "07", (-170.2684, 10.0, -152.3055, 20.0)),
("03", "07", (-159.6267, 10.0, -142.1513, 20.0)),
("04", "07", (-148.9849, 10.0, -131.997, 20.0)),
("05", "07", (-138.3431, 10.0, -121.8427, 20.0)),
("06", "07", (-127.7013, 10.0, -111.6885, 20.0)),
("07", "07", (-117.0596, 10.0, -101.5342, 20.0)),
("08", "07", (-106.4178, 10.0, -91.3799, 20.0)),
("09", "07", (-95.776, 10.0, -81.2257, 20.0)),
("10", "07", (-85.1342, 10.0, -71.0714, 20.0)),
("11", "07", (-74.4924, 10.0, -60.9171, 20.0)),
("12", "07", (-63.8507, 10.0, -50.7629, 20.0)),
("13", "07", (-53.2089, 10.0, -40.6086, 20.0)),
("14", "07", (-42.5671, 10.0, -30.4543, 20.0)),
("15", "07", (-31.9253, 10.0, -20.3001, 20.0)),
("16", "07", (-21.2836, 10.0, -10.1458, 20.0)),
("17", "07", (-10.6418, 10.0, 0.0089, 20.0)),
("18", "07", (0.0, 10.0, 10.6506, 20.0)),
("19", "07", (10.1543, 10.0, 21.2924, 20.0)),
("20", "07", (20.3085, 10.0, 31.9342, 20.0)),
("21", "07", (30.4628, 10.0, 42.576, 20.0)),
("22", "07", (40.6171, 10.0, 53.2178, 20.0)),
("23", "07", (50.7713, 10.0, 63.8595, 20.0)),
("24", "07", (60.9256, 10.0, 74.5013, 20.0)),
("25", "07", (71.0799, 10.0, 85.1431, 20.0)),
("26", "07", (81.2341, 10.0, 95.7849, 20.0)),
("27", "07", (91.3884, 10.0, 106.4266, 20.0)),
("28", "07", (101.5427, 10.0, 117.0684, 20.0)),
("29", "07", (111.6969, 10.0, 127.7102, 20.0)),
("30", "07", (121.8512, 10.0, 138.352, 20.0)),
("31", "07", (132.0055, 10.0, 148.9938, 20.0)),
("32", "07", (142.1597, 10.0, 159.6355, 20.0)),
("33", "07", (152.314, 10.0, 170.2773, 20.0)),
("34", "07", (162.4683, 10.0, 180.0, 20.0)),
("35", "07", (172.6225, 10.0, 180.0, 19.1833)),
("00", "08", (-180.0, -0.0, -169.9917, 10.0)),
("01", "08", (-172.6225, -0.0, -159.9917, 10.0)),
("02", "08", (-162.4683, -0.0, -149.9917, 10.0)),
("03", "08", (-152.314, -0.0, -139.9917, 10.0)),
("04", "08", (-142.1597, -0.0, -129.9917, 10.0)),
("05", "08", (-132.0055, -0.0, -119.9917, 10.0)),
("06", "08", (-121.8512, -0.0, -109.9917, 10.0)),
("07", "08", (-111.6969, -0.0, -99.9917, 10.0)),
("08", "08", (-101.5427, -0.0, -89.9917, 10.0)),
("09", "08", (-91.3884, -0.0, -79.9917, 10.0)),
("10", "08", (-81.2341, -0.0, -69.9917, 10.0)),
("11", "08", (-71.0799, -0.0, -59.9917, 10.0)),
("12", "08", (-60.9256, -0.0, -49.9917, 10.0)),
("13", "08", (-50.7713, -0.0, -39.9917, 10.0)),
("14", "08", (-40.6171, -0.0, -29.9917, 10.0)),
c, d)
E_phi[i] += alpha[m, n]*GetM_mplus1n_o_plus_phi(eta, ksi, phi, m, n, c, d)
M, N = beta.shape
for m in range(M):
for n in range(m, N):
E_eta[i] += beta[m, n]*GetM_mn_o_z_eta(eta, ksi, phi, m, n, c, d)
E_ksi[i] += beta[m, n]*GetM_mn_o_z_ksi(eta, ksi, phi, m, n, c, d)
E_phi[i] += beta[m, n]*GetM_mn_o_z_phi(eta, ksi, phi, m, n, c, d)
N = len(gamma)
for n in range(1,N):
E_eta[i] += gamma[n]*GetM_mminus1n_o_minus_eta(eta, ksi, phi, 1, n, c, d)
E_ksi[i] += gamma[n]*GetM_mminus1n_o_minus_ksi(eta, ksi, phi, 1, n, c, d)
E_phi[i] += gamma[n]*GetM_mminus1n_o_minus_phi(eta, ksi, phi, 1, n, c, d)
if totalField:
z_hat_eta = ksi*np.sqrt((1 - eta**2)/(ksi**2 - eta**2))
z_hat_ksi = eta * np.sqrt((ksi**2 - 1)/(ksi**2 - eta**2))
x = d/2*np.sqrt(1 - eta**2)*np.sqrt(ksi**2 - 1)*np.cos(phi)
E_eta[i] += E0*np.exp(1j*k*x)*z_hat_eta
E_ksi[i] += E0*np.exp(1j*k*x)*z_hat_ksi
return E_eta, E_ksi, E_phi
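# Note on the totalField branch above: the incident field is a z-polarised
# plane wave E0 * exp(i k x) * z_hat, and projecting z_hat onto the prolate
# spheroidal unit vectors gives
#     z_hat . eta_hat = ksi * sqrt((1 - eta**2)/(ksi**2 - eta**2))
#     z_hat . ksi_hat = eta * sqrt((ksi**2 - 1)/(ksi**2 - eta**2))
# which is exactly what z_hat_eta and z_hat_ksi compute before the incident
# field is added to the scattered one.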
def GetFieldAtCartesianPoints(self, alpha, beta, gamma, r_pts, totalField=True):
## r = [x, y, z]
a, c, d = self.a, self.c, self.d
k = self.k
E0 = self.E0
n_pts = len(r_pts)
E_eta = np.zeros(n_pts, dtype=complex)
E_ksi = np.zeros(n_pts, dtype=complex)
E_phi = np.zeros(n_pts, dtype=complex)
for i in range(n_pts):
x, y, z = r_pts[i]
eta, ksi, phi = self.CoordinatePointTransformRectToSpheroid(x, y, z)
M, N = alpha.shape
for m in range(M):
for n in range(m, N):
E_eta[i] += alpha[m, n]*GetM_mplus1n_o_plus_eta(eta, ksi, phi, m, n, c, d)
E_ksi[i] += alpha[m, n]*GetM_mplus1n_o_plus_ksi(eta, ksi, phi, m, n, c, d)
E_phi[i] += alpha[m, n]*GetM_mplus1n_o_plus_phi(eta, ksi, phi, m, n, c, d)
M, N = beta.shape
for m in range(M):
for n in range(m, N):
E_eta[i] += beta[m, n]*GetM_mn_o_z_eta(eta, ksi, phi, m, n, c, d)
E_ksi[i] += beta[m, n]*GetM_mn_o_z_ksi(eta, ksi, phi, m, n, c, d)
E_phi[i] += beta[m, n]*GetM_mn_o_z_phi(eta, ksi, phi, m, n, c, d)
N = len(gamma)
for n in range(1,N):
E_eta[i] += gamma[n]*GetM_mminus1n_o_minus_eta(eta, ksi, phi, 1, n, c, d)
E_ksi[i] += gamma[n]*GetM_mminus1n_o_minus_ksi(eta, ksi, phi, 1, n, c, d)
E_phi[i] += gamma[n]*GetM_mminus1n_o_minus_phi(eta, ksi, phi, 1, n, c, d)
if totalField:
z_hat_eta = ksi*np.sqrt((1 - eta**2)/(ksi**2 - eta**2))
z_hat_ksi = eta * np.sqrt((ksi**2 - 1)/(ksi**2 - eta**2))
# x already holds the Cartesian coordinate from r_pts; the equivalent spheroidal expression would be x = d/2*np.sqrt(1 - eta**2)*np.sqrt(ksi**2 - 1)*np.cos(phi)
E_eta[i] += E0*np.exp(1j*k*x)*z_hat_eta
E_ksi[i] += E0*np.exp(1j*k*x)*z_hat_ksi
return E_eta, E_ksi, E_phi
def CoordinatePointTransformSpheroidToRect(self, eta, ksi, phi):
d = self.d
x = d/2*sqrt((1 - eta**2))*sqrt((ksi**2 - 1))*cos(phi)
y = d/2*sqrt((1 - eta**2))*sqrt((ksi**2 - 1))*sin(phi)
z = d/2*eta*ksi
return x, y, z
def CoordinatePointTransformRectToSpheroid(self, x, y, z):
d = self.d
ksi = (np.sqrt(x**2 + y**2 + (z + d/2)**2) + np.sqrt(x**2 + y**2 + (z - d/2)**2))/d
eta = (np.sqrt(x**2 + y**2 + (z + d/2)**2) - np.sqrt(x**2 + y**2 + (z - d/2)**2))/d
phi = np.arctan2(y, x)
return eta, ksi, phi
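    # Added note (not part of the original source): the two transforms above are exact
    # inverses, so a round-trip check is a useful sanity test. A minimal sketch, assuming
    # only numpy; the focal distance d is passed explicitly rather than read from the
    # instance.
    def _CheckSpheroidRoundTrip(eta, ksi, phi, d, tol=1.0e-9):
        import numpy as np
        # forward map: prolate spheroidal -> Cartesian (foci at z = +/- d/2)
        x = d/2*np.sqrt(1 - eta**2)*np.sqrt(ksi**2 - 1)*np.cos(phi)
        y = d/2*np.sqrt(1 - eta**2)*np.sqrt(ksi**2 - 1)*np.sin(phi)
        z = d/2*eta*ksi
        # inverse map: sum/difference of the distances to the two foci give ksi and eta
        r1 = np.sqrt(x**2 + y**2 + (z + d/2)**2)
        r2 = np.sqrt(x**2 + y**2 + (z - d/2)**2)
        return abs((r1 + r2)/d - ksi) < tol and abs((r1 - r2)/d - eta) < tol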
def PlotFieldAroundTipAtXZPlane(self, Dx, Dz, nx, nz, alpha, beta, gamma, totalField=True):
ksi_0 = self.ksi
a, c, d = self.a, self.c, self.d
E0, k = self.E0, self.k
x = np.linspace(-Dx/2, Dx/2, nx)
z = np.linspace(a/2-Dz/2, a/2+Dz/2, nz)
X, Z = np.meshgrid(x, z, indexing="ij")
r_pts = []
r_inds = []
for i in range(nx):
x_i = x[i]
for j in range(nz):
z_j = z[j]
eta, ksi, phi = self.CoordinatePointTransformRectToSpheroid(x_i, 0.0, z_j)
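                # keep only evaluation points outside the tip surface (ksi > ksi_0);
                # points inside the spheroid are masked out of the field map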
if ksi > ksi_0:
r_pts.append([x_i, 0.0, z_j])
r_inds.append((i, j))
E_eta, E_ksi, E_phi = self.GetFieldAtCartesianPoints(alpha, beta, gamma, r_pts, totalField)
E_ksi_mesh = np.zeros(X.shape, dtype=complex)
for i in range(len(r_inds)):
E_ksi_mesh[r_inds[i]] = E_ksi[i]
        E_ksi_mesh[~np.isfinite(E_ksi_mesh)] = 0.0  # zero out non-finite entries (multiplying by the boolean mask would keep NaNs, since NaN*0 == NaN)
#plt.imshow(np.log10(np.abs(E_ksi_mesh.T) + 1.0e-100), origin="lower", vmin=-10)
plt.imshow(np.abs(E_ksi_mesh.T), origin="lower", cmap="rainbow", aspect="auto",
extent=np.array([-Dx/2, Dx/2, -Dz/2, Dz/2])/constants.nano)
plt.colorbar()
plt.xlabel(r'x (nm)', fontsize=18)
plt.ylabel(r'z (nm)', fontsize=18)
plt.title(r'R={:.2f} nm, f={:.2f} THz, L={:.2f} $\mu$m'.format(self.tipRadius/constants.nano, self.freq/constants.tera, self.length/constants.micro), fontsize=18)
plt.savefig("../out/tip_field.png", bbox_inches='tight', pad_inches=0.5)
plt.show()
def GetAlphaBetaGamma(self, vbose=False):
A, b = self.ConstructMatrix()
if vbose:
cond = np.linalg.cond(A)
print("condition: ", cond)
x = np.linalg.solve(A, b)
if vbose:
print("error: ", np.linalg.norm(A.dot(x) - b))
alpha, beta, gamma = self.GetAlphaBetaGamma_from_X(x)
return alpha, beta, gamma
def GetTipEnhancement(self, plotMatrices=False, plotFields=False, varifySurfaceField=False):
print("a = {}, b = {}, ksi = {}, d = {} ".format(self.a, self.b, self.ksi, self.d))
print("c_0 = {} ".format(self.c))
A, b = self.ConstructMatrix()
cond = np.linalg.cond(A)
print("condition: ", cond)
x = np.linalg.solve(A, b)
print("error: ", np.linalg.norm(A.dot(x) - b))
alpha, beta, gamma = self.GetAlphaBetaGamma_from_X(x)
if plotMatrices:
plt.imshow(np.log10(np.abs(alpha) + 1.0e-100))
plt.colorbar()
plt.show()
plt.imshow(np.log10(np.abs(beta) + 1.0e-100))
plt.colorbar()
plt.show()
if varifySurfaceField:
eps = 1.0e-5
etas = np.linspace(0, 1.0-eps, 20, endpoint=True)
phi_0 = 0.0
ksi_0 = self.ksi
d = self.d
E_eta0, E_ksi0 = self.GetETMonSurface_direct(etas, ksi_0, phi_0)
E_eta1, E_ksi1 = self.GetETMonSurface_expansion(etas, ksi_0, phi_0)
E_eta2, E_ksi2, _ = self.GetFieldOnSurface_(alpha, beta, gamma, etas, ksi_0, phi_0)
#np.set_printoptions(precision=5)
print('='*50)
for i in range(len(E_eta0)):
print("{:.4e} {:.4e} {:.4e} || {:.5f}".format(E_eta0[i], E_eta1[i], E_eta2[i], abs(E_eta2[i]/E_eta0[i])))
print('-'*50)
for i in range(len(E_eta0)):
print("{:.3e} {:.3e} {:.3e} || {:.3f}".format(E_ksi0[i], E_ksi1[i], E_ksi2[i], abs(E_ksi2[i])))
if plotFields:
self.PlotFieldAroundTipAtXZPlane(Dx=4.0*self.tipRadius, Dz=4.0*self.tipRadius, nx=20, nz=20,
alpha=alpha, beta=beta, gamma=gamma,
totalField=True)
_, E_ksi, _ = self.GetFieldOnSurface(alpha, beta, gamma, etas=[1.0 - 1.0e-5], ksi=self.ksi, phi=0.0)
return E_ksi[-1], A, b
def GetTipEnhancementFromMatrix(self, alpha, beta, gamma, varifySurfaceField=False):
if varifySurfaceField:
eps = 1.0e-5
etas = np.linspace(0, 1.0-eps, 20, endpoint=True)
phi_0 = 0.0
ksi_0 = self.ksi
E_eta0, E_ksi0 = self.GetETMonSurface_direct(etas, ksi_0, phi_0)
E_eta1, E_ksi1 = self.GetETMonSurface_expansion(etas, ksi_0, phi_0)
E_eta2, E_ksi2, _ = self.GetFieldOnSurface(alpha, beta, gamma, etas, ksi_0, phi_0, totalField=False)
#np.set_printoptions(precision=5)
print('='*50)
for i in range(len(E_eta0)):
print("{:.4e} {:.4e} {:.4e} || {:.5f}".format(E_eta0[i], E_eta1[i], E_eta2[i], abs(E_eta2[i]/E_eta0[i])))
print('-'*50)
for i in range(len(E_eta0)):
print("{:.3e} {:.3e} {:.3e} || {:.3f}".format(E_ksi0[i], E_ksi1[i], E_ksi2[i], abs(E_ksi2[i])))
_, E_ksi, _ = self.GetFieldOnSurface(alpha, beta, gamma, etas=[1.0 - 1.0e-5], ksi=self.ksi, phi=0.0)
return E_ksi[-1]
def GetTipEnhancementOverFrequencyBand(self, freq, Nt):
assert len(freq)==len(Nt)
enhancement = np.zeros(len(freq), dtype=complex)
for i in range(len(freq)):
self.SetFrequency(freq[i])
self.SetNumberOfHarmonics(Nt[i])
enhancement[i] = self.GetTipEnhancement(plotMatrices=True, plotFields=True, varifySurfaceField=True)[0]
print("i:{} Nt:{} f:{}, e:{}".format(i, Nt[i], freq[i]/constants.tera, np.abs(enhancement[i])))
print('='*50)
print('='*50)
return enhancement
def GetAlphaBetaGammaOverFrequencyBand(self, freq, Nt):
assert len(freq) == len(Nt)
matrices = []
for i in range(len(freq)):
self.SetFrequency(freq[i])
self.SetNumberOfHarmonics(Nt[i])
matrices.append(self.GetAlphaBetaGamma())
return matrices
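# Usage sketch (added for illustration; `TipSimulator` is a hypothetical name for the
# class above, whose definition lies outside this excerpt):
#
#   sim = TipSimulator(...)                      # construct with tip geometry and drive field
#   alpha, beta, gamma = sim.GetAlphaBetaGamma(vbose=True)
#   E_eta, E_ksi, E_phi = sim.GetFieldAtCartesianPoints(alpha, beta, gamma,
#                                                       r_pts=[[0.0, 0.0, sim.a/2*1.01]])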
def GetRadialFunc(i, m, n, c, x):
if i == 1:
return pro_rad1(m, n, c, x)[0]
elif i == 2:
return pro_rad2(m, n, c, x)[0]
elif i == 3:
return pro_rad1(m, n, c, x)[0] + 1j*pro_rad2(m, n, c, x)[0]
elif i == 4:
return pro_rad1(m, n, c, x)[0] - 1j*pro_rad2(m, n, c, x)[0]
else:
assert False
def GetDerivativeRadialFunc(i, m, n, c, x):
if i == 1:
return pro_rad1(m, n, c, x)[1]
elif i == 2:
return pro_rad2(m, n, c, x)[1]
elif i == 3:
return pro_rad1(m, n, c, x)[1] + 1j*pro_rad2(m, n, c, x)[1]
elif i == 4:
return pro_rad1(m, n, c, x)[1] - 1j*pro_rad2(m, n, c, x)[1]
else:
assert False
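# Added note (not part of the original source): kinds 1 and 2 above are the prolate
# spheroidal radial functions of the first and second kind, while kinds 3 and 4 are the
# Hankel-like combinations R1 + i*R2 and R1 - i*R2 used for radiating solutions. A
# minimal consistency check, assuming scipy.special supplies pro_rad1/pro_rad2 as
# imported elsewhere in this module:
def _check_radial_kinds(m=0, n=1, c=1.0, x=1.5):
    r1 = GetRadialFunc(1, m, n, c, x)
    r2 = GetRadialFunc(2, m, n, c, x)
    scale = 1.0 + abs(r1) + abs(r2)
    assert abs(GetRadialFunc(3, m, n, c, x) - (r1 + 1j*r2)) < 1.0e-12*scale
    assert abs(GetRadialFunc(4, m, n, c, x) - (r1 - 1j*r2)) < 1.0e-12*scale
    return r1, r2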
def GetM_mplus1n_o_plus_eta(eta, ksi, phi, m, n, c, d):
return ((ksi**2 - 1.0)*GetDerivativeRadialFunc(4, m, n, c, ksi) - ksi*m*GetRadialFunc(4, m, n, c, ksi))*pro_ang1(m, n, c, eta)[0]*np.cos(phi*(m + 1))/(d*np.sqrt(-eta**2 + ksi**2)*np.sqrt(ksi**2 - 1))
def GetM_mplus1n_o_plus_ksi(eta, ksi, phi, m, n, c, d):
return ((eta**2 - 1)*pro_ang1(m, n, c, eta)[1] - eta*m*pro_ang1(m, n, c, eta)[0])*GetRadialFunc(4, m, n, c, ksi)*np.cos(phi*(m + 1))/(d*np.sqrt(-eta**2 + 1)*np.sqrt(-eta**2 + ksi**2))
def GetM_mplus1n_o_plus_phi(eta, ksi, phi, m, n, c, d):
return -(eta*(ksi**2 - 1)*pro_ang1(m, n, c, eta)[0]*GetDerivativeRadialFunc(4, m, n, c, ksi) - ksi*(eta**2 - 1)*GetRadialFunc(4, m, n, c, ksi)*pro_ang1(m, n, c, eta)[1])*np.sin(phi*m + phi)/(d*(eta**2 - ksi**2))
def GetM_mminus1n_o_minus_eta(eta, ksi, phi, m, n, c, d):
return ((1.0 - ksi**2)*GetDerivativeRadialFunc(4, m, n, c, ksi) - ksi*m*GetRadialFunc(4, m, n, c, ksi))*pro_ang1(m, n, c, eta)[0]*np.cos(phi*(m - 1))/(d*np.sqrt(-eta**2 + ksi**2)*np.sqrt(ksi**2 - 1))
def GetM_mminus1n_o_minus_ksi(eta, ksi, phi, m, n, c, d):
return ((1.0-eta**2)*pro_ang1(m, n, c, eta)[1] - eta*m*pro_ang1(m, n, c, eta)[0])*GetRadialFunc(4, m, n, c, ksi)*np.cos(phi*(m - 1))/(d*np.sqrt(-eta**2 + 1)*np.sqrt(-eta**2 + ksi**2))
def GetM_mminus1n_o_minus_phi(eta, ksi, phi, m, n, c, d):
return -(eta*(ksi**2 - 1)*pro_ang1(m, n, c, eta)[0]*GetDerivativeRadialFunc(4, m, n, c, ksi) - ksi*(eta**2 - 1)*GetRadialFunc(4, m, n, c, ksi)*pro_ang1(m, n, c, eta)[1])*np.sin(phi*(m - 1))/(d*(eta**2 - ksi**2))
def GetM_mn_o_z_eta(eta, ksi, phi, m, n, c, d):
return -2*eta*m*GetRadialFunc(4, m, n, c, ksi)*pro_ang1(m, n, c, eta)[0]*np.cos(phi*m)/(d*np.sqrt(-eta**2 + 1)*np.sqrt(-eta**2 + ksi**2))
def GetM_mn_o_z_ksi(eta, ksi, phi, m, n, c, d):
return 2*ksi*m*GetRadialFunc(4, m, n, c, ksi)*pro_ang1(m, n, c, eta)[0]*np.cos(phi*m)/(d*np.sqrt(-eta**2 + ksi**2)*np.sqrt(ksi**2 - 1))
def GetM_mn_o_z_phi(eta, ksi, phi, m, n, c, d):
return 2*np.sqrt(-eta**2 + 1)*np.sqrt(ksi**2 - 1)*(-eta*GetRadialFunc(4, m, n, c, ksi)*pro_ang1(m, n, c, eta)[1] + ksi*pro_ang1(m, n, c, eta)[0]*GetDerivativeRadialFunc(4, m, n, c, ksi))*np.sin(phi*m)/(d*(eta**2 - ksi**2))
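# Added note: Get_Int_PmnPmn evaluates the associated Legendre normalization integral,
#   integral_{-1}^{1} [P_n^m(x)]^2 dx = 2/(2n+1) * (n+m)!/(n-m)!,
# with the loop below accumulating the factorial ratio (n+m)!/(n-m)!.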
def Get_Int_PmnPmn(m, n):
if m > n:
return 0
fact_mpn_div_fact_mmn = 1.0
for i in range(n +
np.uint64, np.int8,
np.int16, np.int32, np.int64
],
# Only the harnesses with "singularity" will have divide by 0
enabled=("singularity" in harness.name))
]
@classmethod
def dot_general(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(dtypes=[np.bool_],),
# TODO(b/189287598)
Jax2TfLimitation(
"Non-deterministic NaN for dot_general with preferred_element_type on GPU (b/189287598)",
dtypes=[
jnp.bfloat16, np.float16, np.float32, np.complex64
],
devices="gpu",
modes=("eager", "graph", "compiled"),
enabled=(harness.params["preferred_element_type"] is not None),
skip_comparison=True),
# JAX performs float16 matmuls in float32 on CPU, so the JAX result
# may be more precise.
custom_numeric(dtypes=[np.float16], devices=["cpu"], tol=1e-2,
modes=("eager", "graph", "compiled")),
]
@classmethod
def eig(cls, harness: primitive_harness.Harness):
compute_left_eigenvectors = harness.params["compute_left_eigenvectors"]
compute_right_eigenvectors = harness.params["compute_right_eigenvectors"]
dtype = harness.dtype
def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
operand, = args
inner_dimension = operand.shape[-1]
      # Test ported from tests.linalg_test.testEig
# Norm, adjusted for dimension and type.
def norm(x):
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / ((inner_dimension + 1) * jnp.finfo(dtype).eps)
def check_right_eigenvectors(a, w, vr):
tst.assertTrue(
np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))
def check_left_eigenvectors(a, w, vl):
rank = len(a.shape)
aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
wC = jnp.conj(w)
check_right_eigenvectors(aH, wC, vl)
def check_eigenvalue_is_in_array(eigenvalue, eigenvalues_array):
tol = None
# TODO(bchetioui): numerical discrepancies
if dtype in [np.float32, np.complex64]:
tol = 1e-4
elif dtype in [np.float64, np.complex128]:
tol = 1e-13
closest_diff = min(abs(eigenvalues_array - eigenvalue))
tst.assertAllClose(
closest_diff,
np.array(0., closest_diff.dtype),
atol=tol,
err_msg=err_msg)
all_w_jax, all_w_tf = result_jax[0], result_tf[0]
for idx in itertools.product(*map(range, operand.shape[:-2])):
w_jax, w_tf = all_w_jax[idx], all_w_tf[idx]
for i in range(inner_dimension):
check_eigenvalue_is_in_array(w_jax[i], w_tf)
check_eigenvalue_is_in_array(w_tf[i], w_jax)
if compute_left_eigenvectors:
check_left_eigenvectors(operand, all_w_tf, result_tf[1])
if compute_right_eigenvectors:
check_right_eigenvectors(operand, all_w_tf,
result_tf[1 + compute_left_eigenvectors])
return [
# Eig does not work in JAX on gpu or tpu
Jax2TfLimitation(
"function not compilable", modes="compiled", devices="cpu"),
Jax2TfLimitation(
"TF Conversion of eig is not implemented when both compute_left_eigenvectors and compute_right_eigenvectors are set to True",
enabled=(compute_left_eigenvectors and compute_right_eigenvectors)),
custom_numeric(
custom_assert=custom_assert,
description=("May return the eigenvalues and eigenvectors in a "
"potentially different order. The eigenvectors may "
"also be different, but equally valid."))
]
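  # Illustrative sketch (not from the original file): custom_assert above checks the
  # validity of the decomposition instead of comparing arrays elementwise, because an
  # eigendecomposition is unique only up to ordering and scaling of the eigenvectors.
  # The same residual test in plain numpy:
  def _sketch_eig_residual_check(a, tol=1e-8):
    import numpy as np
    w, vr = np.linalg.eig(a)
    # a valid decomposition satisfies a @ vr[:, i] == w[i] * vr[:, i] for every i,
    # however the eigenpairs happen to be ordered or scaled
    residual = np.linalg.norm(a @ vr - vr * w[None, :])
    return residual < tol * max(1.0, np.linalg.norm(a))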
@classmethod
def eigh(cls, harness: primitive_harness.Harness):
dtype = harness.dtype
def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
operand, = args
inner_dimension = operand.shape[-1]
def check_right_eigenvectors(a, w, vr):
tol = 1e-16
# TODO(bchetioui): tolerance needs to be very high in compiled mode,
# specifically for eigenvectors.
if dtype == np.float64:
tol = 2e-5
elif dtype == np.float32:
tol = 1e-2
elif dtype in [dtypes.bfloat16, np.complex64]:
tol = 1e-3
elif dtype == np.complex128:
tol = 2e-5
tst.assertAllClose(
np.matmul(a, vr) - w[..., None, :] * vr,
np.zeros(a.shape, dtype=vr.dtype),
atol=tol,
# For bfloat16 the np.matmul returns float32 result.
check_dtypes=False,
err_msg=err_msg)
def check_eigenvalue_is_in_array(eigenvalue, eigenvalues_array):
tol = None
if dtype in [dtypes.bfloat16, np.float32, np.complex64]:
tol = 1e-3
elif dtype in [np.float64, np.complex128]:
tol = 1e-5
closest_diff = min(abs(eigenvalues_array - eigenvalue))
tst.assertAllClose(
closest_diff,
np.array(0., closest_diff.dtype),
atol=tol,
err_msg=err_msg)
_, all_w_jax = result_jax
all_vr_tf, all_w_tf = result_tf
for idx in itertools.product(*map(range, operand.shape[:-2])):
w_jax, w_tf = all_w_jax[idx], all_w_tf[idx]
for i in range(inner_dimension):
check_eigenvalue_is_in_array(w_jax[i], w_tf)
check_eigenvalue_is_in_array(w_tf[i], w_jax)
check_right_eigenvectors(operand, all_w_tf, all_vr_tf)
return [
missing_tf_kernel(
dtypes=dtypes.bfloat16,
devices="tpu",
enabled=(harness.params["shape"] != (0, 0)), # This actually works!
),
Jax2TfLimitation(
"TODO: numeric discrepancies",
dtypes=np.float16,
devices="tpu",
expect_tf_error=False,
skip_comparison=True),
custom_numeric(
custom_assert=custom_assert,
description=("May return the eigenvalues and eigenvectors in a "
"potentially different order. The eigenvectors may "
"also be different, but equally valid."),
modes=("eager", "graph", "compiled"))
]
@classmethod
def erf(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16],
devices=("cpu", "gpu"),
modes=("eager", "graph"))
]
@classmethod
def erfc(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16],
devices=("cpu", "gpu"),
modes=("eager", "graph"))
]
@classmethod
def erf_inv(cls, harness: primitive_harness.Harness):
# erf_inv is not defined for arg <= -1 or arg >= 1
def custom_assert(tst, result_jax, result_tf, *, args, tol,
err_msg): # noqa: F811
arg, = args
# for arg < -1 or arg > 1
# lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf
special_cases = (arg < -1.) | (arg > 1.)
# non-special cases are equal
tst.assertAllClose(
result_jax[~special_cases],
result_tf[~special_cases],
atol=tol,
rtol=tol,
err_msg=err_msg)
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16, np.float16],
devices=("cpu", "gpu"),
modes=("eager", "graph")),
custom_numeric(dtypes=[np.float32, np.float64], tol=1e-4),
custom_numeric(
dtypes=[np.float32, np.float64],
custom_assert=custom_assert,
description=(
"May return different results at undefined points (< -1 or > 1):"
" JAX returns `NaN` and TF returns `+inf` or `-inf`."))
]
@classmethod
def expm1(cls, harness: primitive_harness.Harness):
return [custom_numeric(dtypes=np.float64, tol=1e-5)]
@classmethod
def fft(cls, harness):
return [
Jax2TfLimitation(
"TF function not compileable",
devices=("cpu", "gpu"),
dtypes=[np.float64, np.complex128],
modes="compiled"),
# TODO: very high tolerance
custom_numeric(tol=1e-3, modes=("eager", "graph", "compiled")),
]
@classmethod
def _pow_test_util(cls, harness: primitive_harness.Harness):
def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
# NaNs are mismatched, but assertAllClose will also behave weirdly for
# complex numbers containing np.inf as one of their components. See
# https://github.com/numpy/numpy/issues/15959 for more details.
mask = (
np.isnan(result_jax) + np.isnan(result_tf) + np.isinf(result_jax) +
np.isinf(result_tf))
tst.assertAllClose(
result_jax[~mask], result_tf[~mask], rtol=tol, err_msg=err_msg)
return [
custom_numeric(
dtypes=[np.float32, np.complex64], devices=("cpu", "gpu"),
tol=1e-3),
custom_numeric(
dtypes=[np.float64, np.complex128],
devices=("cpu", "gpu"),
tol=5e-5),
custom_numeric(
dtypes=[np.complex64, np.complex128],
custom_assert=custom_assert,
)
]
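  # Illustrative sketch (not from the original file): the masked comparison used by
  # custom_assert above, written out with plain numpy.
  def _sketch_masked_allclose(x, y, tol=1e-6):
    import numpy as np
    mask = np.isnan(x) | np.isnan(y) | np.isinf(x) | np.isinf(y)
    return np.allclose(x[~mask], y[~mask], rtol=tol, atol=tol)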
@classmethod
def igamma(cls, harness: primitive_harness.Harness):
dtype = harness.dtype
# igamma is not defined when the first argument is <=0
def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
arg1, arg2 = args
# lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0
special_cases = (arg1 == 0.) & (arg2 == 0.)
nr_special_cases = np.count_nonzero(special_cases)
tst.assertAllClose(
np.full((nr_special_cases,), np.nan, dtype=dtype),
result_jax[special_cases])
tst.assertAllClose(
np.full((nr_special_cases,), 0., dtype=dtype),
result_tf[special_cases])
# non-special cases are equal
tst.assertAllClose(
result_jax[~special_cases],
result_tf[~special_cases],
atol=tol,
rtol=tol,
err_msg=err_msg)
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16, np.float16],
devices=("cpu", "gpu"),
modes=("eager", "graph")),
custom_numeric(
custom_assert=custom_assert,
description=(
"May return different results at undefined points "
"(both arguments 0). JAX returns `NaN` and TF returns 0 or "
"JAX returns 1 and TF returns `NaN`"))
]
@classmethod
def igammac(cls, harness: primitive_harness.Harness):
dtype = harness.dtype
# igammac is not defined when the first argument is <=0
def custom_assert(tst, result_jax, result_tf, *, args, tol,
err_msg): # noqa: F811
arg1, arg2 = args
# lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN
special_cases = (arg1 <= 0.) | (arg2 <= 0)
nr_special_cases = np.count_nonzero(special_cases)
tst.assertAllClose(
np.full((nr_special_cases,), 1., dtype=dtype),
result_jax[special_cases],
err_msg=err_msg)
tst.assertAllClose(
np.full((nr_special_cases,), np.nan, dtype=dtype),
result_tf[special_cases],
err_msg=err_msg)
# non-special cases are equal
tst.assertAllClose(
result_jax[~special_cases],
result_tf[~special_cases],
atol=tol,
rtol=tol,
err_msg=err_msg)
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16, np.float16],
devices=("cpu", "gpu"),
modes=("eager", "graph")),
custom_numeric(dtypes=np.float64, tol=1e-9),
custom_numeric(devices="gpu", tol=1e-3),
custom_numeric(
custom_assert=custom_assert,
devices=("cpu", "gpu"),
description=(
"May return different results at undefined points "
"(both arguments less or equal 0). JAX returns `NaN` and TF returns 0 or "
"JAX returns 1 and TF returns `NaN`")),
]
@classmethod
def integer_pow(cls, harness: primitive_harness.Harness):
y = harness.params["y"]
return [
missing_tf_kernel(
dtypes=[
np.int8, np.int16, np.uint8, np.uint16, np.uint32, np.uint64
],
modes="graph",
enabled=(y not in [0, 1]), # These are special-cased
devices=("cpu", "gpu")),
# TODO: on TPU, for f16, we get different results with eager mode
# than with compiled mode.
Jax2TfLimitation(
"Different overflow behavior. ",
dtypes=[np.float16, jnp.bfloat16],
devices="tpu",
expect_tf_error=False,
modes=("eager", "graph"),
skip_comparison=True),
Jax2TfLimitation(
"Different overflow behavior for large exponents. ",
dtypes=[
np.int8, np.int16, np.int32, np.int64, np.float16, jnp.bfloat16,
np.float32, np.complex64, np.complex128
],
enabled=(abs(y) > 10),
expect_tf_error=False,
modes=("eager", "graph"),
skip_comparison=True),
] + list(cls._pow_test_util(harness))
@classmethod
def pow(cls, harness: primitive_harness.Harness):
return cls._pow_test_util(harness)
@classmethod
def lgamma(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16],
devices=("cpu", "gpu"),
modes=("eager", "graph")),
custom_numeric(dtypes=np.float64, tol=1e-11),
custom_numeric(dtypes=np.float32, tol=1e-3)
]
@classmethod
def log1p(cls, harness: primitive_harness.Harness):
return [
custom_numeric(dtypes=np.complex128, tol=3e-14),
custom_numeric(dtypes=np.float64, tol=1e-10),
custom_numeric(dtypes=np.float32, tol=1e-3)
]
@classmethod
def lu(cls, harness: primitive_harness.Harness):
dtype = harness.dtype
def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
operand, = args
lu, pivots, perm = result_tf
batch_dims = operand.shape[:-2]
m, n = operand.shape[-2], operand.shape[-1]
def _make_permutation_matrix(perm):
result = []
for idx in itertools.product(*map(range, operand.shape[:-1])):
result += [0 if c != perm[idx] else 1 for c in range(m)]
result = np.reshape(np.array(result, dtype=dtype), [*batch_dims, m, m])
return result
k = min(m, n)
l = jnp.tril(lu, -1)[..., :, :k] + jnp.eye(m, k, dtype=dtype)
u = jnp.triu(lu)[..., :k, :]
p_mat = _make_permutation_matrix(perm)
tst.assertArraysEqual(
lax.linalg.lu_pivots_to_permutation(pivots, m), perm)
tst.assertAllClose(
jnp.matmul(p_mat, operand),
jnp.matmul(l, u),
atol=tol,
rtol=tol,
err_msg=err_msg)
return [
custom_numeric(
dtypes=[np.float32, np.complex64], devices="tpu", tol=0.1),
custom_numeric(
dtypes=[np.float32, np.complex64], devices=("cpu", "gpu"),
tol=1e-5),
custom_numeric(dtypes=[np.float64, np.complex128], tol=1e-13),
custom_numeric(
custom_assert=custom_assert,
description=("May return different, but also correct, results when "
"the decomposition is not unique"),
devices=("cpu", "gpu"),
modes=("eager", "graph", "compiled")),
]
@classmethod
def max(cls,
def set_CloudPercentage(self, CloudPercentage): self.CloudPercentage = CloudPercentage
def validate_File_IDType(self, value):
# Validate type File_IDType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_File_IDType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_File_IDType_patterns_, ))
validate_File_IDType_patterns_ = [['^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_D\\d{2}_N\\d{2}\\.\\d{2}$', '^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_A\\d{6}_T[\\w{Lu}_]{5}_N\\d{2}\\.\\d{2}$', '^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_N\\d{2}\\.\\d{2}$']]
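    # Added sketch (not part of the generated code): gds_validate_simple_patterns
    # roughly requires the value to fully match at least one alternative from every
    # pattern group. An approximate plain-re equivalent:
    def _sketch_validate_patterns(patterns, value):
        import re
        def full_match(p):
            m = re.search(p, value)
            return m is not None and m.group(0) == value
        return all(any(full_match(p) for p in group) for group in patterns)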
def validate_Parent_IDType(self, value):
# Validate type Parent_IDType, a restriction on item:DATASTRIP_ID.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Parent_IDType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Parent_IDType_patterns_, ))
validate_Parent_IDType_patterns_ = [['^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_N\\d{2}\\.\\d{2}$', '^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_N\\d{2}\\.\\d{2} S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_N\\d{2}\\.\\d{2}$']]
def validate_Group_IDType(self, value):
# Validate type Group_IDType, a restriction on item:DATATAKE_ID.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Group_IDType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Group_IDType_patterns_, ))
validate_Group_IDType_patterns_ = [['^GS2(A$|^B)_\\d{8}T\\d{6}_\\d{6}_N\\d{2}\\.\\d{2}$']]
def validate_File_NameType(self, value):
# Validate type File_NameType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_File_NameType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_File_NameType_patterns_, ))
validate_File_NameType_patterns_ = [['^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_D\\d{2}_N\\d{2}\\.\\d{2}$', '^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_A\\d{6}_T[\\w{Lu}_]{5}_N\\d{2}\\.\\d{2}$', '^S2(A$|^B)_OPER_[\\w{Lu}_]{10}_[\\w{Lu}_]{4}_\\d{8}T\\d{6}_S\\d{8}T\\d{6}_N\\d{2}\\.\\d{2}$']]
def validate_File_VersionType(self, value):
# Validate type File_VersionType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_File_VersionType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_File_VersionType_patterns_, ))
validate_File_VersionType_patterns_ = [['^[0-9]{1,2}\\.[0-9]{1,2}$']]
def validate_SystemType(self, value):
# Validate type SystemType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['S2PDGS-DPC', 'S2PDGS-MCC']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on SystemType' % {"value" : value.encode("utf-8")} )
def validate_SourceType(self, value):
# Validate type SourceType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_SourceType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_SourceType_patterns_, ))
validate_SourceType_patterns_ = [['^L(0$|^1A$|^1B$|^1C)_Processor$']]
def validate_Source_Sw_VersionType(self, value):
# Validate type Source_Sw_VersionType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Source_Sw_VersionType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Source_Sw_VersionType_patterns_, ))
validate_Source_Sw_VersionType_patterns_ = [['^\\d{1,2}\\.\\d{1,2}(\\.\\d{1,2})*$']]
def validate_Generation_TimeType(self, value):
# Validate type Generation_TimeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Generation_TimeType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Generation_TimeType_patterns_, ))
validate_Generation_TimeType_patterns_ = [['^UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})?$']]
def validate_Validity_StartType(self, value):
# Validate type Validity_StartType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Validity_StartType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Validity_StartType_patterns_, ))
validate_Validity_StartType_patterns_ = [['^UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})?$', '^UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})? UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})?$']]
def validate_Validity_StopType(self, value):
# Validate type Validity_StopType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Validity_StopType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Validity_StopType_patterns_, ))
validate_Validity_StopType_patterns_ = [['^UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})?$', '^UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})? UTC=(\\d{4}-(((01$|^03$|^05$|^07$|^08$|^10$|^12)-(0[1-9]$|^[1,2][0-9]$|^3[0,1]))$|^((04$|^06$|^09$|^11)-(0[1-9]$|^[1,2][0-9]$|^30))$|^(02-(0[1-9]$|^[1,2][0-9]))))T(([0,1][0-9]$|^2[0-3])(:[0-5][0-9]){2})(\\.\\d{6})?$']]
def validate_Start_Orbit_NumberType(self, value):
# Validate type Start_Orbit_NumberType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Start_Orbit_NumberType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Start_Orbit_NumberType_patterns_, ))
validate_Start_Orbit_NumberType_patterns_ = [['^\\d{6}$']]
def validate_Stop_Orbit_NumberType(self, value):
# Validate type Stop_Orbit_NumberType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_Stop_Orbit_NumberType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_Stop_Orbit_NumberType_patterns_, ))
validate_Stop_Orbit_NumberType_patterns_ = [['^\\d{6}$']]
def validate_Quality_InfoType(self, value):
# Validate type Quality_InfoType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on Quality_InfoType' % {"value" : value} )
if value > 100:
warnings_.warn('Value "%(value)s" does not match xsd maxInclusive restriction on Quality_InfoType' % {"value" : value} )
def validate_File_TypeType(self, value):
# Validate type File_TypeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if len(value) != 10:
warnings_.warn('Value "%(value)s" does not match xsd length restriction on File_TypeType' % {"value" : value.encode("utf-8")} )
def validate_DetectorType(self, value):
# Validate type DetectorType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_DetectorType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_DetectorType_patterns_, ))
validate_DetectorType_patterns_ = [['^(0[1-9])$', '^1[012]$']]
def validate_File_ClassType(self, value):
# Validate type File_ClassType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['RT', 'NRT', 'NOM', 'TEST-RT', 'TEST-NRT', 'TEST-NOM', 'NA']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on File_ClassType' % {"value" : value.encode("utf-8")} )
def validate_Sensor_CodeType(self, value):
# Validate type Sensor_CodeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
pass
def validate_Sensor_ModeType(self, value):
# Validate type Sensor_ModeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['NOM', 'DSC', 'ABC', 'VIC', 'RAW', 'TDI', 'NA_']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on Sensor_ModeType' % {"value" : value.encode("utf-8")} )
if len(value) != 3:
warnings_.warn('Value "%(value)s" does not match xsd length restriction on Sensor_ModeType' % {"value" : value.encode("utf-8")} )
def validate_Acquisition_StationType(self, value):
# Validate type Acquisition_StationType, a restriction on center:A_S2_ACQUISITION_CENTER.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['MTI_', 'SGS_', 'MPS_']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on Acquisition_StationType' % {"value" : value.encode("utf-8")} )
def validate_Processing_StationType(self, value):
# Validate type Processing_StationType, a restriction on center:A_S2_PROCESSING_CENTRE.
if value is not None and Validate_simpletypes_:
pass
def validate_Satellite_CodeType(self, value):
# Validate type Satellite_CodeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['S2A', 'S2B', 'NIL']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on Satellite_CodeType' % {"value" : value.encode("utf-8")} )
def validate_Ascending_FlagType(self, value):
# Validate type Ascending_FlagType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['true', 'false']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on Ascending_FlagType' % {"value" : value.encode("utf-8")} )
def validate_CloudPercentageType(self, value):
# Validate type CloudPercentageType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on CloudPercentageType' % {"value" : value} )
if value > 100:
warnings_.warn('Value "%(value)s" does not match xsd maxInclusive restriction on CloudPercentageType' % {"value" : value} )
def hasContent_(self):
if (
self.File_ID is not None or
self.Parent_ID is not None or
self.Group_ID is not None or
self.File_Name is not None or
self.File_Version is not None or
self.System is not None or
self.Source is not None or
self.Source_Sw_Version is not None or
self.Generation_Time is not None or
self.Validity_Start is not None or
self.Validity_Stop is not None or
self.Start_Orbit_Number is not None or
self.Stop_Orbit_Number is not None or
self.Geographic_Localization is not None or
self.Quality_Info is not None or
self.Data_Size is not None or
self.File_Type is not None or
self.Detector is not None or
self.File_Class is not None or
self.Sensor_Code is not None or
self.Sensor_Mode is not None or
self.Acquisition_Station is not None or
self.Processing_Station is not None or
self.Satellite_Code is not None or
self.Ascending_Flag is not None or
self.CloudPercentage is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='Inventory_Metadata', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Inventory_Metadata')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
# data_collection/scraper.py
'''
Daily data collection: scrapes game-result data from baseball-reference.com, plus the upcoming games for the next
period, to build the dataframe that is fed into the model.
**NOTE** While the model is in production right now, there is no 2020 baseball season due to COVID-19.
Thus, the model currently uses a timedelta of 1 year: it simulates and offers predictions for the 2019
season as if it were being played as the 2020 season.
'''
from random import choice
import re
from dataclasses import dataclass, field
from bs4 import BeautifulSoup, Comment
import requests
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import time
import os
import sys
import json
USER_AGENT = [
r"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
]
BASE_URL = 'https://www.baseball-reference.com/boxes/?date={}-{}-{}'
BOX_SCORE = "https://www.baseball-reference.com{}"
OUTPUT_DIR = "./all_data/"
OUTPUT_PATH = "./all_data/current_season.csv"
ELO_LOC = "https://projects.fivethirtyeight.com/mlb-api/mlb_elo_latest.csv"
def prep_dir(output_dir = OUTPUT_DIR):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
def write_elo(path = ELO_LOC, output_dir = OUTPUT_DIR):
    try:
        elo = pd.read_csv(path)
    except Exception as e:
        print("There was an error handling the Elo data: {}".format(e))
        return  # bail out: `elo` is undefined if the read failed
    elo.to_csv("{}daily_elo.csv.gz".format(output_dir), compression = "gzip", index = False)
    print("Elo data written to disk")
@dataclass
class BRScraper():
date: str = None #YYYY-MM-DD format only
base_url: str = BASE_URL
box_score: str = BOX_SCORE
output_path: str = OUTPUT_PATH
user_agents: list = None
proxies: dict = None
scrape_delay: int = 5
daily_links: list = field(default_factory = list)
game_soups: list = field(default_factory = list)
parsed_games: list = field(default_factory = list)
today_games: list = field(default_factory = list)
def get_today_games(self):
self.daily_scrape()
with open("./all_data/team_map.json", "r+") as f:
team_map = json.load(f)
for game_soup in self.game_soups:
d = {}
d["date"] = self.date
teams = [i.get_text() for i in game_soup.find('div',
{'class' : 'scorebox'}).findAll('a', {'itemprop' : 'name'})]
teams = ["".join(team.split(" ")) for team in teams]
teams = [i.replace(".", "") for i in teams]
d["road_team"], d["home_team"] = team_map[teams[0]], team_map[teams[1]]
comment_wrappers = game_soup.findAll('div', {'class' : 'section_wrapper setup_commented commented'})
for wrapper in comment_wrappers:
if wrapper.find('span', {'data-label' : 'Pitching Lines and Info'}):
all_pitching = BeautifulSoup(wrapper.find(text = lambda text: isinstance(text, Comment)),
'html.parser')
for i in range(len(teams)):
tag3 = "all_" + teams[i] + "pitching"
starting_pitching = all_pitching.find('div', {'id' : tag3}).find('tbody').findAll('tr')[0]
starter = starting_pitching.find('th')['data-append-csv']
if i == 0:
d["road_starter"] = starter
else:
d["home_starter"] = starter
self.today_games.append(d)
game_frames = pd.DataFrame(self.today_games)
game_frames.to_csv("./all_data/today_games.csv", index = False)
print("Today's games have been written to disk")
def write_to_file(self):
if not self.parsed_games:
raise ValueError("daily_scrape and parse_data methods must be run first")
all_batting = pd.DataFrame([i[0] for i in self.parsed_games])
all_pitching = pd.DataFrame([i[1] for i in self.parsed_games])
team_map = {
"CincinnatiReds" : "CIN",
"KansasCityRoyals" : "KCR",
"LosAngelesDodgers" : "LAD",
"MiamiMarlins" : "FLA",
"MilwaukeeBrewers" : "MIL",
"MinnesotaTwins" : "MIN",
"NewYorkYankees" : "NYY",
"OaklandAthletics" : "OAK",
"PhiladelphiaPhillies" : "PHI",
"SanDiegoPadres" : "SDP",
"SeattleMariners" : "SEA",
"TampaBayRays" : "TBD",
"TexasRangers" : "TEX",
"TorontoBlueJays" : "TOR",
"WashingtonNationals" : "WSN",
"AtlantaBraves" : "ATL",
"ClevelandIndians" : "CLE",
"PittsburghPirates" : "PIT",
"LosAngelesAngels" : "ANA",
"BaltimoreOrioles" : "BAL",
"DetroitTigers" : "DET",
"NewYorkMets" : "NYM",
"ArizonaDiamondbacks" : "ARI",
"ChicagoWhiteSox" : "CHW",
"ColoradoRockies" : "COL",
"HoustonAstros" : "HOU",
"SanFranciscoGiants" : "SFG",
"StLouisCardinals" : "STL",
"BostonRedSox" : "BOS",
"ChicagoCubs" : "CHC"
}
dfs = [all_batting, all_pitching]
updated_frames = []
for df in dfs:
df["date"] = pd.to_datetime(df.date, format = "%Y-%m-%d")
df['year'] = pd.DatetimeIndex(df.date).year
df = df.assign(is_doubleheader = 0, is_tripleheader = 0)
game_counts = df.groupby('home').date.value_counts()
double_headers = game_counts[game_counts == 2]
triple_headers = game_counts[game_counts > 2]
all_double_headers = []
for j in double_headers.index:
all_double_headers.append(j)
all_triple_headers = []
for k in triple_headers.index:
all_triple_headers.append(k)
for index in all_double_headers:
game_indices = df[(df.home == index[0]) & (df.date == index[1])].index
if len(game_indices) > 1:
df.at[game_indices[1], 'is_doubleheader'] = 1
else:
print(index)
for index_ in all_triple_headers:
game_indices_ = df[(df.home == index_[0]) & (df.date == index_[1])].index
if len(game_indices_) == 3:
df.at[game_indices_[1], 'is_doubleheader'] = 1
df.at[game_indices_[2], 'is_tripleheader'] = 1
else:
print(index_)
updated_frames.append(df)
merge_cols = ["date", "visitor", "home", "is_doubleheader", "is_tripleheader", "year"]
full_frame = updated_frames[0].merge(updated_frames[1], how = "left", left_on = merge_cols,
right_on = merge_cols)
full_frame["home_team"] = full_frame.home.map(team_map)
full_frame["road_team"] = full_frame.visitor.map(team_map)
full_frame = full_frame.drop(columns = ["home", "visitor"])
full_frame = full_frame.drop(columns = ["visitorstarter", "homestarter"])
full_frame = full_frame.rename({"homeretrosheet_id" : "home_starter",
"visitorretrosheet_id" : "road_starter"}, axis = 1)
all_cols = list(full_frame.columns)
updated_cols = []
for col in all_cols:
col = col.replace("visitorstarter", "road_starter_")
col = col.replace("visitorbullpen", "road_relief_")
col = col.replace("homestarter", "home_starter_")
col = col.replace("homebullpen", "home_relief_")
if "starter" not in col or "relief" not in col:
col = col.replace("home", "home_")
col = col.replace("visitor", "road_")
col = col.replace("__", "_")
col = col.replace("SO", "K")
updated_cols.append(col)
full_frame.columns = updated_cols
prefix = ["home_", "road_"]
for pre in prefix:
full_frame[pre + "TB"] = (full_frame[pre + "HR"] * 4) + (full_frame[pre + "3B"] * 3) +\
(full_frame[pre + "2B"] * 2) + full_frame[pre + "1B"]
full_frame = full_frame.rename(columns = {"year" : "season"})
prefix = ["home_", "road_", "home_starter_", "home_relief_", "road_starter_", "road_relief_"]
for pre in prefix:
full_frame[pre + "SAC"] = full_frame[pre + "SF"] + full_frame[pre + "SH"]
full_frame = full_frame.drop(columns = ["home_SF", "home_SH", "road_SF", "road_SH",
"home_starter_SF", "home_starter_SH", "home_relief_SF",
"home_relief_SH", "road_starter_SF", "road_starter_SH", "road_relief_SH",
"road_relief_SF"])
cols = ["home_starter_IP", "road_starter_IP", "home_relief_IP", "road_relief_IP"]
for col in cols:
full_frame[col] = full_frame[col].astype("str")
for k in range(len(full_frame)):
full_frame.at[k, col] = full_frame.iloc[k][col].replace(".1", ".33")
full_frame.at[k, col] = full_frame.iloc[k][col].replace(".2", ".67")
full_frame.at[k, col] = full_frame.iloc[k][col].replace(".8", ".33")
full_frame.at[k, col] = full_frame.iloc[k][col].replace(".9", ".67")
full_frame[col] = full_frame[col].astype("float32")
prefix = ["home_starter_", "road_starter_", "home_relief_", "road_relief_"]
for pre in prefix:
full_frame[pre + "AB"] = full_frame[pre + "H"] + (full_frame[pre + "IP"] * 3).astype("int32")
full_frame[pre + "PA"] = (full_frame[pre + "IP"] * 3).astype("int32") + full_frame[pre + "H"] +\
full_frame[pre + "BB"] + full_frame[pre + "IBB"] + full_frame[pre + "HBP"] +\
full_frame[pre + "SAC"]
prefix = ["home_starter_", "road_starter_", "home_relief_", "road_relief_"]
for pre in prefix:
full_frame[pre + "1B"] = full_frame[pre + "H"] - full_frame[pre + "2B"] + full_frame[pre + "3B"] +\
full_frame[pre + "HR"]
with open("./adv_metric_constants/wOBA_weights.json", "r+") as f:
wOBA_weights = json.load(f)
wOBA = pd.DataFrame(wOBA_weights)
change_cols = ["wOBA", "wOBAScale", "wBB", "wHBP", "w1B", "w2B", "w3B", "wHR",
"runSB", "runCS", "R/PA", "R/W", "cFIP"]
wOBA["Season"] = wOBA.Season.astype("int64")
for col in change_cols:
wOBA[col] = wOBA[col].astype("float32")
wOBA = wOBA.rename(columns = {"Season" : "season"})
full_frame = full_frame.merge(wOBA, how = "left", left_on = ["season"],
right_on = ["season"])
teams = requests.get("https://www.retrosheet.org/TEAMABR.TXT").content
team_soup = BeautifulSoup(teams, "html.parser").get_text().split("\n")
leagues = {}
pattern = r'([\w]+)'
for row in team_soup:
vals = re.findall(pattern, row)
try:
leagues[vals[0]] = vals[1]
            except IndexError:  # header/blank lines yield fewer than two tokens
                continue
with open("./adv_metric_constants/modern_rc.json", "r+") as f:
retrosheet_codes = json.load(f)
retrosheet_codes.update({"MIA" : "FLA"})
pop_list = []
for key in leagues:
if key not in retrosheet_codes:
pop_list.append(key)
for key in pop_list:
leagues.pop(key)
leagues["MIA"] = "NL"
df = pd.DataFrame({"team_code" : list(leagues.keys()), "league" : list(leagues.values())})
df["elo_code"] = df.team_code.map(retrosheet_codes)
elo_leagues = {}
for key in retrosheet_codes:
elo_leagues[retrosheet_codes[key]] = leagues[key]
elo_leagues["HOU"] = "AL"
full_frame["home_league"] = full_frame.home_team.map(elo_leagues)
full_frame["road_league"] = full_frame.road_team.map(elo_leagues)
al_league_wRC = pd.read_csv("./adv_metric_constants/league_wRC_AL.csv")
nl_league_wRC = pd.read_csv("./adv_metric_constants/league_wRC_NL.csv")
al_league_wRC = al_league_wRC[["Season", "PA", "wRC"]].rename(columns = {
"Season" : "season",
"PA" : "league_PA",
"wRC" : "league_wRC"
})
nl_league_wRC = nl_league_wRC[["Season", "PA", "wRC"]].rename(columns = {
"Season" : "season",
"PA" : "league_PA",
"wRC" : "league_wRC"
})
al_league_wRC = al_league_wRC.assign(league = "AL")
nl_league_wRC = nl_league_wRC.assign(league = "NL")
league_wRC = pd.concat([al_league_wRC, nl_league_wRC], axis = 0).sort_values(by = ["season"]).reset_index(drop = True)
full_frame = full_frame.merge(league_wRC, how = "left", left_on = ["season", "home_league"],
right_on = ["season", "league"])
full_frame = full_frame.drop(columns = ["league"]).rename(columns = {
"league_PA" : "home_league_PA",
"league_wRC" : "home_league_wRC"
}
)
full_frame = full_frame.merge(league_wRC, how = "left", left_on = ["season", "road_league"],
right_on = ["season", "league"])
full_frame = full_frame.drop(columns = ["league"]).rename(columns = {
"league_PA" : "road_league_PA",
"league_wRC" : "road_league_wRC"
})
all_stadiums = pd.read_csv("./adv_metric_constants/all_stadiums_w_park_ids.csv", index_col = 0)
park_factors = all_stadiums[["team_code", "year", "batting_park_factor"]]
full_frame = full_frame.merge(park_factors, how = "left",
left_on = ["home_team", "season"],
right_on = ["team_code", "year"])
full_frame = full_frame.drop(columns = ["team_code", "year"])
full_frame = full_frame.rename(columns = {"batting_park_factor" : "home_batting_park_factor"})
full_frame.home_batting_park_factor = full_frame.home_batting_park_factor / 100.
if os.path.isfile(self.output_path) or os.path.islink(self.output_path):
with open(self.output_path, "a") as f:
full_frame.to_csv(f, header = False, index = False)
else:
full_frame.to_csv(self.output_path, index = False)
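    # Added sketch (not part of the original file): the string replacements above decode
    # baseball innings-pitched notation, where the tenths digit counts outs (.1 = one
    # third of an inning, .2 = two thirds). An arithmetic version of the same conversion:
    def _innings_to_decimal(ip):
        whole = int(ip)
        outs = round((ip - whole) * 10)  # 0, 1 or 2 in standard IP notation
        return whole + outs / 3.0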
def GetClickablePointCore(self, *args): #cannot find CLR method
"""
GetClickablePointCore(self: UIElementAutomationPeer) -> Point
Gets a System.Windows.Point that represents the clickable space that is on the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
Returns: The System.Windows.Point on the element that allows a click. The point values
are (System.Double.NaN, System.Double.NaN) if the element is not both a
System.Windows.Interop.HwndSource and a System.Windows.PresentationSource.
"""
pass
def GetHelpTextCore(self, *args): #cannot find CLR method
"""
GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that describes the functionality of the
        System.Windows.FrameworkElement that is associated with this
        System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
Returns: The help text, usually from the System.Windows.Controls.ToolTip, or
System.String.Empty if there is no help text.
"""
pass
def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
"""
GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
Tells UI Automation where in the UI Automation tree to place the hwnd being
hosted by a Windows Presentation Foundation (WPF) element.
Returns: This method returns the hosted hwnd to UI Automation for controls that host
hwnd objects.
"""
pass
def GetItemStatusCore(self, *args): #cannot find CLR method
"""
GetItemStatusCore(self: UIElementAutomationPeer) -> str
Gets a string that communicates the visual status of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetItemStatus.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemStatus that is returned by
System.Windows.Automation.AutomationProperties.GetItemStatus(System.Windows.Depe
ndencyObject).
"""
pass
def GetItemTypeCore(self, *args): #cannot find CLR method
"""
GetItemTypeCore(self: UIElementAutomationPeer) -> str
Gets a human-readable string that contains the item type that the
System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer represents. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetItemType.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemType that is returned by
System.Windows.Automation.AutomationProperties.GetItemType(System.Windows.Depend
encyObject).
"""
pass
def GetLabeledByCore(self, *args): #cannot find CLR method
"""
GetLabeledByCore(self: UIElementAutomationPeer) -> AutomationPeer
Gets the System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetLabeledBy.
Returns: The System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetLocalizedControlTypeCore(self, *args): #cannot find CLR method
"""
GetLocalizedControlTypeCore(self: AutomationPeer) -> str
When overridden in a derived class, is called by
System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
Returns: The type of the control.
"""
pass
def GetNameCore(self, *args): #cannot find CLR method
"""
GetNameCore(self: FrameworkElementAutomationPeer) -> str
        Gets the text label of the System.Windows.FrameworkElement that is associated
        with this System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called
by System.Windows.Automation.Peers.AutomationPeer.GetName.
Returns: The text label of the element that is associated with this automation peer.
"""
pass
def GetOrientationCore(self, *args): #cannot find CLR method
"""
GetOrientationCore(self: UIElementAutomationPeer) -> AutomationOrientation
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
laid out in a specific direction. This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetOrientation.
Returns: The System.Windows.Automation.Peers.AutomationOrientation.None enumeration
value.
"""
pass
def GetPattern(self, patternInterface):
"""
GetPattern(self: CalendarAutomationPeer, patternInterface: PatternInterface) -> object
Gets the object that supports the specified control pattern of the element that
is associated with this automation peer.
patternInterface: An enumeration value that specifies the control pattern.
Returns: If patternInterface is System.Windows.Automation.Peers.PatternInterface.Grid,
System.Windows.Automation.Peers.PatternInterface.Table,
System.Windows.Automation.Peers.PatternInterface.MultipleView, or
System.Windows.Automation.Peers.PatternInterface.Selection, this method returns
a this pointer; otherwise, this method returns null.
"""
pass
def GetPeerFromPointCore(self, *args): #cannot find CLR method
""" GetPeerFromPointCore(self: AutomationPeer, point: Point) -> AutomationPeer """
pass
def HasKeyboardFocusCore(self, *args): #cannot find CLR method
"""
HasKeyboardFocusCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
currently has keyboard input focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.HasKeyboardFocus.
Returns: true if the element has keyboard input focus; otherwise, false.
"""
pass
def IsContentElementCore(self, *args): #cannot find CLR method
"""
IsContentElementCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
an element that contains data that is presented to the user. This method is
called by System.Windows.Automation.Peers.AutomationPeer.IsContentElement.
Returns: true.
"""
pass
def IsControlElementCore(self, *args): #cannot find CLR method
"""
IsControlElementCore(self: UIElementAutomationPeer) -> bool
Gets or sets a value that indicates whether the System.Windows.UIElement that
is associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
is understood by the end user as interactive. Optionally, the user might
understand the System.Windows.UIElement as contributing to the logical
structure of the control in the GUI. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsControlElement.
Returns: true.
"""
pass
def IsEnabledCore(self, *args): #cannot find CLR method
"""
IsEnabledCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
        is enabled for user interaction. This method is called by
        System.Windows.Automation.Peers.AutomationPeer.IsEnabled.
Returns: A boolean that contains the value of System.Windows.UIElement.IsEnabled.
"""
pass
def IsKeyboardFocusableCore(self, *args): #cannot find CLR method
"""
IsKeyboardFocusableCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
can accept keyboard focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsKeyboardFocusable.
Returns: true if the element is focusable by the keyboard; otherwise false.
"""
pass
def IsOffscreenCore(self, *args): #cannot find CLR method
"""
IsOffscreenCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
off the screen. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsOffscreen.
Returns: true if the element is not on the screen; otherwise, false.
"""
pass
def IsPasswordCore(self, *args): #cannot find CLR method
"""
IsPasswordCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
contains protected content. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsPassword.
Returns: false.
"""
pass
def IsRequiredForFormCore(self, *args): #cannot find CLR method
"""
IsRequiredForFormCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
required to be completed on a form. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
Returns: A boolean that contains the value that is returned by
System.Windows.Automation.AutomationProperties.GetIsRequiredForForm(System.Windo
ws.DependencyObject), if it's set; otherwise false.
"""
pass
def PeerFromProvider(self, *args): #cannot find CLR method
"""
PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
provider: The class that implements
System.Windows.Automation.Provider.IRawElementProviderSimple.
Returns: The System.Windows.Automation.Peers.AutomationPeer.
"""
pass
def ProviderFromPeer(self, *args): #cannot find CLR method
"""
ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple
Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
specified System.Windows.Automation.Peers.AutomationPeer.
peer: The automation peer.
Returns: The proxy.
"""
pass
def SetFocusCore(self, *args): #cannot find CLR method
""" SetFocusCore(self: CalendarAutomationPeer) """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: Calendar) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class CalendarButtonAutomationPeer(FrameworkElementAutomationPeer):
"""
Exposes System.Windows.Controls.Primitives.CalendarButton types to UI Automation.
CalendarButtonAutomationPeer(owner: CalendarButton)
"""
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: Button) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class ToggleButtonAutomationPeer(ButtonBaseAutomationPeer, IToggleProvider):
"""
Exposes System.Windows.Controls.Primitives.ToggleButton types to UI Automation.
ToggleButtonAutomationPeer(owner: ToggleButton)
"""
def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
"""
GetAcceleratorKeyCore(self: ButtonBaseAutomationPeer) -> str
Gets the accelerator key for the System.Windows.Controls.Primitives.ButtonBase
that is associated with this
System.Windows.Automation.Peers.ButtonBaseAutomationPeer. This method is
called by System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
"""
pass
in png) or ('g102_'+self.MASTversion+'_2dstack.png' in png)):
self.GUIimage = png
if (self.inGUIimage == 'G141stack') & \
(('G141_stack.png' in png) or ('g141_'+self.MASTversion+'_2dstack.png' in png)):
self.GUIimage = png
if self.GUIimage is None: # if requested image not found for object use first png figure instead
self.GUIimage = pngorderedlist[0]
# Getting number of PAs for current object
if self.MASTfiles:
searchext = '_1d.png'
else:
searchext = '.1D.png'
twodpng = glob.glob(self.dir+'*'+idstr+'*'+searchext)
self.PAs = np.zeros(len(twodpng))
for ii in xrange(len(self.PAs)):
if self.MASTfiles:
namesplit = os.path.basename(twodpng[ii]).split('-pa')
self.PAs[ii] = namesplit[-1][:3]
else:
namesplit = os.path.basename(twodpng[ii]).split('-')
self.PAs[ii] = int(namesplit[1])
if namesplit[0] in ['MACS0416.1','MACS2129.4','RXJ1347.5']: # case of names with negative dec
self.PAs[ii] = int(namesplit[2])
self.PAs = np.sort(np.unique(self.PAs)) # Make sure the PAs are sorted
self.Npa = len(self.PAs)
self.pPNG = subprocess.Popen(opencmd,shell=True,executable=os.environ["SHELL"])
time.sleep(1.1)# sleep to make sure png appear in PIDlist
if self.plat == 'darwin':
self.pngPID = vi.getPID('Preview.app',verbose=False) # get PID of png process
elif self.plat in ('linux2', 'linux'):
self.pngPID = vi.getPID('gthumb',verbose=False) # get PID of png process
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def openfits_but(self,position):
"""
Button to open fits files
"""
self.fitsb = Button(self)
self.fitsb["text"] = "(0) Open fits files"
if self.xpa:
self.fitsb["command"] = self.openfits_but_cmd_xpa
else:
self.fitsb["command"] = self.openfits_but_cmd
self.fitsb.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def openfits_but_cmd_xpa(self):
"""
Command for openfits button
"""
self.regiontemp = 'temp_ds9_forinspection.reg'
idstr = str("%05d" % self.currentobj)
lockstr = self.lockds9string()
ds9cmd = ' '
if not self.ds9windowopen:
ds9cmd = ds9cmd+'ds9 -geometry 1200x600 -scale zscale '+\
lockstr+' -tile grid layout 4 '+str(2*int(self.Npamax))
self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ["SHELL"])
time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist
self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of DS9 process
self.ds9windowopen = True
time.sleep(1.0)
for ii in np.arange(1,17):
out = commands.getoutput('xpaset -p ds9 frame new')
out = commands.getoutput('xpaset -p ds9 tile')
Fstart = 1
for PA in self.PAs:
PAstr = '-'+str("%03d" % int(PA))+'-'
if self.MASTfiles:
searchexpression = self.dir+'*'+idstr+'*-pa'+PAstr[1:-1]+'_*2d.fits'
else:
searchexpression = self.dir+'*'+PAstr+'*'+idstr+'*2D.fits'
fits_2D = glob.glob(searchexpression)
for ii in xrange(len(fits_2D)):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))
regionfile = self.regiontemp.replace('.reg',PAstr+'DSCI.reg')
self.ds9textregion('DSCI PA='+str(int(PA)),filename=regionfile)
out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[DSCI]')
out = commands.getoutput('xpaset -p ds9 regions '+regionfile)
Fstart += 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))
regionfile = self.regiontemp.replace('.reg',PAstr+'SCI.reg')
self.ds9textregion('SCI PA='+str(int(PA)),filename=regionfile)
out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[SCI]')
out = commands.getoutput('xpaset -p ds9 regions '+regionfile)
Fstart += 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))
regionfile = self.regiontemp.replace('.reg',PAstr+'CONTAM.reg')
self.ds9textregion('CONTAM PA='+str(int(PA)),filename=regionfile)
out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[CONTAM]')
out = commands.getoutput('xpaset -p ds9 regions '+regionfile)
Fstart += 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))
regionfile = self.regiontemp.replace('.reg',PAstr+'SCI-CONTAM.reg')
self.ds9textregion('SCI-CONTAM PA='+str(int(PA)),filename=regionfile)
contamsub = self.subtractcontam(fits_2D[ii]) # creating file with contam. subtracted spectrum
out = commands.getoutput('xpaset -p ds9 file '+contamsub)
out = commands.getoutput('xpaset -p ds9 regions '+regionfile)
# If a sextractor region file for the SCI-CONTAM image exists, show it.
sexregion = fits_2D[ii].split('.fit')[0]+'_SCI-CONTAM.reg'
if os.path.exists(sexregion):
out = commands.getoutput('xpaset -p ds9 regions '+sexregion)
Fstart += 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def openfits_but_cmd(self):
"""
Command for openfits button
"""
self.ds9open = True
self.regiontemp = 'temp_ds9_forinspection.reg'
idstr = str("%05d" % self.currentobj)
lockstr = self.lockds9string()
ds9cmd = 'ds9 -geometry 1200x600 -scale zscale '+lockstr+' -tile grid layout 4 '+str(2*int(self.Npa))
for PA in self.PAs:
PAstr = '-'+str("%03d" % int(PA))+'-'
if self.MASTfiles:
searchext = '2d.fits'
else:
searchext = '2D.fits'
fits_2D = glob.glob(self.dir+'*'+PAstr+'*'+idstr+'*'+searchext)
for ii in xrange(len(fits_2D)):
regionfile = self.regiontemp.replace('.reg',PAstr+'DSCI.reg')
self.ds9textregion('DSCI PA='+str(int(PA)),filename=regionfile)
ds9cmd = ds9cmd+' "'+fits_2D[ii]+'[DSCI]" -region '+regionfile+' '
regionfile = self.regiontemp.replace('.reg',PAstr+'SCI.reg')
self.ds9textregion('SCI PA='+str(int(PA)),filename=regionfile)
ds9cmd = ds9cmd+' "'+fits_2D[ii]+'[SCI]" -region '+regionfile+' '
regionfile = self.regiontemp.replace('.reg',PAstr+'CONTAM.reg')
self.ds9textregion('CONTAM PA='+str(int(PA)),filename=regionfile)
ds9cmd = ds9cmd+' "'+fits_2D[ii]+'[CONTAM]" -region '+regionfile+' '
regionfile = self.regiontemp.replace('.reg',PAstr+'SCI-CONTAM.reg')
self.ds9textregion('SCI-CONTAM PA='+str(int(PA)),filename=regionfile)
contamsub = self.subtractcontam(fits_2D[ii]) # creating file with contamination subtracted spectrum
ds9cmd = ds9cmd+' "'+contamsub+'" -region '+regionfile+' '
# If a sextractor region file for the SCI-CONTAM image exists, show it.
sexregion = fits_2D[ii].split('.fit')[0]+'_SCI-CONTAM.reg'
if os.path.exists(sexregion):
ds9cmd = ds9cmd+' -region '+sexregion+' '
self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ["SHELL"])
time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist
self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of DS9 process
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def lockds9string(self):
"""
"""
if int(self.ds9version[1].split('.')[0]) >= 7: # only lock if ds9 version is 7 or later
lockstr = ' -lock frame physical '
else:
print ' - WARNING DS9 version older than 7.*; Not locking frames.'
lockstr = ' '
return lockstr
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ds9textregion(self,text,filename='temp.reg'):
"""
Create ds9 region file with text string
Note that it's overwriting any existing file!
"""
regstr = 'physical\n# text(130,10) textangle=0 textrotate=0 font="helvetica 12 normal roman" text={'+text+'}'
fds9region = open(filename,'w')
fds9region.write(regstr)
fds9region.close()
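# Example (sketch): self.ds9textregion('SCI PA=45', filename='t.reg') writes a
# two-line DS9 region file:
#   physical
#   # text(130,10) textangle=0 textrotate=0 font="helvetica 12 normal roman" text={SCI PA=45}
# which the 'xpaset -p ds9 regions t.reg' calls above then overlay as a text
# label on the current frame.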
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def updateimage(self):
"""
update image in GUI
"""
img = ImageTk.PhotoImage(Image.open(self.GUIimage).resize((self.imgx,self.imgy),Image.ANTIALIAS))
self.imageframe.configure(image = img)
self.imageframe.image = img
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def reset(self,skip=False):
"""
Writing results to output, resetting checkboxes, and closing DS9 and PNG windows
if skip=True nothing will be written to output file.
"""
if (self.autosaveplot) & (skip==False): self.dataPlot_savebutton_cmd() # saving plot before resetting
try: # checking that the input can be converted to a float
zbyhand = str(float(self.byhandz.get()))+' '
except:
zbyhand = '-99 '
if (str(self.byhandz.get()) != ''):
print ' - WARNING: by-hand redshift field'
requestupdated_at = str(d["ticket"]["updated_at"]).replace("T", " ").replace("Z", "")
requestassignee_id = str(d["ticket"]["assignee_id"])
requestseverity = str(d["ticket"]["tags"]).replace("[']", "")
# request_res_field = str(d["ticket"]["custom_fields"]['id': 24598606])
# print(request_res_field)
# except:
# return messageDetail.ReplyToChat("Cannot get ticket info for ID " + str(zdid))
if (len(d["ticket"]["tags"])) == 0:
noTag = True
else:
noTag = False
notSet = True
if noTag:
sev = "Not set"
notSet = False
for index_tags in range(len(d["ticket"]["tags"])):
tags = str((d["ticket"]["tags"][index_tags]))
if tags.startswith("severity_1"):
sev = "Severity 1"
notSet = False
elif tags.startswith("severity_2"):
sev = "Severity 2"
notSet = False
elif tags.startswith("severity_3"):
sev = "Severity 3"
notSet = False
elif tags.startswith("severity_4"):
sev = "Severity 4"
notSet = False
if notSet:
sev = "Not Set"
notSet = False
requestseverity = sev
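# (Sketch: the severity scan above could be collapsed into a prefix-to-label
#  lookup; this assumes the same four 'severity_N' tag prefixes.)
# severity_labels = {"severity_%d" % i: "Severity %d" % i for i in range(1, 5)}
# sev = next((label for tag in d["ticket"]["tags"]
#             for prefix, label in severity_labels.items()
#             if tag.startswith(prefix)), "Not set")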
request_id = str(requestid)
request_priority = str(requestpriority)
request_subject = str(requestsubject)
request_desc = str(requestdescription)
request_org = str(requestorganization_id)
request_requestor = str(requestrequester_id)
request_created = str(requestcreated_at)
request_updated = str(requestupdated_at)
request_severity = str(requestseverity)
try:
# To get the name of the requester given the requesterID
conn.request("GET", "/api/v2/users/" + request_requestor, headers=headers)
res = conn.getresponse()
userRequesterId = res.read()
tempUserRequester = str(userRequesterId.decode('utf-8'))
# data = json.dumps(tempUserRequester, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserRequester))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
req_name = str(d["user"]["name"])
requesterName = req_name
except:
try:
botlog.LogSymphonyInfo("Inside second try for requester name in showZD")
# To get the name of the requester given the requesterID
conn.request("GET", "/api/v2/users/" + request_requestor, headers=headers)
res = conn.getresponse()
userRequesterId = res.read()
tempUserRequester = str(userRequesterId.decode('utf-8'))
# data = json.dumps(tempUserRequester, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserRequester))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
req_name = str(d["user"]["name"])
requesterName = req_name
except:
requesterName = "N/A"
messageDetail.ReplyToChat("Cannot get requester info")
# Getting IDs of requester and assignee to be processed
try:
request_assignee = str(requestassignee_id)
# To get the name of the assignee given the assigneeID
conn.request("GET", "/api/v2/users/" + request_assignee, headers=headers)
res = conn.getresponse()
userAssigneeId = res.read()
tempUserAssignee = str(userAssigneeId.decode('utf-8'))
# data = json.dumps(tempUserAssignee, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserAssignee))
data = json.dumps(data_dict, indent=2)
d = json.loads(str(data))
assign_name = str(d["user"]["name"])
assigneeName = assign_name
except:
try:
botlog.LogSymphonyInfo("Inside second try for assginee name value in ShowZD")
request_assignee = str(requestassignee_id)
# To get the name of the assignee given the assigneeID
conn.request("GET", "/api/v2/users/" + request_assignee, headers=headers)
res = conn.getresponse()
userAssigneeId = res.read()
tempUserAssignee = str(userAssigneeId.decode('utf-8'))
# data = json.dumps(tempUserAssignee, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserAssignee))
data = json.dumps(data_dict, indent=2)
d = json.loads(str(data))
assign_name = str(d["user"]["name"])
assigneeName = assign_name
except:
assigneeName = "N/A"
assignee_flag = True
requesterTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/requester/requested_tickets"
assigneeTicket = (_configDef['zdesk_config']['zdesk_url']) + "/agent/users/" + str(request_assignee) + "/assigned_tickets"
OrgTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/organization/tickets"
try:
# Convert the Zendesk ID to company name
conn.request("GET", "/api/v2/users/" + str(requestrequester_id) + "/organizations.json", headers=headers)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d = json.loads(data)
org_Name = str(d["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("&", "&amp;").replace("<", "&lt;").replace('"', "&quot;").replace("'", "&apos;").replace(">", "&gt;")
orgName = str(org_name_temp)
# print(orgName)
except:
try:
botlog.LogSymphonyInfo("Inside Second try for Org name value")
# Convert the Zendesk ID to company name
conn.request("GET", "/api/v2/users/" + str(requestrequester_id) + "/organizations.json",
headers=headers)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d = json.loads(data)
org_Name = str(d["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("&", "&amp;").replace("<", "&lt;").replace('"', "&quot;").replace("'", "&apos;").replace(">", "&gt;")
orgName = str(org_name_temp)
# print(orgName)
except:
orgName = "N/A"
#messageDetail.ReplyToChat("Cannot get company info")
table_body = ""
if assignee_flag:
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:15%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
"<td style='border:1px solid black;text-align:left'>" + str(request_subject) + "</td></tr><tr>" \
"<td style='border:1px solid black;text-align:left' colspan=\"2\">" + str(request_desc) + "</td></tr><tr>" \
"<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(requeststatus) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_priority) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a></td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_created) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_updated) + "</td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(assigneeName) + "</td>" \
"</tr></thead><tbody></tbody></table>"
else:
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:15%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
"<td style='border:1px solid black;text-align:left'>" + str(request_subject) + "</td></tr><tr>" \
"<td style='border:1px solid black;text-align:left' colspan=\"2\">" + str(request_desc) + "</td></tr><tr>" \
"<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(requeststatus) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_priority) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a></td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_created) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_updated) + "</td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a></td>" \
"</tr></thead><tbody></tbody></table>"
# # Enable this to troubleshoot if there is any issue of character limitation
# UniqueToken = len(set(table_header.split()))
# print("Unique: " + str(UniqueToken))# + " Unique1: " + str(UniqueToken1))
# print("Ticket ID: " + str(ticketid))
#
# myTicketLenght = len(str(table_header))
# print(str(myTicketLenght))
# table_bodyFull += ("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a> (<a href=\"" + str(OrgTicket) + "\">" + str(orgName) + ")</a> " + str(request_subject) + " (assigned: " + "<a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a> Updated: " + str(request_updated) + " Status: " + str(requeststatus) + ")</header><body>" + table_header + "</body></card>")
table_bodyFull += ("<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a> (<a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a>) " + str(request_subject) + " (assigned: " + "<a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a> Updated: " + str(request_updated) + " Status: " + str(requeststatus) + ")</header><body>" + table_header + "</body></card>")
reply = table_bodyFull
characterLimit = len(str(table_bodyFull))
#if characterLimit >= 70000:
if characterLimit >= int(_configDef['limit']['character']):
messageDetail.ReplyToChatV2("You have reached a character limitation. Ticket(s) from ID " + str(request_id) + " is/are not showing, please check against your given ticket list")
return messageDetail.ReplyToChatV2_noBotLog(str(reply))
break
try:
if wrongID:
if index == len(message_split) - 1:
return messageDetail.ReplyToChatV2(reply + "<p></p><b>There is no such Zendesk ticket number: " + str(wrongZDID) + "</b>")
except:
if index == len(message_split) - 1:
#messageDetail.ReplyToChatV2(reply)
#messageDetail.ReplyToChatV2_noBotLog("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>Please find the result below</header><body>" + reply + "</body></card>")
messageDetail.ReplyToChatV2_noBotLog(str(reply))
except:
return messageDetail.ReplyToChat("I am sorry, I was working on a different task, can you please retry")
def showTicketComments (messageDetail):
botlog.LogSymphonyInfo("######################################")
botlog.LogSymphonyInfo("Bot Call: Show Zendesk ticket comments")
botlog.LogSymphonyInfo("######################################")
try:
privateComment = False
prvCom = False
counter = True
notPrivate = True
messageSent = False
limitMessageNeeded = True
isAllowed = ""
table_bodyFull = ""
table_header = ""
commentLenght = ""
table_header = ""
UniqueToken = ""
showComment = ""
isMe = ""
commandCallerUID = messageDetail.FromUserId
connComp = http.client.HTTPSConnection(_configDef['symphonyinfo']['pod_hostname'])
sessionTok = callout.GetSessionToken()
headersCompany
#
# Copyright (C) 2013 <NAME>
# http://tommy.winther.nu
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import StringIO
import os
import threading
import datetime
import time
import urllib2
from xml.etree import ElementTree
import buggalo
from strings import *
import ysapi
import xbmc
import xbmcgui
import xbmcvfs
import sqlite3
SETTINGS_TO_CHECK = ['source', 'youseetv.category', 'xmltv.type', 'xmltv.file', 'xmltv.url', 'xmltv.logo.folder']
class Channel(object):
def __init__(self, id, title, logo=None, streamUrl=None, visible=True, weight=-1):
self.id = id
self.title = title
self.logo = logo
self.streamUrl = streamUrl
self.visible = visible
self.weight = weight
def isPlayable(self):
return hasattr(self, 'streamUrl') and self.streamUrl
def __eq__(self, other):
return self.id == other.id
def __repr__(self):
return 'Channel(id=%s, title=%s, logo=%s, streamUrl=%s)' \
% (self.id, self.title, self.logo, self.streamUrl)
class Program(object):
def __init__(self, channel, title, startDate, endDate, description, imageLarge=None, imageSmall=None,
notificationScheduled=None):
"""
@param channel:
@type channel: source.Channel
@param title:
@param startDate:
@param endDate:
@param description:
@param imageLarge:
@param imageSmall:
"""
self.channel = channel
self.title = title
self.startDate = startDate
self.endDate = endDate
self.description = description
self.imageLarge = imageLarge
self.imageSmall = imageSmall
self.notificationScheduled = notificationScheduled
def __repr__(self):
return 'Program(channel=%s, title=%s, startDate=%s, endDate=%s, description=%s, imageLarge=%s, imageSmall=%s)' % \
(self.channel, self.title, self.startDate, self.endDate, self.description, self.imageLarge,
self.imageSmall)
class SourceException(Exception):
pass
class SourceUpdateCanceledException(SourceException):
pass
class SourceNotConfiguredException(SourceException):
pass
class DatabaseSchemaException(sqlite3.DatabaseError):
pass
class Database(object):
SOURCE_DB = 'source.db'
CHANNELS_PER_PAGE = 9
def __init__(self):
self.conn = None
self.eventQueue = list()
self.event = threading.Event()
self.eventResults = dict()
self.source = instantiateSource()
self.updateInProgress = False
self.updateFailed = False
self.settingsChanged = None
self.alreadyTriedUnlinking = False
#buggalo.addExtraData('source', self.source.KEY)
#for key in SETTINGS_TO_CHECK:
# buggalo.addExtraData('setting: %s' % key, ADDON.getSetting(key))
self.channelList = list()
profilePath = xbmc.translatePath(ADDON.getAddonInfo('profile'))
if not os.path.exists(profilePath):
os.makedirs(profilePath)
self.databasePath = os.path.join(profilePath, Database.SOURCE_DB)
threading.Thread(name='Database Event Loop', target=self.eventLoop).start()
def eventLoop(self):
print 'Database.eventLoop() >>>>>>>>>> starting...'
while True:
self.event.wait()
self.event.clear()
event = self.eventQueue.pop(0)
command = event[0]
callback = event[1]
print 'Database.eventLoop() >>>>>>>>>> processing command: ' + command.__name__
try:
result = command(*event[2:])
self.eventResults[command.__name__] = result
if callback:
if self._initialize == command:
threading.Thread(name='Database callback', target=callback, args=[result]).start()
else:
threading.Thread(name='Database callback', target=callback).start()
if self._close == command:
del self.eventQueue[:]
break
except Exception:
print 'Database.eventLoop() >>>>>>>>>> exception!'
buggalo.onExceptionRaised()
print 'Database.eventLoop() >>>>>>>>>> exiting...'
def _invokeAndBlockForResult(self, method, *args):
event = [method, None]
event.extend(args)
self.eventQueue.append(event)
self.event.set()
while not method.__name__ in self.eventResults:
time.sleep(0.1)
result = self.eventResults.get(method.__name__)
del self.eventResults[method.__name__]
return result
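# Usage sketch: public methods below marshal work onto the database thread
# and block for the result, e.g. getEPGView() calls
#   self._invokeAndBlockForResult(self._getEPGView, channelStart, date, ...)
# so every sqlite3 call runs on the 'Database Event Loop' thread that owns
# the connection.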
def initialize(self, callback, cancel_requested_callback=None):
self.eventQueue.append([self._initialize, callback, cancel_requested_callback])
self.event.set()
def _initialize(self, cancel_requested_callback):
sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
sqlite3.register_converter('timestamp', self.convert_datetime)
self.alreadyTriedUnlinking = False
while True:
if cancel_requested_callback is not None and cancel_requested_callback():
break
try:
self.conn = sqlite3.connect(self.databasePath, detect_types=sqlite3.PARSE_DECLTYPES)
self.conn.execute('PRAGMA foreign_keys = ON')
self.conn.row_factory = sqlite3.Row
# create and drop dummy table to check if database is locked
c = self.conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS database_lock_check(id TEXT PRIMARY KEY)')
c.execute('DROP TABLE database_lock_check')
c.close()
self._createTables()
self.settingsChanged = self._wasSettingsChanged(ADDON)
break
except sqlite3.OperationalError:
if cancel_requested_callback is None:
xbmc.log('[script.tvguide] Database is locked, bailing out...', xbmc.LOGDEBUG)
break
else: # ignore 'database is locked'
xbmc.log('[script.tvguide] Database is locked, retrying...', xbmc.LOGDEBUG)
except sqlite3.DatabaseError:
self.conn = None
if self.alreadyTriedUnlinking:
xbmc.log('[script.tvguide] Database is broken and unlink() failed', xbmc.LOGDEBUG)
break
else:
try:
os.unlink(self.databasePath)
except OSError:
pass
self.alreadyTriedUnlinking = True
xbmcgui.Dialog().ok(ADDON.getAddonInfo('name'), strings(DATABASE_SCHEMA_ERROR_1),
strings(DATABASE_SCHEMA_ERROR_2), strings(DATABASE_SCHEMA_ERROR_3))
return self.conn is not None
def close(self, callback=None):
self.eventQueue.append([self._close, callback])
self.event.set()
def _close(self):
try:
# rollback any non-commit'ed changes to avoid database lock
if self.conn:
self.conn.rollback()
except sqlite3.OperationalError:
pass # no transaction is active
if self.conn:
self.conn.close()
def _wasSettingsChanged(self, addon):
settingsChanged = False
noRows = True
count = 0
c = self.conn.cursor()
c.execute('SELECT * FROM settings')
for row in c:
noRows = False
key = row['key']
if SETTINGS_TO_CHECK.count(key):
count += 1
if row['value'] != addon.getSetting(key):
settingsChanged = True
if count != len(SETTINGS_TO_CHECK):
settingsChanged = True
if settingsChanged or noRows:
for key in SETTINGS_TO_CHECK:
value = addon.getSetting(key).decode('utf-8', 'ignore')
c.execute('INSERT OR IGNORE INTO settings(key, value) VALUES (?, ?)', [key, value])
if not c.rowcount:
c.execute('UPDATE settings SET value=? WHERE key=?', [value, key])
self.conn.commit()
c.close()
print 'Settings changed: ' + str(settingsChanged)
return settingsChanged
def _isCacheExpired(self, date):
if self.settingsChanged:
return True
# check if channel data is up-to-date in database
try:
c = self.conn.cursor()
c.execute('SELECT channels_updated FROM sources WHERE id=?', [self.source.KEY])
row = c.fetchone()
if not row:
return True
channelsLastUpdated = row['channels_updated']
c.close()
except TypeError:
return True
# check if program data is up-to-date in database
dateStr = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
c.execute('SELECT programs_updated FROM updates WHERE source=? AND date=?', [self.source.KEY, dateStr])
row = c.fetchone()
if row:
programsLastUpdated = row['programs_updated']
else:
programsLastUpdated = datetime.datetime.fromtimestamp(0)
c.close()
return self.source.isUpdated(channelsLastUpdated, programsLastUpdated)
def updateChannelAndProgramListCaches(self, callback, date=None, progress_callback=None,
clearExistingProgramList=True):
# datetime.now() as a default argument would be evaluated once, at import time
if date is None:
date = datetime.datetime.now()
self.eventQueue.append(
[self._updateChannelAndProgramListCaches, callback, date, progress_callback, clearExistingProgramList])
self.event.set()
def _updateChannelAndProgramListCaches(self, date, progress_callback, clearExistingProgramList):
# TODO: workaround -- service.py 'forgets' the adapter and converter set in _initialize
sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
sqlite3.register_converter('timestamp', self.convert_datetime)
if not self._isCacheExpired(date):
return
self.updateInProgress = True
self.updateFailed = False
dateStr = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
try:
xbmc.log('[script.tvguide] Updating caches...', xbmc.LOGDEBUG)
if progress_callback:
progress_callback(0)
if self.settingsChanged:
c.execute('DELETE FROM channels WHERE source=?', [self.source.KEY])
c.execute('DELETE FROM programs WHERE source=?', [self.source.KEY])
c.execute("DELETE FROM updates WHERE source=?", [self.source.KEY])
self.settingsChanged = False # only want to update once due to changed settings
if clearExistingProgramList:
c.execute("DELETE FROM updates WHERE source=?",
[self.source.KEY]) # cascades and deletes associated programs records
else:
c.execute("DELETE FROM updates WHERE source=? AND date=?",
[self.source.KEY, dateStr]) # cascades and deletes associated programs records
# programs updated
c.execute("INSERT INTO updates(source, date, programs_updated) VALUES(?, ?, ?)",
[self.source.KEY, dateStr, datetime.datetime.now()])
updatesId = c.lastrowid
imported = imported_channels = imported_programs = 0
for item in self.source.getDataFromExternal(date, progress_callback):
imported += 1
if imported % 10000 == 0:
self.conn.commit()
if isinstance(item, Channel):
imported_channels += 1
channel = item
c.execute(
'INSERT OR IGNORE INTO channels(id, title, logo, stream_url, visible, weight, source) VALUES(?, ?, ?, ?, ?, (CASE ? WHEN -1 THEN (SELECT COALESCE(MAX(weight)+1, 0) FROM channels WHERE source=?) ELSE ? END), ?)',
[channel.id, channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight,
self.source.KEY, channel.weight, self.source.KEY])
if not c.rowcount:
c.execute(
'UPDATE channels SET title=?, logo=?, stream_url=?, visible=(CASE ? WHEN -1 THEN visible ELSE ? END), weight=(CASE ? WHEN -1 THEN weight ELSE ? END) WHERE id=? AND source=?',
[channel.title, channel.logo, channel.streamUrl, channel.weight, channel.visible,
channel.weight, channel.weight, channel.id, self.source.KEY])
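# (The INSERT OR IGNORE plus conditional UPDATE above emulates an upsert;
# SQLite only gained native 'ON CONFLICT ... DO UPDATE' in version 3.24.)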
elif isinstance(item, Program):
imported_programs += 1
program = item
if isinstance(program.channel, Channel):
channel = program.channel.id
else:
channel = program.channel
c.execute(
'INSERT INTO programs(channel, title, start_date, end_date, description, image_large, image_small, source, updates_id) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)',
[channel, program.title, program.startDate, program.endDate, program.description,
program.imageLarge, program.imageSmall, self.source.KEY, updatesId])
# channels updated
c.execute("UPDATE sources SET channels_updated=? WHERE id=?", [datetime.datetime.now(), self.source.KEY])
self.conn.commit()
if imported_channels == 0 or imported_programs == 0:
self.updateFailed = True
except SourceUpdateCanceledException:
# force source update on next load
c.execute('UPDATE sources SET channels_updated=? WHERE id=?', [0, self.source.KEY])
c.execute("DELETE FROM updates WHERE source=?",
[self.source.KEY]) # cascades and deletes associated programs records
self.conn.commit()
except Exception:
import traceback as tb
import sys
(etype, value, traceback) = sys.exc_info()
tb.print_exception(etype, value, traceback)
try:
self.conn.rollback()
except sqlite3.OperationalError:
pass # no transaction is active
try:
# invalidate cached data
c.execute('UPDATE sources SET channels_updated=? WHERE id=?', [0, self.source.KEY])
self.conn.commit()
except sqlite3.OperationalError:
pass # database is locked
self.updateFailed = True
finally:
self.updateInProgress = False
c.close()
def getEPGView(self, channelStart, date=None, progress_callback=None,
clearExistingProgramList=True):
# same stale-default fix as updateChannelAndProgramListCaches above
if date is None:
date = datetime.datetime.now()
result = self._invokeAndBlockForResult(self._getEPGView, channelStart, date, progress_callback,
clearExistingProgramList)
if self.updateFailed:
raise SourceException('No channels or programs imported')
return result
def _getEPGView(self, channelStart, date, progress_callback, clearExistingProgramList):
self._updateChannelAndProgramListCaches(date, progress_callback, clearExistingProgramList)
channels = self._getChannelList(onlyVisible=True)
if channelStart < 0:
channelStart = len(channels) - 1
elif channelStart > len(channels) - 1:
channelStart = 0
channelEnd = channelStart + Database.CHANNELS_PER_PAGE
channelsOnPage = channels[channelStart: channelEnd]
programs = self._getProgramList(channelsOnPage, date)
return [channelStart, channelsOnPage, programs]
def getNextChannel(self, currentChannel):
channels = self.getChannelList()
idx = channels.index(currentChannel)
idx += 1
if idx > len(channels) - 1:
idx = 0
return channels[idx]
= ""
classificacao = request.forms.get('classify')
atendedor = request.forms.get('atendedor')
db_user = Users(where = "login='{user}'".format(user=atendedor)).get()
if db_user:
from gap_opiniao import GAPOpiniao
db_user = db_user[0]
import erp_config as ec
res = GAPOpiniao().addOpiniao(user=db_user['id'],nome=nome,contacto=contacto,comentario=comentario,classificacao=classificacao, loja=ec.terminal_name, nome_atendedor=atendedor)
if(res == True):
return dict(reponse='ok')
else:
return dict(reponse='error')
except:
return dict(reponse='error')
# Get the kiosk screen
@route('/quiosque')
@view('ecranQuiosque')
def get_ecranSenha():
window_id = str(get_window_id())
import erp_config as ec
from gap_servico import GAPServico
servicos = GAPServico().get_servico()
return dict(title='Ecran Quiosque', favicon=ec.favicon, logotipo=ec.logotipo, servicos=servicos, window_id=window_id)
# Print a queue ticket (senha)
@post('/printSenha/<servico>/<letra>')
def imprimirSenha(servico,letra):
try:
import base64
from users import Users
db_user = Users(where = "login='{user}'".format(user='admin')).get()
if db_user:
db_user = db_user[0]
#window_id = str(get_window_id())
from gap_senha import GAPSenha
import erp_config as ec
res = GAPSenha().get_SenhaCliente(user=db_user['id'], servico=servico, letra=letra, loja=ec.terminal_name)
return res
except:
return None
# Build the TV voice-prompt playlist (to be improved in the future)
@post('/getvoicetv/<senha_content>/<numero_balcao>')
def get_voicetv(senha_content,numero_balcao):
from gap_multimedia import GAPMultimedia
import erp_config as ec
#sound tv path
path = "/var/www/core/static/audio"
warning=GAPMultimedia().find(name='aviso.wav', path=path)
servico=GAPMultimedia().find(name='serviço.wav', path=path)
senha=GAPMultimedia().find(name='senha.wav', path=path)
balcao=GAPMultimedia().find(name='balcão.wav', path=path)
numero_senha=senha_content[1:]
letra_senha=senha_content[:1]
letra_senha=GAPMultimedia().find(name=str(letra_senha).lower()+'.wav', path=path)
numero_senha=GAPMultimedia().find(name=str(numero_senha)+'.wav', path=path)
numero_balcao=GAPMultimedia().find(name=str(numero_balcao)+'.wav', path=path)
# For now we follow this fixed sequence (to be made dynamic in the future)
return str(warning)+";"+str(servico)+";"+str(letra_senha)+";"+str(senha)+";"+str(numero_senha)+";"+str(balcao)+";"+str(numero_balcao)
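# Example (sketch): for senha_content='A042' and numero_balcao='3' this returns
# a semicolon-separated playlist along the lines of
#   aviso.wav;serviço.wav;a.wav;senha.wav;042.wav;balcão.wav;3.wav
# where each entry is whatever path GAPMultimedia().find() resolved on disk.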
# Public procurement (Compras Publicas): these functions still need polishing
@route('/cms')
@view('cms')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
print('1')
#set_base_context(window_id)
print('2')
#ctx_dict = get_context(window_id)
#ctx_dict = {'titulo':'Portal de Compras Públicas'}
#ctx_dict['window_id'] = window_id
ctx_dict = {}
ctx_dict['name'] = 'index'
ctx_dict['title'] = 'Portal de Compras Públicas'
from cp_banner import Banner
resImgBanner = Banner().get_imgBanner()
ctx_dict['dadosImgBanner'] = resImgBanner
from cp_planoaquisanual import Planoaquisanual
res = Planoaquisanual().get_Planoaquisanual()
ctx_dict['dadosPlanoAA'] = res
respaaa = Planoaquisanual().get_PlanoaquisanualAgrup()
ctx_dict['dadosPlanoAAAgru'] = respaaa
from cp_procedimento import Procedimento
resconcAbert = Procedimento().getConcursoAberto()
ctx_dict['dadosconcursAbert'] = resconcAbert
ctx_dict['pagecountAberto'] = Procedimento().get_pagecountAberto()
resconcEmAndament = Procedimento().getConcursoEmAndamento()
ctx_dict['dadosconcursEmAndament'] = resconcEmAndament
ctx_dict['pagecountEmAndament'] = Procedimento().get_pagecountEmAndament()
resconcConcluid = Procedimento().getConcursoConcluido()
ctx_dict['dadosconcursConcluid'] = resconcConcluid
ctx_dict['pagecountConcluido'] = Procedimento().get_pagecountConcluido()
print("erro -------->>>> Aqui ")
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
print('fim do main_route')
return ctx_dict
@route('/View_concursos/<viewid>')
@view('concursosview')
#@require_auth
def main(viewid):
"""Funçao index"""
print('Init do main_route')
print('->>>>>>>>>>>>>>>>>>'+viewid)
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_procedimento import Procedimento
resconcursView = Procedimento().getConcursoView(viewid)
ctx_dict['dadosconcursView'] = resconcursView
from cp_esclarecimento import Esclarecimento
respEsclare = Esclarecimento().getProcedEsclare(viewid)
ctx_dict['dadosrespEsclare'] = respEsclare
from cp_documentoprocedimento import Documentoprocedimento
respDocProced = Documentoprocedimento().getProcedDoc(viewid)
ctx_dict['dadosrespDocProced'] = respDocProced
from cp_financiadorprocedimento import Financiadorprocedimento
respFinaciadProced = Financiadorprocedimento().getFinaciadProced(viewid)
ctx_dict['dadosrespFinaciadProced'] = respFinaciadProced
from cp_observacaoprocedimento import Observacaoprocedimento
respObservaProced = Observacaoprocedimento().getObservaProced(viewid)
ctx_dict['dadosrespObservaProced'] = respObservaProced
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
print('fim do main_route')
return ctx_dict
@route('/Concursos')
@view('concursos')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/view_new')
@view('newview')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Contratos')
@view('contratos')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_contrato import Contrato
respContrato = Contrato().getContrato()
ctx_dict['dadosrespContrato'] = respContrato
ctx_dict['pagecount'] = Contrato().get_pagecount()
print(ctx_dict['dadosrespContrato'])
print(ctx_dict['pagecount'])
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Entidade_publica')
@view('entidade_publica')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_adjudicante import Adjudicante
resQueryEntidadeAdjucante = Adjudicante().getEntidadeAdjucante()
ctx_dict['dadosEntidadeAdjucante'] = resQueryEntidadeAdjucante
ctx_dict['pagecount'] = Adjudicante().get_pagecount()
print(ctx_dict['pagecount'])
#from cp_adjudicante import Adjudicante
#resadjud = Adjudicante().get_adjudicante()
#ctx_dict['dadosadjudic'] = resadjud
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Faq')
@view('faq')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_faqs import Faqs
respFaqs = Faqs().get_faqs()
ctx_dict['dadosRespFaqs'] = respFaqs
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Legislacao')
@view('legislacao')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDocLeis = Documento().getDocLeis()
ctx_dict['dadosDocLeis'] = resQueryDocLeis
print(str(resQueryDocLeis))
resQueryDocDecretos = Documento().getDocDecretos()
ctx_dict['dadosDocDecretos'] = resQueryDocDecretos
resQueryDocResolucoesPortarias = Documento().getDocResolucoesPortarias()
ctx_dict['dadosDocResolucoesPortarias'] = resQueryDocResolucoesPortarias
resQueryDocRegulamentos = Documento().getDocRegulamentos()
ctx_dict['dadosDocRegulamentos'] = resQueryDocRegulamentos
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Directiva')
@view('directiva')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDirectiva = Documento().getDocDirectiva()
ctx_dict['dadosDocDirectiva'] = resQueryDirectiva
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/DeclaracoesFornecedores')
@view('declaracoes_fornecedores')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDeclaracoesFornecedores = Documento().getDocDeclaracoesFornecedores()
ctx_dict['dadosDeclaracoesFornecedores'] = resQueryDeclaracoesFornecedores
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/DocEstandarizados')
@view('doc_estandarizados')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDocEstandarizadosProgramas = Documento().getDocEstandarizadosProgramas()
ctx_dict['dadosDocEstandarizadosProgramas'] = resQueryDocEstandarizadosProgramas
resQueryDocEstandarizadosCadernos = Documento().getDocEstandarizadosCadernos()
ctx_dict['dadosDocEstandarizadosCadernos'] = resQueryDocEstandarizadosCadernos
resQueryDocEstandarizadosConvites = Documento().getDocEstandarizadosConvites()
ctx_dict['dadosDocEstandarizadosConvites'] = resQueryDocEstandarizadosConvites
resQueryDocEstandarizadosTermos = Documento().getDocEstandarizadosTermos()
ctx_dict['dadosDocEstandarizadosTermos'] = resQueryDocEstandarizadosTermos
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Mapa_do_site')
@view('mapa_site')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDocResolucoesPortarias = Documento().getDocResolucoesPortarias()
ctx_dict['dadosDocResolucoesPortarias'] = resQueryDocResolucoesPortarias
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
@route('/Manual')
@view('manual')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDocManual = Documento().getDocManual()
ctx_dict['dadosDocManual'] = resQueryDocManual
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
"""
@route('/Regulamentos')
@view('regulamento')
#@require_auth
def main():
#Funçao index
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_documento import Documento
resQueryDocRegulamentos = Documento().getDocRegulamentos()
ctx_dict['dadosDocRegulamentos'] = resQueryDocRegulamentos
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
#print('fim do main_route')
return ctx_dict
"""
@route('/Fornecedores')
@view('fornecedores')
#@require_auth
def main():
"""Funçao index"""
print('Init do main_route')
#window_id = str(get_window_id())
#print('1')
#set_base_context(window_id)
#print('2')
#ctx_dict = get_context(window_id)
ctx_dict = {'titulo':'Portal de Compras Públicas'}
from cp_fornecedor import Fornecedor
res = Fornecedor().get_fornecedor()
ctx_dict['dados'] = res
ctx_dict['pagecount'] = Fornecedor().get_pagecount()
print(ctx_dict['pagecount'])
#ctx_dict['window_id'] = window_id
#ctx_dict['name'] = 'index'
#ctx_dict['title'] = 'ERP +'
#ctx_dict['form'] = ''
#set_context(window_id, ctx_dict)
print('fim do main_route')
return ctx_dict
@get('/get_page/<page>/<pagelimit>')
def get_page(page, pagelimit):
from cp_fornecedor import Fornecedor
res = Fornecedor().get_html_page(page=page, pagelimit=pagelimit)
return str(res)
@get('/get_pageEntidadePublica/<page>/<pagelimit>')
def get_page(page, pagelimit):
from cp_adjudicante import Adjudicante
res = Adjudicante().get_html_page(page=page, pagelimit=pagelimit)
return str(res)
@get('/get_pageContrato/<page>/<pagelimit>')
def get_page(page, pagelimit):
from cp_contrato import Contrato
res = Contrato().get_html_page(page=page, pagelimit=pagelimit)
return str(res)
@get('/get_pageProcedAbert/<page>/<pagelimit>')
def get_page(page, pagelimit):
from cp_procedimento import Procedimento
res = Procedimento().get_html_pageAbert(page=page, pagelimit=pagelimit)
return str(res)
@get('/get_pageProcedEmAndament/<page>/<pagelimit>')
def get_page(page, pagelimit):
from cp_procedimento import Procedimento
res = Procedimento().get_html_pageEmAndament(page=page, pagelimit=pagelimit)
return str(res)
@get('/get_pageProcedConcluido/<page>/<pagelimit>')
def get_page(page, pagelimit):
from cp_procedimento import Procedimento
res = Procedimento().get_html_pageConcluido(page=page, pagelimit=pagelimit)
return str(res)
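# (Sketch: the five get_page* handlers above share one shape -- instantiate a
# model class and return its paginated HTML. A single table-driven route such
# as '/get_page/<model>/<page>/<pagelimit>' dispatching through a dict of
# {model_name: pager_callable} could replace them; they are kept separate
# here to preserve the original URLs.)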
# Add a new supplier (fornecedor)
@post('/registFornecd')
def addFornecedor():
try:
import base64
from users import Users
nomeForn = request.forms.get('fornecName')
nifForn = request.forms.get('fornecNif')
tipoEmpForn = request.forms.get('tipo_empresa')
paisForn = request.forms.get('pais_empresa')
areaForn = request.forms.get('area_servico')
localizacaoForn = request.forms.get('fornecLocalizacao')
emailForn = request.forms.get('fornecEmail')
senhaForn = request.forms.get('fornecSenha')
obsForn = request.forms.get('fornecObs')
db_user = Users(where = "login='{user}'".format(user='admin')).get()
if db_user:
from cp_fornecedor import Fornecedor
db_user = db_user[0]
import erp_config as ec
res = Fornecedor().addFornecedor(user=db_user['id'], nomeForn=nomeForn, nifForn=nifForn, tipoEmpForn=tipoEmpForn, paisForn=paisForn, areaForn=areaForn, localizacaoForn=localizacaoForn, emailForn=emailForn, senhaForn=senhaForn, obsForn=obsForn)
if(res == True):
return 'ok'
else:
return 'error'
except:
return 'error'
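# Example (sketch): this endpoint expects a form-encoded POST such as
#   curl -X POST -d 'fornecName=ACME&fornecNif=123456789' http://<host>/registFornecd
# and replies with the plain string 'ok' or 'error'.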
# Look up taxpayer (contribuinte) data for a supplier NIF
@get('/getdadosFornecd/<nifForn>')
def get_dad_contribuinte(nifForn):
from cp_contribuinte import Contribuinte
resContrib = Contribuinte().get_dad_contribuinte(nifForn=nifForn)
return resContrib
# Add a new clarification request (esclarecimento)
@get('/registEsclareciment/<nomeForn>/<nifForn>/<contatolForn>/<assuntEsclrt>/<textEsclrt>/<estadoEsclrt>/<refProcedt>')
def addEsclarecimento(nomeForn, nifForn, contatolForn, assuntEsclrt, textEsclrt, estadoEsclrt, refProcedt):
try:
import base64
from users import Users
"""
nomeForn = request.forms.get('nomeFornec')
nifForn = request.forms.get('nifFornec')
contatolForn = request.forms.get('contatFornec')
assuntEsclrt = request.forms.get('esclrassunto')
textEsclrt = request.forms.get('esclrtest')
estadoEsclrt = request.forms.get('esclrestado')
refProcedt = request.forms.get('referencprocedimet')
"""
db_user = Users(where = "login='{user}'".format(user='admin')).get()
method",
"title_templated": False,
"title": "Methods should have docstrings.",
"solution": "Add a docstring to your method.",
"explanation": """
A docstring is a special comment at the top of your method that briefly explains the purpose of the method. It should have 3 sets of quotes to start and finish the comment.
For example:
```
class Kiwi():
\"\"\"Represents a kiwi bird from New Zealand.\"\"\"
def eat_plants(amount):
\"\"\"Calculates calories from the given amount of plant food and eats it.\"\"\"
```
"""
},
"D103": {
"original_message": "Missing docstring in public function",
"title_templated": False,
"title": "This function should have a docstring.",
"solution": "Add a docstring to your function.",
"explanation": """
A docstring is a special comment at the top of your function that briefly explains the purpose of the function. It should have 3 sets of quotes to start and finish the comment.
For example:
```
def get_waypoint_latlng(number):
\"\"\"Return the latitude and longitude values of the waypoint.\"\"\"
```
"""
},
"D104": {
"original_message": "Missing docstring in public package",
"title_templated": False,
"title": "Packages should have docstrings.",
"solution": "Add a docstring to your package.",
"explanation": ""
},
"D105": {
"original_message": "Missing docstring in magic method",
"title_templated": False,
"title": "Magic methods should have docstrings.",
"solution": "Add a docstring to your magic method.",
"explanation": ""
},
"D106": {
"original_message": "Missing docstring in public nested class",
"title_templated": False,
"title": "Public nested classes should have docstrings.",
"solution": "Add a docstring to your public nested class.",
"explanation": ""
},
"D107": {
"original_message": "Missing docstring in __init__",
"title_templated": False,
"title": "The `__init__` method should have a docstring.",
"solution": "Add a docstring to your `__init__` method.",
"explanation": "This helps understand how objects of your class are created."
},
"D200": {
"original_message": "One-line docstring should fit on one line with quotes",
"title_templated": False,
"title": "Docstrings that are one line long should fit on one line with quotes.",
"solution": "Put your docstring on one line with quotes.",
"explanation": ""
},
"D201": {
"original_message": "No blank lines allowed before function docstring",
"title_templated": False,
"title": "Function docstrings should not have blank lines before them.",
"solution": "Remove any blank lines before your function docstring.",
"explanation": ""
},
"D202": {
"original_message": "No blank lines allowed after function docstring",
"title_templated": False,
"title": "Function docstrings should not have blank lines after them.",
"solution": "Remove any blank lines after your function docstring.",
"explanation": ""
},
"D203": {
"original_message": "1 blank line required before class docstring",
"title_templated": False,
"title": "Class docstrings should have 1 blank line before them.",
"solution": "Insert 1 blank line before your class docstring.",
"explanation": ""
},
"D204": {
"original_message": "1 blank line required after class docstring",
"title_templated": False,
"title": "Class docstrings should have 1 blank line after them.",
"solution": "Insert 1 blank line after your class docstring.",
"explanation": "This improves the readability of your code."
},
"D205": {
"original_message": "1 blank line required between summary line and description",
"title_templated": False,
"title": "There should be 1 blank line between the summary line and the description.",
"solution": "Insert 1 blank line between the summary line and the description.",
"explanation": """
This makes larger docstrings easier to read.
For example:
```
def get_stock_level(book):
\"\"\"Return the stock level for the given book.
This calculates the stock level across multiple stores in the city,
excluding books reserved for customers.
Args:
book (str): Name of the book.
Returns:
int: The current stock level for the book.
\"\"\"
```
"""
},
"D206": {
"original_message": "Docstring should be indented with spaces, not tabs",
"title_templated": False,
"title": "Docstrings should be indented using spaces, not tabs.",
"solution": "Make sure your docstrings are indented using spaces instead of tabs.",
"explanation": ""
},
"D207": {
"original_message": "Docstring is under-indented",
"title_templated": False,
"title": "Docstring is under-indented.",
"solution": "Add indentation levels to your docstring until it is at the correct indentation level.",
"explanation": ""
},
"D208": {
"original_message": "Docstring is over-indented",
"title_templated": False,
"title": "Docstring is over-indented.",
"solution": "Remove indentation levels from your docstring until it is at the correct indentation level.",
"explanation": ""
},
"D209": {
"original_message": "Multi-line docstring closing quotes should be on a separate line",
"title_templated": False,
"title": "Docstrings that are longer than one line should have closing quotes on a separate line.",
"solution": "Put the closing quotes of your docstring on a separate line.",
"explanation": """
This makes larger docstrings easier to read.
For example:
```
def get_stock_level(book):
\"\"\"Return the stock level for the given book.
This calculates the stock level across multiple stores in the city,
excluding books reserved for customers.
Args:
book (str): Name of the book.
Returns:
int: The current stock level for the book.
\"\"\"
```
"""
},
"D210": {
"original_message": "No whitespaces allowed surrounding docstring text",
"title_templated": False,
"title": "Text in docstrings should not be surrounded by whitespace.",
"solution": "Remove any whitespace from the start and end of your docstring.",
"explanation": ""
},
"D211": {
"original_message": "No blank lines allowed before class docstring",
"title_templated": False,
"title": "Class docstrings should not have blank lines before them.",
"solution": "Remove any blank lines before your class docstring.",
"explanation": ""
},
"D212": {
"original_message": "Multi-line docstring summary should start at the first line",
"title_templated": False,
"title": "Docstrings that are more than one line long should start at the first line.",
"solution": """
Ensure your docstring starts on the first line with quotes.
For example:
```
def get_stock_level(book):
\"\"\"Return the stock level for the given book.
```
""",
"explanation": ""
},
"D213": {
"original_message": "Multi-line docstring summary should start at the second line",
"title_templated": False,
"title": "Docstrings that are more than one line long should start at the second line.",
"solution": "Ensure your docstring starts on the second line, which is the first line without quotes.",
"explanation": ""
},
"D214": {
"original_message": "Section is over-indented",
"title_templated": False,
"title": "Section is indented by too many levels.",
"solution": "Remove indentation levels from this section until it is at the correct indentation level.",
"explanation": ""
},
"D215": {
"original_message": "Section underline is over-indented",
"title_templated": False,
"title": "Section underline is indented by too many levels.",
"solution": "Remove indentation levels from this section underline until it is at the correct indentation level.",
"explanation": ""
},
"D300": {
"original_message": "Use \"\"\"triple double quotes\"\"\"",
"title_templated": False,
"title": "Use \"\"\"triple double quotes\"\"\" around docstrings.",
"solution": "Use \"\"\" triple double quotes around your docstring.",
"explanation": ""
},
"D301": {
"original_message": "Use r\"\"\" if any backslashes in a docstring",
"title_templated": False,
"title": "Use r\"\"\" if there are any backslashes in a docstring.",
"solution": "Use r\"\"\" at the beginning of your docstring if it contains any backslashes.",
"explanation": ""
},
"D302": {
"original_message": "Use u\"\"\" for Unicode docstrings",
"title_templated": False,
"title": "Use u\"\"\" for docstrings that contain Unicode.",
"solution": "Use u\"\"\" at the beginning of your docstring if it contains any Unicode.",
"explanation": ""
},
"D400": {
"original_message": "First line should end with a period",
"title_templated": False,
"title": "The first line in docstrings should end with a period.",
"solution": "Add a period to the end of the first line in your docstring. It should be a short summary of your code, if it's too long you may need to break it apart into multiple sentences.",
"explanation": ""
},
"D401": {
"original_message": "First line should be in imperative mood",
"title_templated": False,
"title": "The first line in docstrings should read like a command.",
"solution": "Ensure the first line in your docstring reads like a command, not a description. For example 'Do this' instead of 'Does this', 'Return this' instead of 'Returns this'.",
"explanation": ""
},
"D402": {
"original_message": "First line should not be the function’s signature",
"title_templated": False,
"title": "The first line in docstrings should not be a copy of the function’s definition.",
"solution": "Rewrite the docstring to describe the purpose of the function.",
"explanation": ""
},
"D403": {
"original_message": "First word of the first line should be properly capitalized",
"title_templated": False,
"title": "The first word in the first line should be capitalised.",
"solution": "Capitalise the first word.",
"explanation": ""
},
"D404": {
"original_message": "First word of the docstring should not | |
# radio_astro/python/systemp_calibration.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 DSPIRA.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
from datetime import datetime
import time
try:
import h5py
except ImportError:
print("Python package:")
print("  h5py")
print("Not found. If needed, at the command line type:")
print("    pip install h5py")
print("")
from gnuradio import gr
class systemp_calibration(gr.sync_block):
"""
systemp_calibration - takes input from a spectrometer.
In: Data stream of spectra
Several vectors are output:
out0: Latest Spectrum - either raw or denoised, with or without calibration, depending on user's choice.
out1: Gain - updated whenever "hot" or "cold" calibrations are done.
out2: System Temperature - updated whenever "hot" or "cold" calibrations are done.
The input signal is denoised using 2 methods:
1. Noise spikes are removed using a moving median method;
2. The spectrum is smoothed using a moving average weighted with a Gaussian function about each point.
Once the system temperature is determined as a function of frequency, its final value is taken as the average of the system temperature over the spectrum.
Parameters:
(1) vec_length - vector length in channels
(2) collect - controlled by a Chooser block, which needs 4 options with the variables: nocal (= raw spectrum), cal (= spectrum with calibrations), hot (= hot calibration), cold (= cold calibration)
(3) samp_rate - used to calculate frequency values for spectrum output; set in a Variable box.
(4) freq - center frequency used to calculate frequency values for spectrum output; set in a Variable box.
(5) prefix - file path prefix used to build output filenames; set in a Variable box.
(6) spectrumcapture_toggle - determines whether the spectrum is captured to a file at the location given by the prefix variable, with filename = prefix + timenow + "_spectrum.csv".
"""
def __init__(self, vec_length, collect, samp_rate, freq, prefix, spectrumcapture_toggle, clip_toggle, az, elev, location):
gr.sync_block.__init__(self,
name="systemp_calibration",
in_sig=[(np.float32, int(vec_length))],
out_sig=3*[(np.float32, int(vec_length))])
self.vec_length = int(vec_length)
self.collect = collect
self.samp_rate = samp_rate
self.freq = freq
self.prefix = prefix
self.spectrumcapture_toggle = False  # note: initialised to False; the spectrumcapture_toggle argument is not used here
self.clip_toggle = clip_toggle
self.az = az
self.elev = elev
self.location = location
# Define vectors and constants:
self.spectrum = np.zeros(vec_length)
self.filtered_out0 = np.ones(vec_length)
self.filtered_spike = np.ones(vec_length)
self.hot = 2*np.ones(vec_length)
self.cold = 1*np.ones(vec_length)
self.gain = np.ones(vec_length)
self.tsys = 50*np.ones(vec_length)
self.thot = 300
self.tcold = 10
self.frequencies = np.zeros(vec_length)
self.frequencies = np.arange(freq - samp_rate/2, freq + samp_rate/2, samp_rate/vec_length)[:vec_length]
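# Worked example (illustrative values): freq=1420.4e6, samp_rate=2.4e6 and
# vec_length=4096 give a 4096-point axis from 1419.2 MHz (freq - samp_rate/2)
# up to, but not including, 1421.6 MHz, in steps of samp_rate/vec_length.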
self.data_array = np.zeros((vec_length,2))
self.a = np.zeros(self.vec_length)
self.x = np.zeros(vec_length)
self.Nclip_lo = 410
self.Nclip_hi = 410
self.spectrum_mask_full = np.ones(vec_length)
self.spectrum_mask_clipped = np.ones(vec_length)
self.spectrum_mask_clipped[:self.Nclip_lo] = 0
self.spectrum_mask_clipped[vec_length - self.Nclip_hi:] = 0
# To apply Gaussian smoothing to the data, assign values to the Gaussian kernel.
# Note: the parameter k defines the size of the smoothing window; "fwhm" defines the width of the Gaussian fit.
# For the hot and cold calibrations, the spectrum has no peaks in the region of interest, so k_cal is larger: k_cal = 50.
# For the data spectrum, k is smaller: k_spec = 8, to preserve the peak features.
self.k_spike = 10 # defines the range of values to take median average over for removing noise spikes
# CALCULATE GAUSSIAN SMOOTHING COEFFICIENTS
# Each data point will be smoothed by taking an average of +/- k points surrounding the point; the average is a weighted Gaussian average
#1. HOT & COLD CALIBRATIONS
# k_cal = +/- 50 points surrounding each point
# FWHM = 1/4 window width (sigma)
self.k_cal = 50
self.fwhm_cal = int(self.k_cal/4)
self.normal_factor_cal = 1/np.sqrt(2*np.pi*self.fwhm_cal**2)
self.gx_cal = np.arange(-self.k_cal,self.k_cal+1,1)
self.gauss_window_cal = self.normal_factor_cal*np.exp(-(self.gx_cal**2)/(2*self.fwhm_cal**2))
self.gauss_window_cal = self.gauss_window_cal/self.gauss_window_cal.sum()
#2. SPECTRUM SMOOTHING
# k_spec = +/- 8 points surrounding each point - a narrower window will not shift the peak positions
# FWHM = 1/4 window width (sigma)
self.k_spec = 8
self.fwhm_spec = int(self.k_spec/4)
self.normal_factor_spec = 1/np.sqrt(2*np.pi*self.fwhm_spec**2)
self.gx_spec = np.arange(-self.k_spec,self.k_spec+1,1)
self.gauss_window_spec = self.normal_factor_spec*np.exp(-(self.gx_spec**2)/(2*self.fwhm_spec**2))
self.gauss_window_spec = self.gauss_window_spec/self.gauss_window_spec.sum()
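# Sketch of how such a window is typically applied (the spike_smoothing and
# gauss_smoothing_* methods are defined outside this excerpt, so this shows
# the standard technique, not the original implementation):
#
#     k = self.k_spec
#     padded = np.concatenate((self.a[k:0:-1], self.a, self.a[-2:-k-2:-1]))
#     smoothed = np.convolve(padded, self.gauss_window_spec, mode='same')[k:-k]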
def work(self, input_items, output_items):
in0 = input_items[0]
# Copy the input data into a simpler array:
self.a[:] = in0[0,:].copy()
out0 = output_items[0]
out1 = output_items[1]
out2 = output_items[2]
if self.clip_toggle == "True":  # the toggle arrives as a string, e.g. from a GRC Chooser block
self.spectrum_mask = self.spectrum_mask_clipped
else:
self.spectrum_mask = self.spectrum_mask_full
# Check if the "collect" Chooser is changed. If "hot" or "cold" are selected, the Gain and Tsys are updated.
# The collect variable is selected in the .grc program, as follows:
# "cal" = calibrated spectrum
# "hot" = spectrum stored in the hot[] array, used for calculating the gain and Tsys.
# "cold" = spectrum stored in the cold[] array, used for calculating the gain and Tsys.
# "nocal" = raw spectrum that is smoothed using the spike_smoothing and gauss_smoothing routines
# "nocal_nofilter" = raw spectrum
if self.collect == "cal":
self.spike_smoothing() # This routine removes noise spikes.
self.gauss_smoothing_spec() # This routine smooths the data using Gaussian averaging.
# The output is calibrated using the gain and Tsys:
out0[:] = (self.filtered_out0/(self.gain) - self.tsys)*self.spectrum_mask
self.spectrum[:] = (self.filtered_out0/(self.gain) - self.tsys)*self.spectrum_mask
# The self.spectrum array is what gets output to the .csv file when the Capture Latest Spectrum button is pressed.
elif self.collect == "hot":
self.spike_smoothing() # This routine removes noise spikes.
self.gauss_smoothing_cal() # This routine smooths the data using Gaussian averaging.
# self.filtered_out0 is the output array resulting from the smoothing routines. This spectrum gets stored in the hot temperature hot[] array.
self.hot[:] = self.filtered_out0[:]
# The displayed output is the filtered, non-calibrated spectrum
out0[:] = self.filtered_out0
self.spectrum[:] = self.filtered_out0
# Calculate/update the system gain and temperature arrays.
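# Y-factor method: with Y = P_hot/P_cold,
#   Tsys = (Thot - Y*Tcold) / (Y - 1)  and  gain = P_cold / (Tcold + Tsys),
# so a raw power spectrum P is converted to temperature as P/gain - Tsys.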
self.y = self.hot/self.cold
self.y[self.y == 1] = 2  # avoid division by zero in (y - 1) below
self.tsys = (self.thot - self.y*self.tcold)/(self.y-1)
self.gain = self.cold/(self.tcold + self.tsys)
self.gain[self.gain <= 0] = 1
# Replace Tsys with its median over the band (a single scalar value):
self.tsys[:] = np.median(self.tsys)
elif self.collect == "cold":
self.spike_smoothing() # This routine removes noise spikes.
self.gauss_smoothing_cal() # This routine smooths the data using Gaussian averaging.
# self.filtered_out0 is the output array resulting from the smoothing routines. This spectrum gets stored in the cold temperature cold[] array.
self.cold[:] = self.filtered_out0[:]
# The displayed output is the filtered, non-calibrated spectrum
out0[:] = self.filtered_out0
self.spectrum[:] = self.filtered_out0
# Calculate/update the system gain and temperature arrays.
self.y = self.hot/self.cold
self.y[self.y == 1] = 2
self.tsys = (self.thot - self.y*self.tcold)/(self.y-1)
self.gain = self.cold/(self.tcold + self.tsys)
self.gain[self.gain <= 0] = 1
# Replace Tsys with its median over the band (a single scalar value):
self.tsys[:] = np.median(self.tsys)
elif self.collect == "nocal":
self.spike_smoothing() # This routine removes noise spikes.
self.gauss_smoothing_spec() # This routine smooths the data using Gaussian averaging.
# The output is the smoothed data, but not calibrated. self.filtered_out0 is the output array resulting from the smoothing routines.
out0[:] = self.filtered_out0*self.spectrum_mask
self.spectrum[:] = self.filtered_out0*self.spectrum_mask
else:
out0[:] = self.a[:]
self.spectrum[:] = self.a[:]
out1[:] = self.gain
out2[:] = self.tsys
if self.spectrumcapture_toggle: # If enabled, capture the spectrum to a .csv text file.
current_time = time.time()
self.timenow = datetime.now().strftime("%Y-%m-%d_%H.%M.%S.%f")[:-5]
#write (freq, output) as a column array to a text file, titled e.g. "2018-07-24_15.15.49_spectrum.txt"
# The "prefix", i.e. the file path, is defined in the prefix variable box in the .grc program.
self.textfilename = self.prefix + self.timenow + "_" + self.location + "_" + self.az + "_" + self.elev + "_spectrum.csv"
self.data_array[:,0] = np.round(self.frequencies/1e6, decimals=4)
self.data_array[:,1] = self.spectrum
underlying socket.
:param str url: The AMQP URL to connect to
"""
# Protect against accidental assignment of an invalid attribute
__slots__ = ('_all_url_query_values',)
# The name of the private function for parsing and setting a given URL query
# arg is constructed by catenating the query arg's name to this prefix
_SETTER_PREFIX = '_set_url_'
def __init__(self, url):
"""Create a new URLParameters instance.
:param str url: The URL value
"""
super(URLParameters, self).__init__()
self._all_url_query_values = None
# Handle the Protocol scheme
#
# Fix up scheme amqp(s) to http(s) so urlparse won't barf on python
# prior to 2.7. On Python 2.6.9,
# `urlparse('amqp://127.0.0.1/%2f?socket_timeout=1')` produces an
# incorrect path='/%2f?socket_timeout=1'
if url[0:4].lower() == 'amqp':
url = 'http' + url[4:]
# TODO Is support for the alternative http(s) schemes intentional?
parts = urlparse.urlparse(url)
if parts.scheme == 'https':
self.ssl = True
elif parts.scheme == 'http':
self.ssl = False
elif parts.scheme:
raise ValueError('Unexpected URL scheme %r; supported scheme '
'values: amqp, amqps' % (parts.scheme,))
if parts.hostname is not None:
self.host = parts.hostname
# Take care of port after SSL status is known
if parts.port is not None:
self.port = parts.port
else:
self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT
if parts.username is not None:
self.credentials = pika_credentials.PlainCredentials(url_unquote(parts.username),
url_unquote(parts.password))
# Get the Virtual Host
if len(parts.path) > 1:
self.virtual_host = url_unquote(parts.path.split('/')[1])
# Handle query string values, validating and assigning them
self._all_url_query_values = urlparse.parse_qs(parts.query)
for name, value in dict_iteritems(self._all_url_query_values):
try:
set_value = getattr(self, self._SETTER_PREFIX + name)
except AttributeError:
raise ValueError('Unknown URL parameter: %r' % (name,))
try:
(value,) = value
except ValueError:
raise ValueError('Expected exactly one value for URL parameter '
'%s, but got %i values: %s' % (
name, len(value), value))
set_value(value)
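# Example: 'amqp://guest:guest@localhost:5672/%2F?heartbeat=30&connection_attempts=3'
# yields credentials guest/guest, virtual host '/', and routes heartbeat=30 and
# connection_attempts=3 through the _set_url_* setters below.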
def _set_url_backpressure_detection(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
backpressure_detection = {'t': True, 'f': False}[value]
except KeyError:
raise ValueError('Invalid backpressure_detection value: %r' %
(value,))
self.backpressure_detection = backpressure_detection
def _set_url_blocked_connection_timeout(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
blocked_connection_timeout = float(value)
except ValueError as exc:
raise ValueError('Invalid blocked_connection_timeout value %r: %r' %
(value, exc,))
self.blocked_connection_timeout = blocked_connection_timeout
def _set_url_channel_max(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
channel_max = int(value)
except ValueError as exc:
raise ValueError('Invalid channel_max value %r: %r' % (value, exc,))
self.channel_max = channel_max
def _set_url_client_properties(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.client_properties = ast.literal_eval(value)
def _set_url_connection_attempts(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
connection_attempts = int(value)
except ValueError as exc:
raise ValueError('Invalid connection_attempts value %r: %r' %
(value, exc,))
self.connection_attempts = connection_attempts
def _set_url_frame_max(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
frame_max = int(value)
except ValueError as exc:
raise ValueError('Invalid frame_max value %r: %r' % (value, exc,))
self.frame_max = frame_max
def _set_url_heartbeat(self, value):
"""Deserialize and apply the corresponding query string arg"""
if 'heartbeat_interval' in self._all_url_query_values:
raise ValueError('Deprecated URL parameter heartbeat_interval must '
'not be specified together with heartbeat')
try:
heartbeat_timeout = int(value)
except ValueError as exc:
raise ValueError('Invalid heartbeat value %r: %r' % (value, exc,))
self.heartbeat = heartbeat_timeout
def _set_url_heartbeat_interval(self, value):
"""Deserialize and apply the corresponding query string arg"""
warnings.warn('heartbeat_interval is deprecated, use heartbeat',
DeprecationWarning, stacklevel=2)
if 'heartbeat' in self._all_url_query_values:
raise ValueError('Deprecated URL parameter heartbeat_interval must '
'not be specified together with heartbeat')
try:
heartbeat_timeout = int(value)
except ValueError as exc:
raise ValueError('Invalid heartbeat_interval value %r: %r' %
(value, exc,))
self.heartbeat = heartbeat_timeout
def _set_url_locale(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.locale = value
def _set_url_retry_delay(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
retry_delay = float(value)
except ValueError as exc:
raise ValueError('Invalid retry_delay value %r: %r' % (value, exc,))
self.retry_delay = retry_delay
def _set_url_socket_timeout(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
socket_timeout = float(value)
except ValueError as exc:
raise ValueError('Invalid socket_timeout value %r: %r' %
(value, exc,))
self.socket_timeout = socket_timeout
def _set_url_ssl_options(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.ssl_options = ast.literal_eval(value)
def _set_url_tcp_options(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.tcp_options = ast.literal_eval(value)
class SSLOptions(object):
"""Class used to provide parameters for optional fine grained control of SSL
socket wrapping.
:param string keyfile: The key file to pass to SSLContext.load_cert_chain
:param string key_password: The key password to passed to
SSLContext.load_cert_chain
:param string certfile: The certificate file to passed to
SSLContext.load_cert_chain
:param bool server_side: Passed to SSLContext.wrap_socket
:param verify_mode: Passed to SSLContext.wrap_socket
:param ssl_version: Passed to SSLContext init, defines the ssl
version to use
:param string cafile: The CA file passed to
SSLContext.load_verify_locations
:param string capath: The CA path passed to
SSLContext.load_verify_locations
:param string cadata: The CA data passed to
SSLContext.load_verify_locations
:param do_handshake_on_connect: Passed to SSLContext.wrap_socket
:param suppress_ragged_eofs: Passed to SSLContext.wrap_socket
:param ciphers: Passed to SSLContext.set_ciphers
:param server_hostname: SSLContext.wrap_socket, used to enable SNI
"""
def __init__(self,
keyfile=None,
key_password=None,
certfile=None,
server_side=False,
verify_mode=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_SSLv23,
cafile=None,
capath=None,
cadata=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
ciphers=None,
server_hostname=None):
self.keyfile = keyfile
self.key_password = key_password
self.certfile = certfile
self.server_side = server_side
self.verify_mode = verify_mode
self.ssl_version = ssl_version
self.cafile = cafile
self.capath = capath
self.cadata = cadata
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self.ciphers = ciphers
self.server_hostname = server_hostname
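# Minimal usage sketch (file names and host are illustrative, not from this
# source):
#
#     opts = SSLOptions(certfile='client.pem', keyfile='client.key',
#                       verify_mode=ssl.CERT_REQUIRED, cafile='ca.pem',
#                       server_hostname='broker.example.com')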
class Connection(object):
"""This is the core class that implements communication with RabbitMQ. This
class should not be invoked directly but rather through the use of an
adapter such as SelectConnection or BlockingConnection.
"""
# Disable pylint messages concerning "method could be a function"
# pylint: disable=R0201
ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure'
ON_CONNECTION_BLOCKED = '_on_connection_blocked'
ON_CONNECTION_CLOSED = '_on_connection_closed'
ON_CONNECTION_ERROR = '_on_connection_error'
ON_CONNECTION_OPEN = '_on_connection_open'
ON_CONNECTION_UNBLOCKED = '_on_connection_unblocked'
CONNECTION_CLOSED = 0
CONNECTION_INIT = 1
CONNECTION_PROTOCOL = 2
CONNECTION_START = 3
CONNECTION_TUNE = 4
CONNECTION_OPEN = 5
CONNECTION_CLOSING = 6 # client-initiated close in progress
_STATE_NAMES = {
CONNECTION_CLOSED: 'CLOSED',
CONNECTION_INIT: 'INIT',
CONNECTION_PROTOCOL: 'PROTOCOL',
CONNECTION_START: 'START',
CONNECTION_TUNE: 'TUNE',
CONNECTION_OPEN: 'OPEN',
CONNECTION_CLOSING: 'CLOSING'
}
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None):
"""Connection initialization expects an object that has implemented the
Parameters class and a callback function to notify when we have
successfully connected to the AMQP Broker.
Available Parameters classes are the ConnectionParameters class and
URLParameters class.
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Called when the connection is opened:
on_open_callback(connection)
:param method on_open_error_callback: Called if the connection can't
be established: on_open_error_callback(connection, str|exception)
:param method on_close_callback: Called when the connection is closed:
`on_close_callback(connection, reason_code, reason_text)`, where
`reason_code` is either an IETF RFC 821 reply code for AMQP-level
closures or a value from `pika.connection.InternalCloseReasons` for
internal causes, such as socket errors.
"""
self.connection_state = self.CONNECTION_CLOSED
# Holds timer when the initial connect or reconnect is scheduled
self._connection_attempt_timer = None
# Used to hold timer if configured for Connection.Blocked timeout
self._blocked_conn_timer = None
self.heartbeat = None
# Set our configuration options
self.params = (copy.deepcopy(parameters) if parameters is not None else
ConnectionParameters())
# Define our callback dictionary
self.callbacks = pika_callback.CallbackManager()
# Attributes that will be properly initialized by _init_connection_state
# and/or during connection handshake.
self.server_capabilities = None
self.server_properties = None
self._body_max_length = None
self.known_hosts = None
self.closing = None
self._frame_buffer = None
self._channels = None
self._backpressure_multiplier = None
self.remaining_connection_attempts = None
self._init_connection_state()
# Add the on connection error callback
self.callbacks.add(0, self.ON_CONNECTION_ERROR,
on_open_error_callback or self._on_connection_error,
False)
# On connection callback
if on_open_callback:
self.add_on_open_callback(on_open_callback)
# On connection callback
if on_close_callback:
self.add_on_close_callback(on_close_callback)
self.connect()
def add_backpressure_callback(self, callback):
"""Call method "callback" when pika believes backpressure is being
applied.
:param method callback: The method to call
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0, self.ON_CONNECTION_BACKPRESSURE, callback,
False)
def add_on_close_callback(self, callback):
"""Add a callback notification when the connection has closed. The
callback will be passed the connection, the reason_code (int) and the
reply_text (str), where reason_code is either an IETF RFC 821 reply code
for AMQP-level closures or a value from
`pika.connection.InternalCloseReasons` for internal causes, such as
socket errors.
:param method callback: Callback to call on close
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0, self.ON_CONNECTION_CLOSED, callback, False)
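# Illustrative only -- log why the connection closed:
#   conn.add_on_close_callback(
#       lambda conn, code, text: print('connection closed:', code, text))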
def add_on_connection_blocked_callback(self, callback):
"""Add a callback to be |