Dataset schema (⌀ marks columns that may contain nulls):

| Column | Type | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4–996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–67k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 4–996k |
| avg_line_length | float64 | 1.33–58.2k |
| max_line_length | int64 | 2–323k |
| alphanum_fraction | float64 | 0–0.97 |
| content_no_comment | string | length 0–946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
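The example rows below follow this schema. As a rough illustration only, here is a minimal sketch of iterating over records with these columns, assuming the data is exposed as a Hugging Face `datasets`-style dataset; the identifier `some-org/some-python-code-dataset` is a placeholder, since this preview does not name the dataset.

```python
# Minimal sketch, assuming a Hugging Face `datasets`-style loader.
# "some-org/some-python-code-dataset" is a placeholder identifier.
from itertools import islice

from datasets import load_dataset

ds = load_dataset("some-org/some-python-code-dataset", split="train", streaming=True)

for row in islice(ds, 3):
    # Repository metadata columns from the schema above.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_repo_licenses"])
    # File-level statistics.
    print(row["size"], row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
    # The file body, with and without comments.
    print(row["content"][:200])
    print(row["content_no_comment"][:200])
```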

hexsha: 79036be5b5f1fba5f514be0590b1bc5f815bec38 | size: 1,549 | ext: py | lang: Python
max_stars_repo_path: manual_crawler.py | max_stars_repo_name: seanbreckenridge/MALUserVsAverage | max_stars_repo_head_hexsha: 8cc9c6bc3d19a1a0470235bd069e0fed632b1088 | max_stars_repo_licenses: ["Unlicense"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: manual_crawler.py | max_issues_repo_name: seanbreckenridge/MALUserVsAverage | max_issues_repo_head_hexsha: 8cc9c6bc3d19a1a0470235bd069e0fed632b1088 | max_issues_repo_licenses: ["Unlicense"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: manual_crawler.py | max_forks_repo_name: seanbreckenridge/MALUserVsAverage | max_forks_repo_head_hexsha: 8cc9c6bc3d19a1a0470235bd069e0fed632b1088 | max_forks_repo_licenses: ["Unlicense"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import requests
import time
from bs4 import BeautifulSoup
class crawl:
"""Keep track of time between scrape requests.
args:
wait: time between requests
retry_max: number of times to retry
"""
def __init__(self, wait, retry_max):
self.wait = wait
self.retry_max = retry_max
self.last_scrape = time.time() - (self.wait * 0.5)
# can let user scrape faster the first time.
def since_scrape(self):
return (time.time() - self.last_scrape) > self.wait
def wait_till(self):
while not self.since_scrape():
time.sleep(1)
def get(self, url):
count = 0
while count < self.retry_max:
time.sleep(self.wait * count) # sleep for successively longer times
try:
self.wait_till()
response = requests.get(url)
self.last_scrape = time.time()
if response.status_code == requests.codes.ok:
return response
else:
raise Exception(
"Non-standard issue connecting to "
+ f"{url}: {response.status_code}."
)
except requests.exceptions.RequestException as e:
pass
count += 1
def get_html(self, url):
return self.get(url).text
def get_soup(self, url):
return BeautifulSoup(self.get(url).text, "html.parser")
def get_json(self, url):
return self.get(url).json()
avg_line_length: 29.226415 | max_line_length: 80 | alphanum_fraction: 0.550032
content_no_comment:
import requests
import time
from bs4 import BeautifulSoup
class crawl:
def __init__(self, wait, retry_max):
self.wait = wait
self.retry_max = retry_max
self.last_scrape = time.time() - (self.wait * 0.5)
def since_scrape(self):
return (time.time() - self.last_scrape) > self.wait
def wait_till(self):
while not self.since_scrape():
time.sleep(1)
def get(self, url):
count = 0
while count < self.retry_max:
time.sleep(self.wait * count)
try:
self.wait_till()
response = requests.get(url)
self.last_scrape = time.time()
if response.status_code == requests.codes.ok:
return response
else:
raise Exception(
"Non-standard issue connecting to "
+ f"{url}: {response.status_code}."
)
except requests.exceptions.RequestException as e:
pass
count += 1
def get_html(self, url):
return self.get(url).text
def get_soup(self, url):
return BeautifulSoup(self.get(url).text, "html.parser")
def get_json(self, url):
return self.get(url).json()
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79036c0074e467d7ce8b933e2563f2eeea9c3720 | size: 1,920 | ext: py | lang: Python
max_stars_repo_path: ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/blacklisted_set.py | max_stars_repo_name: zyclove/ambari | max_stars_repo_head_hexsha: 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/blacklisted_set.py | max_issues_repo_name: zyclove/ambari | max_issues_repo_head_hexsha: 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/blacklisted_set.py | max_forks_repo_name: zyclove/ambari | max_forks_repo_head_hexsha: 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python2
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
class BlacklistedSet(set):
BLACKLIST_TIMEOUT = 60
def __init__(self, items=[], blacklist_timeout=BLACKLIST_TIMEOUT):
self.__dict = {}
self.__blacklist_timeout = blacklist_timeout
for item in items:
set.add(self, item)
def add(self, item):
self.__dict[item] = time.time()
set.add(self, item)
def __contains__(self, item):
return item in self.__dict and time.time() > self.__dict.get(item)
def __iter__(self):
for item in set.__iter__(self):
if time.time() > self.__dict.get(item):
yield item
def get_actual_size(self):
size = 0
for item in self.__iter__():
size += 1
return size
def get_item_at_index(self, index):
i = 0
for item in self.__iter__():
if i == index:
return item
i += 1
return None
def blacklist(self, item):
self.__dict[item] = time.time() + self.__blacklist_timeout
if __name__ == "__main__":
hosts = [1, 2, 3, 4]
bs = BlacklistedSet(hosts)
bs.blacklist(4)
print bs
for a in bs:
print a
time.sleep(2)
bs.blacklist(1)
bs.blacklist(5)
for a in bs:
print a
avg_line_length: 25.945946 | max_line_length: 72 | alphanum_fraction: 0.697917
content_no_comment:
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
class BlacklistedSet(set):
BLACKLIST_TIMEOUT = 60
def __init__(self, items=[], blacklist_timeout=BLACKLIST_TIMEOUT):
self.__dict = {}
self.__blacklist_timeout = blacklist_timeout
for item in items:
set.add(self, item)
def add(self, item):
self.__dict[item] = time.time()
set.add(self, item)
def __contains__(self, item):
return item in self.__dict and time.time() > self.__dict.get(item)
def __iter__(self):
for item in set.__iter__(self):
if time.time() > self.__dict.get(item):
yield item
def get_actual_size(self):
size = 0
for item in self.__iter__():
size += 1
return size
def get_item_at_index(self, index):
i = 0
for item in self.__iter__():
if i == index:
return item
i += 1
return None
def blacklist(self, item):
self.__dict[item] = time.time() + self.__blacklist_timeout
if __name__ == "__main__":
hosts = [1, 2, 3, 4]
bs = BlacklistedSet(hosts)
bs.blacklist(4)
print bs
for a in bs:
print a
time.sleep(2)
bs.blacklist(1)
bs.blacklist(5)
for a in bs:
print a
is_comment_constant_removed: false | is_sharp_comment_removed: true

hexsha: 79036c7370b3b6c2a59581cb2b70b6b7c349b2a7 | size: 3,603 | ext: py | lang: Python
max_stars_repo_path: tests/sources/test_header_version.py | max_stars_repo_name: junjihashimoto/webots | max_stars_repo_head_hexsha: 12eb8c010275f390ae97d91d5c04906ffa00c262 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/sources/test_header_version.py | max_issues_repo_name: junjihashimoto/webots | max_issues_repo_head_hexsha: 12eb8c010275f390ae97d91d5c04906ffa00c262 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/sources/test_header_version.py | max_forks_repo_name: junjihashimoto/webots | max_forks_repo_head_hexsha: 12eb8c010275f390ae97d91d5c04906ffa00c262 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test header version."""
import unittest
import os
import fnmatch
ignoredProtos = [
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba/clients/studio/plugins/ThymioVPL/UsageProfile.proto',
'projects/samples/tutorials/protos/FourWheelsRobot.proto'
]
skippedDirectories = [
'dependencies',
'distribution',
'.git'
]
class TestHeaderVersion(unittest.TestCase):
"""Unit test of the PROTO and world headers."""
def setUp(self):
"""Get all the PROTO files to be tested."""
# 1. Get Webots version (without revision)
self.version = None
with open(os.environ['WEBOTS_HOME'] + os.sep + 'resources' + os.sep + 'version.txt') as file:
content = file.read()
self.version = content.splitlines()[0].strip().split()[0]
# 2. Get all the PROTO files
self.files = []
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.proto'):
proto = os.path.join(rootPath, fileName)
shouldIgnore = False
for ignoredProto in ignoredProtos:
path = os.environ['WEBOTS_HOME'] + os.sep + ignoredProto.replace('/', os.sep)
if proto == path:
shouldIgnore = True
break
if not shouldIgnore:
self.files.append((proto, '#VRML_SIM %s utf8' % self.version))
# 3. Get all the world files
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbt'):
world = os.path.join(rootPath, fileName)
self.files.append((world, '#VRML_SIM %s utf8' % self.version))
# 4. Get all the .wbproj files
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbproj'):
projFile = os.path.join(rootPath, fileName)
self.files.append((projFile, 'Webots Project File version %s' % self.version))
def test_header_version(self):
"""Test that the PROTO and world files have the correct header."""
for currentFile in self.files:
fileToTest = currentFile[0]
with open(fileToTest) as file:
content = file.read()
if content == '':
continue
line = content.splitlines()[0].strip()
self.assertTrue(
line.startswith(currentFile[1]),
msg='Wrong header in file: "%s"' % fileToTest
)
if __name__ == '__main__':
unittest.main()
avg_line_length: 40.943182 | max_line_length: 120 | alphanum_fraction: 0.611157
content_no_comment:
import unittest
import os
import fnmatch
ignoredProtos = [
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba/clients/studio/plugins/ThymioVPL/UsageProfile.proto',
'projects/samples/tutorials/protos/FourWheelsRobot.proto'
]
skippedDirectories = [
'dependencies',
'distribution',
'.git'
]
class TestHeaderVersion(unittest.TestCase):
def setUp(self):
self.version = None
with open(os.environ['WEBOTS_HOME'] + os.sep + 'resources' + os.sep + 'version.txt') as file:
content = file.read()
self.version = content.splitlines()[0].strip().split()[0]
self.files = []
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.proto'):
proto = os.path.join(rootPath, fileName)
shouldIgnore = False
for ignoredProto in ignoredProtos:
path = os.environ['WEBOTS_HOME'] + os.sep + ignoredProto.replace('/', os.sep)
if proto == path:
shouldIgnore = True
break
if not shouldIgnore:
self.files.append((proto, '#VRML_SIM %s utf8' % self.version))
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbt'):
world = os.path.join(rootPath, fileName)
self.files.append((world, '#VRML_SIM %s utf8' % self.version))
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbproj'):
projFile = os.path.join(rootPath, fileName)
self.files.append((projFile, 'Webots Project File version %s' % self.version))
def test_header_version(self):
for currentFile in self.files:
fileToTest = currentFile[0]
with open(fileToTest) as file:
content = file.read()
if content == '':
continue
line = content.splitlines()[0].strip()
self.assertTrue(
line.startswith(currentFile[1]),
msg='Wrong header in file: "%s"' % fileToTest
)
if __name__ == '__main__':
unittest.main()
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79036ca5647a8c4ae722fb21e378769c2f0a26a7 | size: 18,450 | ext: py | lang: Python
max_stars_repo_path: sdks/python/apache_beam/io/avroio_test.py | max_stars_repo_name: rohdesamuel/beam | max_stars_repo_head_hexsha: b4f02888aed20f6f066d07f4ff26e6688a6f848e | max_stars_repo_licenses: ["Apache-2.0", "BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-08-25T21:17:10.000Z | max_stars_repo_stars_event_max_datetime: 2020-08-25T21:17:10.000Z
max_issues_repo_path: sdks/python/apache_beam/io/avroio_test.py | max_issues_repo_name: rohdesamuel/beam | max_issues_repo_head_hexsha: b4f02888aed20f6f066d07f4ff26e6688a6f848e | max_issues_repo_licenses: ["Apache-2.0", "BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sdks/python/apache_beam/io/avroio_test.py | max_forks_repo_name: rohdesamuel/beam | max_forks_repo_head_hexsha: b4f02888aed20f6f066d07f4ff26e6688a6f848e | max_forks_repo_licenses: ["Apache-2.0", "BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import json
import logging
import math
import os
import tempfile
import unittest
from builtins import range
from typing import List
import sys
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import hamcrest as hc
import avro
import avro.datafile
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
from fastavro.schema import parse_schema
from fastavro import writer
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from avro.schema import Parse # avro-python3 library for python3
except ImportError:
from avro.schema import parse as Parse # avro library for python2
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
import apache_beam as beam
from apache_beam import Create
from apache_beam.io import avroio
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io import source_test_utils
from apache_beam.io.avroio import _create_avro_sink # For testing
from apache_beam.io.avroio import _create_avro_source # For testing
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# Import snappy optionally; some tests will be skipped when import fails.
try:
import snappy # pylint: disable=import-error
except ImportError:
snappy = None # pylint: disable=invalid-name
logging.warning('python-snappy is not installed; some tests will be skipped.')
RECORDS = [{
'name': 'Thomas', 'favorite_number': 1, 'favorite_color': 'blue'
}, {
'name': 'Henry', 'favorite_number': 3, 'favorite_color': 'green'
}, {
'name': 'Toby', 'favorite_number': 7, 'favorite_color': 'brown'
}, {
'name': 'Gordon', 'favorite_number': 4, 'favorite_color': 'blue'
}, {
'name': 'Emily', 'favorite_number': -1, 'favorite_color': 'Red'
}, {
'name': 'Percy', 'favorite_number': 6, 'favorite_color': 'Green'
}]
class AvroBase(object):
_temp_files = [] # type: List[str]
def __init__(self, methodName='runTest'):
super(AvroBase, self).__init__(methodName)
self.RECORDS = RECORDS
self.SCHEMA_STRING = '''
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}
'''
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def setUp(self):
# Reducing the size of thread pools. Without this test execution may fail in
# environments with limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def tearDown(self):
for path in self._temp_files:
if os.path.exists(path):
os.remove(path)
self._temp_files = []
def _write_data(self, directory, prefix, codec, count, sync_interval):
raise NotImplementedError
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp()
file_name = None
for _ in range(num_files):
file_name = self._write_data(directory=temp_dir, prefix='mytemp')
assert file_name
file_name_prefix = file_name[:file_name.rfind(os.path.sep)]
return file_name_prefix + os.path.sep + 'mytemp*'
def _run_avro_test(
self, pattern, desired_bundle_size, perform_splitting, expected_result):
source = _create_avro_source(pattern, use_fastavro=self.use_fastavro)
if perform_splitting:
assert desired_bundle_size
splits = [
split
for split in source.split(desired_bundle_size=desired_bundle_size)
]
if len(splits) < 2:
raise ValueError(
'Test is trivial. Please adjust it so that at least '
'two splits get generated')
sources_info = [(split.source, split.start_position, split.stop_position)
for split in splits]
source_test_utils.assert_sources_equal_reference_source(
(source, None, None), sources_info)
else:
read_records = source_test_utils.read_from_source(source, None, None)
self.assertCountEqual(expected_result, read_records)
def test_read_without_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_source_display_data(self):
file_name = 'some_avro_source'
source = \
_create_avro_source(
file_name,
validate=False,
use_fastavro=self.use_fastavro
)
dd = DisplayData.create_from(source)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_display_data(self):
file_name = 'some_avro_source'
read = \
avroio.ReadFromAvro(
file_name,
validate=False,
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(read)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_display_data(self):
file_name = 'some_avro_sink'
sink = _create_avro_sink(
file_name,
self.SCHEMA,
'null',
'.end',
0,
None,
'application/x-avro',
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
DisplayDataItemMatcher('codec', 'null'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_write_display_data(self):
file_name = 'some_avro_sink'
write = avroio.WriteToAvro(
file_name, self.SCHEMA, use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(write)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d'),
DisplayDataItemMatcher('codec', 'deflate'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_reentrant_without_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
def test_read_reantrant_with_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [split for split in source.split(desired_bundle_size=100000)]
assert len(splits) == 1
source_test_utils.assert_reentrant_reads_succeed(
(splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_read_without_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, 10000, True, expected_result)
def test_split_points(self):
num_records = 12000
sync_interval = 16000
file_name = self._write_data(count=num_records, sync_interval=sync_interval)
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
split_points_report = []
for _ in splits[0].source.read(range_tracker):
split_points_report.append(range_tracker.split_points())
# There will be a total of num_blocks in the generated test file,
# proportional to number of records in the file divided by synchronization
# interval used by avro during write. Each block has more than 10 records.
num_blocks = int(math.ceil(14.5 * num_records / sync_interval))
assert num_blocks > 1
# When reading records of the first block, range_tracker.split_points()
# should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
self.assertEqual(
split_points_report[:10],
[(0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)] * 10)
# When reading records of last block, range_tracker.split_points() should
# return (num_blocks - 1, 1)
self.assertEqual(split_points_report[-10:], [(num_blocks - 1, 1)] * 10)
def test_read_without_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_without_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_with_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_read_without_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, None, False, expected_result)
def test_read_with_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, 100, True, expected_result)
def test_dynamic_work_rebalancing_exhaustive(self):
def compare_split_points(file_name):
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [
split for split in source.split(desired_bundle_size=float('inf'))
]
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(splits[0].source)
# Adjusting block size so that we can perform a exhaustive dynamic
# work rebalancing test that completes within an acceptable amount of time.
file_name = self._write_data(count=5, sync_interval=2)
compare_split_points(file_name)
def test_corrupted_file(self):
file_name = self._write_data()
with open(file_name, 'rb') as f:
data = f.read()
# Corrupt the last character of the file which is also the last character of
# the last sync_marker.
# https://avro.apache.org/docs/current/spec.html#Object+Container+Files
corrupted_data = bytearray(data)
corrupted_data[-1] = (corrupted_data[-1] + 1) % 256
with tempfile.NamedTemporaryFile(delete=False,
prefix=tempfile.template) as f:
f.write(corrupted_data)
corrupted_file_name = f.name
source = _create_avro_source(
corrupted_file_name, use_fastavro=self.use_fastavro)
with self.assertRaisesRegex(ValueError, r'expected sync marker'):
source_test_utils.read_from_source(source, None, None)
def test_read_from_avro(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p | avroio.ReadFromAvro(path, use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_single_file(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_many_single_files(self):
path1 = self._write_data()
path2 = self._write_data()
path3 = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 3))
def test_read_all_from_avro_file_pattern(self):
file_pattern = self._write_pattern(5)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 5))
def test_read_all_from_avro_many_file_patterns(self):
file_pattern1 = self._write_pattern(5)
file_pattern2 = self._write_pattern(2)
file_pattern3 = self._write_pattern(3)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 10))
def test_sink_transform(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# pylint: disable=expression-not-assigned
p \
| beam.Create(self.RECORDS) \
| avroio.WriteToAvro(path, self.SCHEMA, use_fastavro=self.use_fastavro)
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
| beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_sink_transform_snappy(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# pylint: disable=expression-not-assigned
p \
| beam.Create(self.RECORDS) \
| avroio.WriteToAvro(
path,
self.SCHEMA,
codec='snappy',
use_fastavro=self.use_fastavro)
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
| beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(
sys.version_info[0] == 3 and os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3. '
'TODO: BEAM-6522.')
class TestAvro(AvroBase, unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestAvro, self).__init__(methodName)
self.use_fastavro = False
self.SCHEMA = Parse(self.SCHEMA_STRING)
def _write_data(
self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS),
sync_interval=avro.datafile.SYNC_INTERVAL):
old_sync_interval = avro.datafile.SYNC_INTERVAL
try:
avro.datafile.SYNC_INTERVAL = sync_interval
with tempfile.NamedTemporaryFile(delete=False,
dir=directory,
prefix=prefix) as f:
writer = DataFileWriter(f, DatumWriter(), self.SCHEMA, codec=codec)
len_records = len(self.RECORDS)
for i in range(count):
writer.append(self.RECORDS[i % len_records])
writer.close()
self._temp_files.append(f.name)
return f.name
finally:
avro.datafile.SYNC_INTERVAL = old_sync_interval
class TestFastAvro(AvroBase, unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestFastAvro, self).__init__(methodName)
self.use_fastavro = True
self.SCHEMA = parse_schema(json.loads(self.SCHEMA_STRING))
def _write_data(
self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS),
**kwargs):
all_records = self.RECORDS * \
(count // len(self.RECORDS)) + self.RECORDS[:(count % len(self.RECORDS))]
with tempfile.NamedTemporaryFile(delete=False,
dir=directory,
prefix=prefix,
mode='w+b') as f:
writer(f, self.SCHEMA, all_records, codec=codec, **kwargs)
self._temp_files.append(f.name)
return f.name
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
avg_line_length: 36.247544 | max_line_length: 80 | alphanum_fraction: 0.692466
content_no_comment:
from __future__ import absolute_import
from __future__ import division
import json
import logging
import math
import os
import tempfile
import unittest
from builtins import range
from typing import List
import sys
import future.tests.base
import hamcrest as hc
import avro
import avro.datafile
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
from fastavro.schema import parse_schema
from fastavro import writer
try:
from avro.schema import Parse
except ImportError:
from avro.schema import parse as Parse
import apache_beam as beam
from apache_beam import Create
from apache_beam.io import avroio
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io import source_test_utils
from apache_beam.io.avroio import _create_avro_sink
from apache_beam.io.avroio import _create_avro_source
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
try:
import snappy
except ImportError:
snappy = None
logging.warning('python-snappy is not installed; some tests will be skipped.')
RECORDS = [{
'name': 'Thomas', 'favorite_number': 1, 'favorite_color': 'blue'
}, {
'name': 'Henry', 'favorite_number': 3, 'favorite_color': 'green'
}, {
'name': 'Toby', 'favorite_number': 7, 'favorite_color': 'brown'
}, {
'name': 'Gordon', 'favorite_number': 4, 'favorite_color': 'blue'
}, {
'name': 'Emily', 'favorite_number': -1, 'favorite_color': 'Red'
}, {
'name': 'Percy', 'favorite_number': 6, 'favorite_color': 'Green'
}]
class AvroBase(object):
_temp_files = []
def __init__(self, methodName='runTest'):
super(AvroBase, self).__init__(methodName)
self.RECORDS = RECORDS
self.SCHEMA_STRING = '''
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}
'''
@classmethod
def setUpClass(cls):
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def setUp(self):
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def tearDown(self):
for path in self._temp_files:
if os.path.exists(path):
os.remove(path)
self._temp_files = []
def _write_data(self, directory, prefix, codec, count, sync_interval):
raise NotImplementedError
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp()
file_name = None
for _ in range(num_files):
file_name = self._write_data(directory=temp_dir, prefix='mytemp')
assert file_name
file_name_prefix = file_name[:file_name.rfind(os.path.sep)]
return file_name_prefix + os.path.sep + 'mytemp*'
def _run_avro_test(
self, pattern, desired_bundle_size, perform_splitting, expected_result):
source = _create_avro_source(pattern, use_fastavro=self.use_fastavro)
if perform_splitting:
assert desired_bundle_size
splits = [
split
for split in source.split(desired_bundle_size=desired_bundle_size)
]
if len(splits) < 2:
raise ValueError(
'Test is trivial. Please adjust it so that at least '
'two splits get generated')
sources_info = [(split.source, split.start_position, split.stop_position)
for split in splits]
source_test_utils.assert_sources_equal_reference_source(
(source, None, None), sources_info)
else:
read_records = source_test_utils.read_from_source(source, None, None)
self.assertCountEqual(expected_result, read_records)
def test_read_without_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_source_display_data(self):
file_name = 'some_avro_source'
source = \
_create_avro_source(
file_name,
validate=False,
use_fastavro=self.use_fastavro
)
dd = DisplayData.create_from(source)
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_display_data(self):
file_name = 'some_avro_source'
read = \
avroio.ReadFromAvro(
file_name,
validate=False,
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(read)
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_display_data(self):
file_name = 'some_avro_sink'
sink = _create_avro_sink(
file_name,
self.SCHEMA,
'null',
'.end',
0,
None,
'application/x-avro',
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
DisplayDataItemMatcher('codec', 'null'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_write_display_data(self):
file_name = 'some_avro_sink'
write = avroio.WriteToAvro(
file_name, self.SCHEMA, use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(write)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d'),
DisplayDataItemMatcher('codec', 'deflate'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_reentrant_without_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
def test_read_reantrant_with_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [split for split in source.split(desired_bundle_size=100000)]
assert len(splits) == 1
source_test_utils.assert_reentrant_reads_succeed(
(splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_read_without_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, 10000, True, expected_result)
def test_split_points(self):
num_records = 12000
sync_interval = 16000
file_name = self._write_data(count=num_records, sync_interval=sync_interval)
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
split_points_report = []
for _ in splits[0].source.read(range_tracker):
split_points_report.append(range_tracker.split_points())
num_blocks = int(math.ceil(14.5 * num_records / sync_interval))
assert num_blocks > 1
self.assertEqual(
split_points_report[:10],
[(0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)] * 10)
self.assertEqual(split_points_report[-10:], [(num_blocks - 1, 1)] * 10)
def test_read_without_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_without_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_with_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_read_without_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, None, False, expected_result)
def test_read_with_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, 100, True, expected_result)
def test_dynamic_work_rebalancing_exhaustive(self):
def compare_split_points(file_name):
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [
split for split in source.split(desired_bundle_size=float('inf'))
]
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(splits[0].source)
file_name = self._write_data(count=5, sync_interval=2)
compare_split_points(file_name)
def test_corrupted_file(self):
file_name = self._write_data()
with open(file_name, 'rb') as f:
data = f.read()
corrupted_data = bytearray(data)
corrupted_data[-1] = (corrupted_data[-1] + 1) % 256
with tempfile.NamedTemporaryFile(delete=False,
prefix=tempfile.template) as f:
f.write(corrupted_data)
corrupted_file_name = f.name
source = _create_avro_source(
corrupted_file_name, use_fastavro=self.use_fastavro)
with self.assertRaisesRegex(ValueError, r'expected sync marker'):
source_test_utils.read_from_source(source, None, None)
def test_read_from_avro(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p | avroio.ReadFromAvro(path, use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_single_file(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_many_single_files(self):
path1 = self._write_data()
path2 = self._write_data()
path3 = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 3))
def test_read_all_from_avro_file_pattern(self):
file_pattern = self._write_pattern(5)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 5))
def test_read_all_from_avro_many_file_patterns(self):
file_pattern1 = self._write_pattern(5)
file_pattern2 = self._write_pattern(2)
file_pattern3 = self._write_pattern(3)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 10))
def test_sink_transform(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
p \
| beam.Create(self.RECORDS) \
| avroio.WriteToAvro(path, self.SCHEMA, use_fastavro=self.use_fastavro)
with TestPipeline() as p:
readback = \
p \
| avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
| beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_sink_transform_snappy(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
p \
| beam.Create(self.RECORDS) \
| avroio.WriteToAvro(
path,
self.SCHEMA,
codec='snappy',
use_fastavro=self.use_fastavro)
with TestPipeline() as p:
readback = \
p \
| avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
| beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(
sys.version_info[0] == 3 and os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3. '
'TODO: BEAM-6522.')
class TestAvro(AvroBase, unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestAvro, self).__init__(methodName)
self.use_fastavro = False
self.SCHEMA = Parse(self.SCHEMA_STRING)
def _write_data(
self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS),
sync_interval=avro.datafile.SYNC_INTERVAL):
old_sync_interval = avro.datafile.SYNC_INTERVAL
try:
avro.datafile.SYNC_INTERVAL = sync_interval
with tempfile.NamedTemporaryFile(delete=False,
dir=directory,
prefix=prefix) as f:
writer = DataFileWriter(f, DatumWriter(), self.SCHEMA, codec=codec)
len_records = len(self.RECORDS)
for i in range(count):
writer.append(self.RECORDS[i % len_records])
writer.close()
self._temp_files.append(f.name)
return f.name
finally:
avro.datafile.SYNC_INTERVAL = old_sync_interval
class TestFastAvro(AvroBase, unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestFastAvro, self).__init__(methodName)
self.use_fastavro = True
self.SCHEMA = parse_schema(json.loads(self.SCHEMA_STRING))
def _write_data(
self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS),
**kwargs):
all_records = self.RECORDS * \
(count // len(self.RECORDS)) + self.RECORDS[:(count % len(self.RECORDS))]
with tempfile.NamedTemporaryFile(delete=False,
dir=directory,
prefix=prefix,
mode='w+b') as f:
writer(f, self.SCHEMA, all_records, codec=codec, **kwargs)
self._temp_files.append(f.name)
return f.name
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79036e10662062fbef5a2426ab989a8a22a76e17 | size: 176,147 | ext: py | lang: Python
max_stars_repo_path: azure_compute/komand_azure_compute/actions/list_vm/schema.py | max_stars_repo_name: xhennessy-r7/insightconnect-plugins | max_stars_repo_head_hexsha: 59268051313d67735b5dd3a30222eccb92aca8e9 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: azure_compute/komand_azure_compute/actions/list_vm/schema.py | max_issues_repo_name: xhennessy-r7/insightconnect-plugins | max_issues_repo_head_hexsha: 59268051313d67735b5dd3a30222eccb92aca8e9 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: azure_compute/komand_azure_compute/actions/list_vm/schema.py | max_forks_repo_name: xhennessy-r7/insightconnect-plugins | max_forks_repo_head_hexsha: 59268051313d67735b5dd3a30222eccb92aca8e9 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
RESOURCEGROUP = "resourceGroup"
SUBSCRIPTIONID = "subscriptionId"
class Output:
VALUE = "value"
class ListVmInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"resourceGroup": {
"type": "string",
"title": "Resource Group",
"description": "The resource group that will contain the virtual machine",
"order": 2
},
"subscriptionId": {
"type": "string",
"title": "Subscription ID",
"description": "The identifier of your subscription",
"order": 1
}
},
"required": [
"subscriptionId",
"resourceGroup"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ListVmOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"value": {
"type": "array",
"title": "Value",
"description": "List items virtual machine in a resource group",
"items": {
"$ref": "#/definitions/value_vm"
},
"order": 1
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"value_vm": {
"type": "object",
"title": "value_vm",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the identifying url of the virtual machine",
"order": 1
},
"location": {
"type": "string",
"title": "Location",
"description": "Specifies the supported Azure location where the virtual machine should be created",
"order": 2
},
"name": {
"type": "string",
"title": "Name Virtual Machine",
"description": "The name of the virtual machine",
"order": 3
},
"properties": {
"$ref": "#/definitions/properties",
"title": "Properties",
"description": "Specifies the properties of the virtual machine",
"order": 4
},
"tags": {
"$ref": "#/definitions/tags",
"title": "Tags",
"description": "Specifies the tags that are assigned to the virtual machine",
"order": 6
},
"type": {
"type": "string",
"title": "Type",
"description": "Specifies the type of compute resource",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 37.224641
| 177
| 0.39752
|
import komand
import json
class Input:
RESOURCEGROUP = "resourceGroup"
SUBSCRIPTIONID = "subscriptionId"
class Output:
VALUE = "value"
class ListVmInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"resourceGroup": {
"type": "string",
"title": "Resource Group",
"description": "The resource group that will contain the virtual machine",
"order": 2
},
"subscriptionId": {
"type": "string",
"title": "Subscription ID",
"description": "The identifier of your subscription",
"order": 1
}
},
"required": [
"subscriptionId",
"resourceGroup"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ListVmOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"value": {
"type": "array",
"title": "Value",
"description": "List items virtual machine in a resource group",
"items": {
"$ref": "#/definitions/value_vm"
},
"order": 1
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"value_vm": {
"type": "object",
"title": "value_vm",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the identifying url of the virtual machine",
"order": 1
},
"location": {
"type": "string",
"title": "Location",
"description": "Specifies the supported Azure location where the virtual machine should be created",
"order": 2
},
"name": {
"type": "string",
"title": "Name Virtual Machine",
"description": "The name of the virtual machine",
"order": 3
},
"properties": {
"$ref": "#/definitions/properties",
"title": "Properties",
"description": "Specifies the properties of the virtual machine",
"order": 4
},
"tags": {
"$ref": "#/definitions/tags",
"title": "Tags",
"description": "Specifies the tags that are assigned to the virtual machine",
"order": 6
},
"type": {
"type": "string",
"title": "Type",
"description": "Specifies the type of compute resource",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
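# --- Illustrative example (not part of the generated schema file above) ---
# A minimal payload shaped after the "linuxConfiguration"/"ssh" portion of the
# schema string: the field names are taken from the schema, while the variable
# name and all values are made-up placeholders, not real credentials or IDs.
_EXAMPLE_LINUX_OS_PROFILE = {
    "customData": "<base64-encoded cloud-init script>",
    "linuxConfiguration": {
        "disablePasswordAuthentication": True,
        "ssh": {
            "publicKeys": [
                {
                    "keyData": "ssh-rsa AAAA... user@example",
                    "path": "/home/azureuser/.ssh/authorized_keys",
                }
            ]
        },
    },
}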
| true
| true
|
79036f096fea5eaae0c7bd3e84ab079718fe2a88
| 1,992
|
py
|
Python
|
wbb/modules/webss.py
|
TAMILVIP007/WilliamButcherBot
|
e7a02edcd57ec62c7f80c601484e92e257e1d5bf
|
[
"MIT"
] | 1
|
2021-06-30T07:09:45.000Z
|
2021-06-30T07:09:45.000Z
|
wbb/modules/webss.py
|
fakeenemy01/GroupBot
|
e7a02edcd57ec62c7f80c601484e92e257e1d5bf
|
[
"MIT"
] | null | null | null |
wbb/modules/webss.py
|
fakeenemy01/GroupBot
|
e7a02edcd57ec62c7f80c601484e92e257e1d5bf
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyrogram import filters
from wbb import app
from wbb.core.decorators.errors import capture_err
__MODULE__ = "WebSS"
__HELP__ = "/webss | .webss [URL] - Take A Screenshot Of A Webpage"
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
try:
if len(message.command) != 2:
return await message.reply_text(
"Give A Url To Fetch Screenshot."
)
url = message.text.split(None, 1)[1]
m = await message.reply_text("**Taking Screenshot**")
await m.edit("**Uploading**")
try:
await app.send_photo(
message.chat.id,
photo=f"https://webshot.amanoteam.com/print?q={url}",
)
except TypeError:
return await m.edit("No Such Website.")
await m.delete()
except Exception as e:
await message.reply_text(str(e))
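# --- Illustrative sketch (not part of the original module) ---
# A synchronous counterpart of take_ss() for exercising the same webshot
# endpoint outside the bot. It assumes `requests` is installed and that the
# endpoint returns the rendered image bytes directly; the helper name and its
# defaults are made up for illustration.
import requests
def fetch_webss_sync(url: str, out_path: str = "screenshot.png") -> str:
    """Download a screenshot of `url` via the webshot service used above."""
    resp = requests.get(f"https://webshot.amanoteam.com/print?q={url}", timeout=30)
    resp.raise_for_status()  # surface HTTP errors instead of saving an error page
    with open(out_path, "wb") as fh:
        fh.write(resp.content)  # save the raw image bytes to disk
    return out_path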
| 36.888889
| 78
| 0.705823
|
from pyrogram import filters
from wbb import app
from wbb.core.decorators.errors import capture_err
__MODULE__ = "WebSS"
__HELP__ = "/webss | .webss [URL] - Take A Screenshot Of A Webpage"
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
try:
if len(message.command) != 2:
return await message.reply_text(
"Give A Url To Fetch Screenshot."
)
url = message.text.split(None, 1)[1]
m = await message.reply_text("**Taking Screenshot**")
await m.edit("**Uploading**")
try:
await app.send_photo(
message.chat.id,
photo=f"https://webshot.amanoteam.com/print?q={url}",
)
except TypeError:
return await m.edit("No Such Website.")
await m.delete()
except Exception as e:
await message.reply_text(str(e))
| true
| true
|
79036fb8927da1a20e29d990f89eb1771a371915
| 27,299
|
py
|
Python
|
core/server.py
|
simra/msrflute
|
c28e2e6bcfa9464b8640ccd625393bbed28491c3
|
[
"MIT"
] | null | null | null |
core/server.py
|
simra/msrflute
|
c28e2e6bcfa9464b8640ccd625393bbed28491c3
|
[
"MIT"
] | null | null | null |
core/server.py
|
simra/msrflute
|
c28e2e6bcfa9464b8640ccd625393bbed28491c3
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
In this file, we define the classes that live inside 'worker 0', the worker
responsible for orchestration and aggregation. The main class is the
OptimizationServer, which sends clients to the other workers to process and
combines the resulting models.
'''
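# High-level flow of OptimizationServer.train(), summarised here for reference:
#   1. sample `num_clients_per_iteration` clients from the client pool
#   2. broadcast (initial client learning rate, current model parameters) as `server_data`
#   3. collect each client's pseudo-gradient payload plus timing/privacy stats
#   4. let the selected strategy combine the payloads and update the model
#   5. periodically evaluate on val/test, back up models, and optionally fall
#      back to the best checkpoint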
import json
import logging
import os
import random
import shutil
import time
from collections import defaultdict
import numpy as np
import torch
# Internal imports
from core.globals import TRAINING_FRAMEWORK_TYPE
if TRAINING_FRAMEWORK_TYPE == 'mpi':
import core.federated as federated
else:
raise NotImplementedError('{} is not supported'.format(TRAINING_FRAMEWORK_TYPE))
from core.evaluation import Evaluation
from core.client import Client
from .strategies import select_strategy
from .trainer import (
ModelUpdater,
Trainer,
set_component_wise_lr,
)
from utils import (
get_lr,
print_rank,
update_json_log,
)
# For profiling
import cProfile
import pstats
# AzureML-related libs
from azureml.core import Run
run = Run.get_context()
class OptimizationServer(federated.Server):
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader,
val_dataloader, test_dataloader, config, config_server):
'''Implement Server's orchestration and aggregation.
This is the main Server class, that actually implements orchestration
and aggregation, inheriting from `federated.Server`, which deals with
communication only.
The `train` method is central in FLUTE, as it defines good part of what
happens during training.
Args:
num_clients (int): total available clients.
model (torch.nn.Module): neural network model.
optimizer (torch.optim.Optimizer): optimizer.
ss_scheduler: scheduled sampling scheduler.
data_path (str): points to where data is.
model_path (str): points to where pretrained model is.
train_dataloader (torch.utils.data.DataLoader): dataloader for training
val_dataloader (torch.utils.data.DataLoader): dataloader for validation
test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None
config (dict): JSON style configuration parameters
config_server: deprecated, kept for API compatibility only.
'''
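        # Illustrative (partial) shape of `config`, inferred from the keys read in
        # this constructor; real FLUTE configs contain additional fields:
        #   config = {
        #       'strategy': ...,
        #       'model_config': {'model_type': ...},
        #       'client_config': {'type': ..., 'data_config': {'train': {'list_of_train_data': ...}}},
        #       'server_config': {'max_iteration': ..., 'num_clients_per_iteration': ...,
        #                         'val_freq': ..., 'rec_freq': ..., 'aggregate_median': ...,
        #                         'best_model_criterion': ..., 'fall_back_to_best_model': ...,
        #                         'annealing_config': {...}, 'data_config': {...}},
        #   }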
super().__init__()
# Initialize all attributes from arguments
self.client_idx_list = list(range(num_clients))
self.config = config
server_config = config['server_config']
decoder_config = config.get('decoder_config', None)
self.max_iteration = server_config['max_iteration']
self.do_clustering = server_config.get('clustering', False)
self.num_clients_per_iteration = [int(x) for x in server_config['num_clients_per_iteration'].split(',')] \
if isinstance(server_config['num_clients_per_iteration'], str) \
else [server_config['num_clients_per_iteration']]
self.val_freq = server_config['val_freq']
self.req_freq = server_config['rec_freq']
self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader)
# TODO: does this need to be adjusted for custom metrics?
self.metrics = {
'best_val_loss': float('inf'),
'best_val_acc': 0.0,
'best_test_loss': float('inf'),
'best_test_acc': 0.0
}
self.model_backup_freq = server_config.get('model_backup_freq', 100)
self.worker_trainer_config = server_config.get('trainer_config', {})
self.aggregate_median = server_config['aggregate_median']
self.initial_lr_client = server_config.get('initial_lr_client', -1.0)
self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0)
self.model_type = config['model_config']['model_type']
self.quant_thresh = config['client_config'].get('quant_thresh', None)
self.quant_bits = config['client_config'].get('quant_bits', 10)
self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data']
self.data_path = data_path
# Get max grad norm from data config
if 'train' in server_config['data_config']:
max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None)
else:
max_grad_norm = None
# Creating an instance to update the model with stats aggregated from workers
self.worker_trainer = ModelUpdater(
model=model,
optimizer=optimizer,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader if train_dataloader is not None else val_dataloader,
val_dataloader=val_dataloader,
max_grad_norm=max_grad_norm,
anneal_config=server_config['annealing_config'],
model_type=self.model_type,
decoder_config=decoder_config
)
self.metrics['worker_trainer'] = self.worker_trainer
# Creating an instance for the server-side trainer (runs mini-batch SGD)
self.server_replay_iterations = None
self.server_trainer = None
if train_dataloader is not None:
assert 'server_replay_config' in server_config, 'server_replay_config is not set'
assert 'optimizer_config' in server_config[
'server_replay_config'], 'server-side replay training optimizer is not set'
self.server_optimizer_config = server_config['server_replay_config']['optimizer_config']
self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {})
self.server_replay_iterations = server_config['server_replay_config']['server_iterations']
self.server_trainer = Trainer(
model=model,
optimizer=None,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader,
server_replay_config=server_config['server_replay_config'],
val_dataloader=None,
max_grad_norm=server_config['server_replay_config']\
.get('max_grad_norm',server_config['data_config']['train']\
.get('max_grad_norm',None)),
anneal_config=server_config['server_replay_config'].get('annealing_config', None)
)
self.skip_model_update = False # will not update the model if True
self.train_loss = 0.0
self.model_path = model_path
self.best_model_criterion = server_config['best_model_criterion']
self.fall_back_to_best_model = server_config['fall_back_to_best_model']
self.last_model_path = os.path.join(self.model_path, 'latest_model.tar')
self.best_model_path = os.path.join(self.model_path,
'best_val_{}_model.tar'.format(self.best_model_criterion))
self.log_path = os.path.join(self.model_path, 'status_log.json')
        self.cur_iter_no = 0  # keep the iteration number for TensorBoard plotting
self.lr_weight = 1.0
self.losses = []
self.no_label_updates = 0 # no. label updates
        # Update the parameters above from the checkpoint/status log if resuming
if server_config.get('resume_from_checkpoint', False):
self.load_saved_status()
# Decoding config
self.decoder_config = decoder_config
self.spm_model = server_config['data_config']['test'].get('spm_model', None)
self.do_profiling = server_config.get('do_profiling', False)
# Parallel processing
self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None)
StrategyClass = select_strategy(config['strategy'])
self.strategy = StrategyClass('server', self.config, self.model_path)
print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
def load_saved_status(self):
'''Load checkpoint from disk'''
# Check if model is on disk, if so loads it onto trainer
if os.path.exists(self.last_model_path):
print_rank('Resuming from checkpoint model {}'.format(self.last_model_path))
self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True)
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
# Check if log is on disk, if so loads it onto current stats
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as logfp: # loading the iteration no., best loss and CER
elems = json.load(logfp)
self.cur_iter_no = elems.get('i', 0)
self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf'))
self.metrics['best_val_acc'] = elems.get('best_val_acc', 0)
self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf'))
self.metrics['best_test_acc'] = elems.get('best_test_acc', 0)
self.lr_weight = elems.get('weight', 1.0)
self.no_label_updates = elems.get('num_label_updates', 0)
print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
def run(self):
'''Trigger training.
This is a simple wrapper to the `train` method.
'''
print_rank('server started')
self.train()
print_rank('server terminated')
def train(self):
'''Main method for training.'''
self.run_stats = {
'secsPerClientRound': [],
'secsPerClient': [],
'secsPerClientTraining': [],
'secsPerClientSetup': [],
'secsPerClientFull': [],
'secsPerRoundHousekeeping': [],
'secsPerRoundTotal': [],
'mpiCosts': []
}
run.log('Max iterations', self.max_iteration)
try:
            if torch.cuda.is_available():
                self.worker_trainer.model.cuda()
# Do an initial validation round to understand the pretrained model's validation accuracy
# Skip if we resumed from a checkpoint (cur_iter_no > 0)
eval_list = []
if self.cur_iter_no == 0:
if self.config['server_config']['initial_rec']:
eval_list.append('test')
if self.config['server_config']['initial_val']:
eval_list.append('val')
run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
print_rank("Running {} at itr={}".format(eval_list, self.cur_iter_no))
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
eval_list = [] # some cleanup
# Dump all the information in aggregate_metric
print_rank('Saving Model Before Starting Training', loglevel=logging.INFO)
for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']:
self.worker_trainer.save(
model_path=self.model_path,
token=token,
config=self.config['server_config']
)
# Training loop
self.worker_trainer.model.train()
for i in range(self.cur_iter_no, self.max_iteration):
begin = time.time()
metrics_payload = {}
def log_metric(k, v):
metrics_payload[k] = v
print_rank('==== iteration {}'.format(i))
log_metric('Current iteration', i)
# Initial value for the learning rate of the worker
initial_lr = self.initial_lr_client * self.lr_weight
print_rank('Client learning rate {}'.format(initial_lr))
# Run training on clients
self.worker_trainer.model.zero_grad()
self.train_loss = []
server_data = (
initial_lr,
[p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]
)
# Random number of clients per iteration
if len(self.num_clients_per_iteration) > 1:
num_clients_curr_iter = random.randint(
self.num_clients_per_iteration[0],
self.num_clients_per_iteration[1]
)
else:
num_clients_curr_iter = self.num_clients_per_iteration[0]
log_metric('Clients for round', num_clients_curr_iter)
# Perform annealing in quantization threshold
if self.quant_thresh is not None:
self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0)
self.quant_thresh = self.config['client_config']['quant_thresh']
log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh'])
# Create the pool of clients -- sample from this pool to assign to workers
sampled_idx_clients = random.sample(self.client_idx_list,
num_clients_curr_iter) if num_clients_curr_iter > 0 else self.client_idx_list
sampled_clients = [
Client(
client_id,
self.config,
self.config['client_config']['type'] == 'optimization',
None
) for client_id in sampled_idx_clients
]
# Initialize stats
clients_begin = time.time()
client_losses = []
client_mag_grads = []
client_mean_grads = []
client_var_grads = []
client_norm_grads = []
self.run_stats['secsPerClient'].append([])
self.run_stats['secsPerClientFull'].append([])
self.run_stats['secsPerClientTraining'].append([])
self.run_stats['secsPerClientSetup'].append([])
self.run_stats['mpiCosts'].append([])
# Check if we want privacy metrics
apply_privacy_metrics = self.config.get('privacy_metrics_config', None) and \
self.config['privacy_metrics_config']['apply_metrics']
adaptive_leakage = apply_privacy_metrics and \
self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)
if apply_privacy_metrics:
privacy_metrics_stats = defaultdict(list)
# Initialize profiler
profiler = None
if self.do_profiling:
profiler = cProfile.Profile()
profiler.enable()
# Reset gradient for the model before assigning the new gradients
self.worker_trainer.model.zero_grad()
for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel):
# Process client output
client_timestamp = client_output['ts']
client_stats = client_output['cs']
client_loss = client_output['tl']
client_mag_grad = client_output['mg']
client_mean_grad = client_output['ng']
client_var_grad = client_output['vg']
client_norm_grad = client_output['rg']
client_payload = client_output['pl']
if apply_privacy_metrics:
privacy_stats = client_output['ps']
for metric, value in privacy_stats.items():
privacy_metrics_stats[metric].append(value)
self.run_stats['mpiCosts'][-1].append(time.time() - client_timestamp)
# Get actual pseudo-gradients for aggregation
payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload)
if not payload_processed:
print_rank('Dropping client', loglevel=logging.DEBUG)
num_clients_curr_iter -= 1
continue
# Aggregate stats
self.train_loss.append(client_loss)
client_losses.append(client_loss)
client_mag_grads.append(client_mag_grad.item())
client_mean_grads.append(client_mean_grad.item())
client_var_grads.append(client_var_grad.item())
client_norm_grads.append(client_norm_grad.item())
# Mark the end of client processing
client_end = time.time()
self.run_stats['secsPerClientFull'][-1].append(client_stats['full cost'])
self.run_stats['secsPerClientTraining'][-1].append(client_stats['training'])
self.run_stats['secsPerClientSetup'][-1].append(client_stats['setup'])
self.run_stats['secsPerClient'][-1].append(client_end - clients_begin)
# Tear down profiler
if self.do_profiling:
profiler.disable()
stats = pstats.Stats(profiler)
stats.sort_stats('cumulative').print_stats()
# Prepare output
client_mag_grads = np.array(client_mag_grads)
client_mean_grads = np.array(client_mean_grads)
client_var_grads = np.array(client_var_grads)
client_norm_grads = np.array(client_norm_grads)
client_stats = (client_mag_grads, client_mean_grads, client_var_grads)
dump_norm_stats = self.config.get('dump_norm_stats', False)
if dump_norm_stats:
with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF:
outF.write('{}\n'.format(json.dumps(list(client_norm_grads))))
# Print the privacy metrics
if apply_privacy_metrics:
for metric, values in privacy_metrics_stats.items():
if metric == 'Dropped clients':
log_metric(metric, sum(values))
else:
log_metric(metric, max(values))
if type(adaptive_leakage) is float:
values = privacy_metrics_stats['Practical epsilon (Max leakage)']
new_threshold = list(sorted(values))[int(adaptive_leakage*len(values))]
print_rank('Updating leakage threshold to {}'.format(new_threshold))
self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold
# Mark that all clients have been processed
end = time.time()
self.run_stats['secsPerClientRound'].append(end - begin)
begin = end
# Log the training loss to tensorboard/AML
log_metric('Training loss', sum(self.train_loss))
# Combine payloads
self.losses = self.strategy.combine_payloads(
worker_trainer=self.worker_trainer,
curr_iter=i,
num_clients_curr_iter=num_clients_curr_iter,
client_stats=client_stats,
logger=log_metric,
)
# Run a couple of iterations of training data on the server
if self.server_trainer is not None:
print_rank('Running replay iterations on server')
if 'updatable_names' in self.server_trainer_config:
set_component_wise_lr(
self.worker_trainer.model,
self.server_optimizer_config,
self.server_trainer_config['updatable_names']
)
self.server_trainer.prepare_iteration(self.worker_trainer.model)
self.server_trainer.train_desired_samples(self.server_replay_iterations)
self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict())
torch.cuda.empty_cache()
# Update a sampling scheduler
print_rank('Run ss scheduler')
self.worker_trainer.run_ss_scheduler()
# Run inference and score on val/test depending on the iter. number
if ((i+1) % self.val_freq) == 0:
eval_list.append("val")
                if ((i+1) % self.req_freq) == 0:
                    eval_list.append("test")
                if len(eval_list) > 0:
                    print_rank('Running {} at itr={}'.format(eval_list, i+1))
self.metrics['worker_trainer'] = self.worker_trainer
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
self.losses = self.evaluation.losses
                # Create a schedule for the initial_lr (for the worker).
                # Note: eval_list must be checked *before* it is cleared,
                # otherwise this learning-rate decay step would never trigger.
                if 'val' in eval_list:
                    run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
                    if not (self.losses[0] < self.metrics['best_val_loss']):
                        self.lr_weight *= self.lr_decay_factor
                        print_rank('LOG: Client weight of learning rate {}..'.format(self.lr_weight))
                eval_list = []  # cleanup, done only after the check above has used it
# Backup the current best models
self.backup_models(i)
# Fall back to the best model if the option is enabled
self.fall_back_to_prev_best_status()
# Logging the latest best values
update_json_log(
self.log_path,
{
'i': i + 1,
'best_val_loss': float(self.metrics['best_val_loss']),
'best_val_acc': float(self.metrics['best_val_acc']),
'best_test_loss': float(self.metrics['best_test_loss']),
'best_test_acc': float(self.metrics['best_test_acc']),
'weight': float(self.lr_weight),
'num_label_updates': int(self.no_label_updates)
},
)
end = time.time()
# Aggregate stats
self.run_stats['secsPerRoundHousekeeping'].append(end - begin)
self.run_stats['secsPerRoundTotal'].append(self.run_stats['secsPerClientRound'][-1] + \
self.run_stats['secsPerRoundHousekeeping'][-1])
log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][-1])
if self.do_profiling:
log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][-1])
log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][-1])
metrics_for_stats = [
'secsPerClient',
'secsPerClientTraining',
'secsPerClientFull',
'secsPerClientSetup',
'mpiCosts',
]
for metric in metrics_for_stats:
log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][-1]))
log_metric(f'{metric}Median', np.median(self.run_stats[metric][-1]))
log_metric(f'{metric}Max', max(self.run_stats[metric][-1]))
for k in self.run_stats:
if k in metrics_for_stats:
print_rank('{}: {}'.format(k, max(self.run_stats[k][-1])), loglevel=logging.DEBUG)
else:
print_rank('{}: {}'.format(k, self.run_stats[k][-1]), loglevel=logging.DEBUG)
# Log all the metrics
for k in metrics_payload:
run.log(k, metrics_payload[k])
finally: # perform cleanup even if error was raised above
self.terminate_workers(terminate=(not self.do_clustering))
def backup_models(self, i):
'''Save the current best models.
        Always saves the latest model; every `model_backup_freq` iterations it also
        snapshots the epoch model and the current best validation-accuracy,
        validation-loss and test-accuracy models.
        Args:
            i (int): current iteration number.
'''
# Always save the latest model
self.worker_trainer.save(
model_path=self.model_path,
token='latest',
config=self.config['server_config'],
)
if (i % self.model_backup_freq) == 0: # save the current best models
self.worker_trainer.save(
model_path=self.model_path,
token='epoch{}'.format(i),
config=self.config['server_config']
)
for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']:
src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname))
if os.path.exists(src_model_path):
dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname))
shutil.copyfile(src_model_path, dst_model_path)
print_rank('Saved {}'.format(dst_model_path))
def fall_back_to_prev_best_status(self):
'''Go back to the past best status and switch to the recent best model.'''
if self.fall_back_to_best_model:
print_rank('falling back to model {}'.format(self.best_model_path))
# Save current learning rate
tmp_lr = get_lr(self.worker_trainer.optimizer)
# Load previous best model
self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False)
# Update previous learning rate on optimizer
for g in self.worker_trainer.optimizer.param_groups:
g['lr'] = tmp_lr
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
def select_server(server_type, config):
'''Select a server type using different possible strings.
Right now this just returns `OptimizationServer`, but this
function could be useful when there are multiple choices of
server.
Args:
server_type (str): indicates server choice.
config (dict): config parsed from YAML, passed so that
parameters can be used to select a given server.
'''
return OptimizationServer
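# --- Illustrative sketch only ---
# The real aggregation lives in the pluggable strategy returned by
# select_strategy() (see the combine_payloads() call in train() above); the
# helper below just shows the classic FedAvg-style weighted averaging of client
# updates that such a strategy typically performs. Its name, arguments and
# weighting scheme are assumptions, not FLUTE's implementation.
def fedavg_combine(client_deltas, client_weights):
    '''Return the weighted average of per-client parameter deltas.
    Args:
        client_deltas: list of per-client updates, each a list of torch.Tensor.
        client_weights: list of floats, e.g. the number of samples per client.
    '''
    total = float(sum(client_weights))
    combined = [torch.zeros_like(p) for p in client_deltas[0]]
    for delta, weight in zip(client_deltas, client_weights):
        for acc, d in zip(combined, delta):
            acc.add_(d, alpha=weight / total)  # accumulate the weighted contribution
    return combined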
| 45.271973
| 121
| 0.590534
|
import json
import logging
import os
import random
import shutil
import time
from collections import defaultdict
import numpy as np
import torch
from core.globals import TRAINING_FRAMEWORK_TYPE
if TRAINING_FRAMEWORK_TYPE == 'mpi':
import core.federated as federated
else:
raise NotImplementedError('{} is not supported'.format(TRAINING_FRAMEWORK_TYPE))
from core.evaluation import Evaluation
from core.client import Client
from .strategies import select_strategy
from .trainer import (
ModelUpdater,
Trainer,
set_component_wise_lr,
)
from utils import (
get_lr,
print_rank,
update_json_log,
)
import cProfile
import pstats
from azureml.core import Run
run = Run.get_context()
class OptimizationServer(federated.Server):
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader,
val_dataloader, test_dataloader, config, config_server):
super().__init__()
self.client_idx_list = list(range(num_clients))
self.config = config
server_config = config['server_config']
decoder_config = config.get('decoder_config', None)
self.max_iteration = server_config['max_iteration']
self.do_clustering = server_config.get('clustering', False)
self.num_clients_per_iteration = [int(x) for x in server_config['num_clients_per_iteration'].split(',')] \
if isinstance(server_config['num_clients_per_iteration'], str) \
else [server_config['num_clients_per_iteration']]
self.val_freq = server_config['val_freq']
self.req_freq = server_config['rec_freq']
self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader)
self.metrics = {
'best_val_loss': float('inf'),
'best_val_acc': 0.0,
'best_test_loss': float('inf'),
'best_test_acc': 0.0
}
self.model_backup_freq = server_config.get('model_backup_freq', 100)
self.worker_trainer_config = server_config.get('trainer_config', {})
self.aggregate_median = server_config['aggregate_median']
self.initial_lr_client = server_config.get('initial_lr_client', -1.0)
self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0)
self.model_type = config['model_config']['model_type']
self.quant_thresh = config['client_config'].get('quant_thresh', None)
self.quant_bits = config['client_config'].get('quant_bits', 10)
self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data']
self.data_path = data_path
if 'train' in server_config['data_config']:
max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None)
else:
max_grad_norm = None
self.worker_trainer = ModelUpdater(
model=model,
optimizer=optimizer,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader if train_dataloader is not None else val_dataloader,
val_dataloader=val_dataloader,
max_grad_norm=max_grad_norm,
anneal_config=server_config['annealing_config'],
model_type=self.model_type,
decoder_config=decoder_config
)
self.metrics['worker_trainer'] = self.worker_trainer
self.server_replay_iterations = None
self.server_trainer = None
if train_dataloader is not None:
assert 'server_replay_config' in server_config, 'server_replay_config is not set'
assert 'optimizer_config' in server_config[
'server_replay_config'], 'server-side replay training optimizer is not set'
self.server_optimizer_config = server_config['server_replay_config']['optimizer_config']
self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {})
self.server_replay_iterations = server_config['server_replay_config']['server_iterations']
self.server_trainer = Trainer(
model=model,
optimizer=None,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader,
server_replay_config=server_config['server_replay_config'],
val_dataloader=None,
max_grad_norm=server_config['server_replay_config']\
.get('max_grad_norm',server_config['data_config']['train']\
.get('max_grad_norm',None)),
anneal_config=server_config['server_replay_config'].get('annealing_config', None)
)
self.skip_model_update = False
self.train_loss = 0.0
self.model_path = model_path
self.best_model_criterion = server_config['best_model_criterion']
self.fall_back_to_best_model = server_config['fall_back_to_best_model']
self.last_model_path = os.path.join(self.model_path, 'latest_model.tar')
self.best_model_path = os.path.join(self.model_path,
'best_val_{}_model.tar'.format(self.best_model_criterion))
self.log_path = os.path.join(self.model_path, 'status_log.json')
self.cur_iter_no = 0
self.lr_weight = 1.0
self.losses = []
self.no_label_updates = 0
if server_config.get('resume_from_checkpoint', False):
self.load_saved_status()
self.decoder_config = decoder_config
self.spm_model = server_config['data_config']['test'].get('spm_model', None)
self.do_profiling = server_config.get('do_profiling', False)
self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None)
StrategyClass = select_strategy(config['strategy'])
self.strategy = StrategyClass('server', self.config, self.model_path)
print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
def load_saved_status(self):
if os.path.exists(self.last_model_path):
print_rank('Resuming from checkpoint model {}'.format(self.last_model_path))
self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True)
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as logfp:
elems = json.load(logfp)
self.cur_iter_no = elems.get('i', 0)
self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf'))
self.metrics['best_val_acc'] = elems.get('best_val_acc', 0)
self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf'))
self.metrics['best_test_acc'] = elems.get('best_test_acc', 0)
self.lr_weight = elems.get('weight', 1.0)
self.no_label_updates = elems.get('num_label_updates', 0)
print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
def run(self):
print_rank('server started')
self.train()
print_rank('server terminated')
def train(self):
self.run_stats = {
'secsPerClientRound': [],
'secsPerClient': [],
'secsPerClientTraining': [],
'secsPerClientSetup': [],
'secsPerClientFull': [],
'secsPerRoundHousekeeping': [],
'secsPerRoundTotal': [],
'mpiCosts': []
}
run.log('Max iterations', self.max_iteration)
try:
            if torch.cuda.is_available():
                self.worker_trainer.model.cuda()
# Skip if we resumed from a checkpoint (cur_iter_no > 0)
eval_list = []
if self.cur_iter_no == 0:
if self.config['server_config']['initial_rec']:
eval_list.append('test')
if self.config['server_config']['initial_val']:
eval_list.append('val')
run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
print_rank("Running {} at itr={}".format(eval_list, self.cur_iter_no))
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
eval_list = [] # some cleanup
# Dump all the information in aggregate_metric
print_rank('Saving Model Before Starting Training', loglevel=logging.INFO)
for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']:
self.worker_trainer.save(
model_path=self.model_path,
token=token,
config=self.config['server_config']
)
# Training loop
self.worker_trainer.model.train()
for i in range(self.cur_iter_no, self.max_iteration):
begin = time.time()
metrics_payload = {}
def log_metric(k, v):
metrics_payload[k] = v
print_rank('==== iteration {}'.format(i))
log_metric('Current iteration', i)
# Initial value for the learning rate of the worker
initial_lr = self.initial_lr_client * self.lr_weight
print_rank('Client learning rate {}'.format(initial_lr))
# Run training on clients
self.worker_trainer.model.zero_grad()
self.train_loss = []
server_data = (
initial_lr,
[p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]
)
# Random number of clients per iteration
if len(self.num_clients_per_iteration) > 1:
num_clients_curr_iter = random.randint(
self.num_clients_per_iteration[0],
self.num_clients_per_iteration[1]
)
else:
num_clients_curr_iter = self.num_clients_per_iteration[0]
log_metric('Clients for round', num_clients_curr_iter)
# Perform annealing in quantization threshold
if self.quant_thresh is not None:
self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0)
self.quant_thresh = self.config['client_config']['quant_thresh']
log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh'])
# Create the pool of clients -- sample from this pool to assign to workers
sampled_idx_clients = random.sample(self.client_idx_list,
num_clients_curr_iter) if num_clients_curr_iter > 0 else self.client_idx_list
sampled_clients = [
Client(
client_id,
self.config,
self.config['client_config']['type'] == 'optimization',
None
) for client_id in sampled_idx_clients
]
# Initialize stats
clients_begin = time.time()
client_losses = []
client_mag_grads = []
client_mean_grads = []
client_var_grads = []
client_norm_grads = []
self.run_stats['secsPerClient'].append([])
self.run_stats['secsPerClientFull'].append([])
self.run_stats['secsPerClientTraining'].append([])
self.run_stats['secsPerClientSetup'].append([])
self.run_stats['mpiCosts'].append([])
# Check if we want privacy metrics
apply_privacy_metrics = self.config.get('privacy_metrics_config', None) and \
self.config['privacy_metrics_config']['apply_metrics']
adaptive_leakage = apply_privacy_metrics and \
self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)
if apply_privacy_metrics:
privacy_metrics_stats = defaultdict(list)
# Initialize profiler
profiler = None
if self.do_profiling:
profiler = cProfile.Profile()
profiler.enable()
# Reset gradient for the model before assigning the new gradients
self.worker_trainer.model.zero_grad()
for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel):
# Process client output
client_timestamp = client_output['ts']
client_stats = client_output['cs']
client_loss = client_output['tl']
client_mag_grad = client_output['mg']
client_mean_grad = client_output['ng']
client_var_grad = client_output['vg']
client_norm_grad = client_output['rg']
client_payload = client_output['pl']
if apply_privacy_metrics:
privacy_stats = client_output['ps']
for metric, value in privacy_stats.items():
privacy_metrics_stats[metric].append(value)
self.run_stats['mpiCosts'][-1].append(time.time() - client_timestamp)
# Get actual pseudo-gradients for aggregation
payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload)
if not payload_processed:
print_rank('Dropping client', loglevel=logging.DEBUG)
num_clients_curr_iter -= 1
continue
# Aggregate stats
self.train_loss.append(client_loss)
client_losses.append(client_loss)
client_mag_grads.append(client_mag_grad.item())
client_mean_grads.append(client_mean_grad.item())
client_var_grads.append(client_var_grad.item())
client_norm_grads.append(client_norm_grad.item())
# Mark the end of client processing
client_end = time.time()
self.run_stats['secsPerClientFull'][-1].append(client_stats['full cost'])
self.run_stats['secsPerClientTraining'][-1].append(client_stats['training'])
self.run_stats['secsPerClientSetup'][-1].append(client_stats['setup'])
self.run_stats['secsPerClient'][-1].append(client_end - clients_begin)
# Tear down profiler
if self.do_profiling:
profiler.disable()
stats = pstats.Stats(profiler)
stats.sort_stats('cumulative').print_stats()
# Prepare output
client_mag_grads = np.array(client_mag_grads)
client_mean_grads = np.array(client_mean_grads)
client_var_grads = np.array(client_var_grads)
client_norm_grads = np.array(client_norm_grads)
client_stats = (client_mag_grads, client_mean_grads, client_var_grads)
dump_norm_stats = self.config.get('dump_norm_stats', False)
if dump_norm_stats:
with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF:
outF.write('{}\n'.format(json.dumps(list(client_norm_grads))))
# Print the privacy metrics
if apply_privacy_metrics:
for metric, values in privacy_metrics_stats.items():
if metric == 'Dropped clients':
log_metric(metric, sum(values))
else:
log_metric(metric, max(values))
if type(adaptive_leakage) is float:
values = privacy_metrics_stats['Practical epsilon (Max leakage)']
new_threshold = list(sorted(values))[int(adaptive_leakage*len(values))]
print_rank('Updating leakage threshold to {}'.format(new_threshold))
self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold
# Mark that all clients have been processed
end = time.time()
self.run_stats['secsPerClientRound'].append(end - begin)
begin = end
# Log the training loss to tensorboard/AML
log_metric('Training loss', sum(self.train_loss))
# Combine payloads
self.losses = self.strategy.combine_payloads(
worker_trainer=self.worker_trainer,
curr_iter=i,
num_clients_curr_iter=num_clients_curr_iter,
client_stats=client_stats,
logger=log_metric,
)
# Run a couple of iterations of training data on the server
if self.server_trainer is not None:
print_rank('Running replay iterations on server')
if 'updatable_names' in self.server_trainer_config:
set_component_wise_lr(
self.worker_trainer.model,
self.server_optimizer_config,
self.server_trainer_config['updatable_names']
)
self.server_trainer.prepare_iteration(self.worker_trainer.model)
self.server_trainer.train_desired_samples(self.server_replay_iterations)
self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict())
torch.cuda.empty_cache()
# Update a sampling scheduler
print_rank('Run ss scheduler')
self.worker_trainer.run_ss_scheduler()
# Run inference and score on val/test depending on the iter. number
if ((i+1) % self.val_freq) == 0:
eval_list.append("val")
                if ((i+1) % self.req_freq) == 0:
                    eval_list.append("test")
                if len(eval_list) > 0:
                    print_rank('Running {} at itr={}'.format(eval_list, i+1))
                    self.metrics['worker_trainer'] = self.worker_trainer
                    self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
                    self.losses = self.evaluation.losses
                # Create a schedule for the initial_lr (for the worker);
                # decay the client learning rate when validation loss has not improved.
                if 'val' in eval_list:
                    run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
                    if not (self.losses[0] < self.metrics['best_val_loss']):
                        self.lr_weight *= self.lr_decay_factor
                        print_rank('LOG: Client weight of learning rate {}..'.format(self.lr_weight))
                eval_list = []
# Backup the current best models
self.backup_models(i)
# Fall back to the best model if the option is enabled
self.fall_back_to_prev_best_status()
# Logging the latest best values
update_json_log(
self.log_path,
{
'i': i + 1,
'best_val_loss': float(self.metrics['best_val_loss']),
'best_val_acc': float(self.metrics['best_val_acc']),
'best_test_loss': float(self.metrics['best_test_loss']),
'best_test_acc': float(self.metrics['best_test_acc']),
'weight': float(self.lr_weight),
'num_label_updates': int(self.no_label_updates)
},
)
end = time.time()
# Aggregate stats
self.run_stats['secsPerRoundHousekeeping'].append(end - begin)
self.run_stats['secsPerRoundTotal'].append(self.run_stats['secsPerClientRound'][-1] + \
self.run_stats['secsPerRoundHousekeeping'][-1])
log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][-1])
if self.do_profiling:
log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][-1])
log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][-1])
metrics_for_stats = [
'secsPerClient',
'secsPerClientTraining',
'secsPerClientFull',
'secsPerClientSetup',
'mpiCosts',
]
for metric in metrics_for_stats:
log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][-1]))
log_metric(f'{metric}Median', np.median(self.run_stats[metric][-1]))
log_metric(f'{metric}Max', max(self.run_stats[metric][-1]))
for k in self.run_stats:
if k in metrics_for_stats:
print_rank('{}: {}'.format(k, max(self.run_stats[k][-1])), loglevel=logging.DEBUG)
else:
print_rank('{}: {}'.format(k, self.run_stats[k][-1]), loglevel=logging.DEBUG)
# Log all the metrics
for k in metrics_payload:
run.log(k, metrics_payload[k])
finally: # perform cleanup even if error was raised above
self.terminate_workers(terminate=(not self.do_clustering))
def backup_models(self, i):
# Always save the latest model
self.worker_trainer.save(
model_path=self.model_path,
token='latest',
config=self.config['server_config'],
)
if (i % self.model_backup_freq) == 0: # save the current best models
self.worker_trainer.save(
model_path=self.model_path,
token='epoch{}'.format(i),
config=self.config['server_config']
)
for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']:
src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname))
if os.path.exists(src_model_path):
dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname))
shutil.copyfile(src_model_path, dst_model_path)
print_rank('Saved {}'.format(dst_model_path))
def fall_back_to_prev_best_status(self):
if self.fall_back_to_best_model:
print_rank('falling back to model {}'.format(self.best_model_path))
# Save current learning rate
tmp_lr = get_lr(self.worker_trainer.optimizer)
# Load previous best model
self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False)
# Update previous learning rate on optimizer
for g in self.worker_trainer.optimizer.param_groups:
g['lr'] = tmp_lr
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
def select_server(server_type, config):
    """Factory helper; currently always returns the OptimizationServer class (the arguments are accepted for interface compatibility but not used)."""
    return OptimizationServer
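A minimal, hypothetical sketch of the checkpoint status log that load_saved_status() reads and that the training loop rewrites each round via update_json_log(); the key names come from the code above, the values are illustrative only.
import json
# Illustrative contents of status_log.json; keys mirror those read in load_saved_status().
example_status = {
    'i': 42,                     # iteration to resume from
    'best_val_loss': 1.73,
    'best_val_acc': 0.61,
    'best_test_loss': 1.80,
    'best_test_acc': 0.59,
    'weight': 0.5,               # client learning-rate multiplier (lr_weight)
    'num_label_updates': 0,
}
with open('status_log.json', 'w') as logfp:
    json.dump(example_status, logfp)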
| true
| true
|
7903713a400493826bb2db2002ac25915a1e24f0
| 1,895
|
py
|
Python
|
webapp/external_access.py
|
Quoding/petricore
|
c2275db64a567ec5dc8db1f4283969dfb749572a
|
[
"MIT"
] | null | null | null |
webapp/external_access.py
|
Quoding/petricore
|
c2275db64a567ec5dc8db1f4283969dfb749572a
|
[
"MIT"
] | 2
|
2020-01-23T15:24:08.000Z
|
2020-03-23T19:16:45.000Z
|
webapp/external_access.py
|
calculquebec/petricore
|
c2275db64a567ec5dc8db1f4283969dfb749572a
|
[
"MIT"
] | null | null | null |
import pymysql.cursors
import ldap
def get_domain_name():
"""
Returns the domain name of the current configuration from a config file
Returns
-------
string
the domain name
"""
with open("/var/www/logic_webapp/webapp_config") as file:
line = file.readline()
domain = line.split("=")[1].rstrip() # Take right hand side of = and remove \n
return domain
def get_db_password():
    """
    Returns the database password of the current configuration from a config file
    Returns
    -------
    string
        the password
    """
    with open("/var/www/logic_webapp/webapp_config") as file:
line = file.readlines()[1]
password = line.split("=")[
1
].rstrip() # Take right hand side of = and remove \n
return password
def create_slurm_db_connection(host, port, user, password, db):
"""
Creates the connection to the database (MySQL) so it can be queried
Parameters
----------
host : string
hostname on which is located the DB
port : integer
port on which the connection is to be established
user : string
user name with which the connection is to be established
password : string
password of the user on the database (of the user `user`)
db : string
name of the database which will be queried
Returns
-------
PyMySQL Connection object
"""
connection = pymysql.connect(
host=host, port=port, user=user, password=password, db=db,
)
print("[+] Slurm accounting DB connection is up! [+]")
return connection
def create_ldap_connection(host):
"""
Creates an LDAP connection object with a given hostname
Parameters
----------
host : hostname with the LDAP database in the form of (ldap://host)
Returns
-------
LDAP connection object
"""
connection = ldap.initialize(host)
connection.set_option(ldap.OPT_REFERRALS, 0)
connection.simple_bind_s()
return connection
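A short usage sketch of the helpers above; every connection detail below (host names, port, user and database names) is a placeholder rather than a value from a real deployment.
domain = get_domain_name()
password = get_db_password()
slurm_db = create_slurm_db_connection(
    host='slurmdb.' + domain, port=3306, user='slurm', password=password, db='slurm_acct_db'
)
ldap_conn = create_ldap_connection('ldap://ldap.' + domain)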
| 25.608108
| 87
| 0.626385
|
import pymysql.cursors
import ldap
def get_domain_name():
with open("/var/www/logic_webapp/webapp_config") as file:
line = file.readline()
domain = line.split("=")[1].rstrip()
return domain
def get_db_password():
with open("/var/www/logic_webapp/webapp_config") as file:
line = file.readlines()[1]
password = line.split("=")[
1
].rstrip()
return password
def create_slurm_db_connection(host, port, user, password, db):
connection = pymysql.connect(
host=host, port=port, user=user, password=password, db=db,
)
print("[+] Slurm accounting DB connection is up! [+]")
return connection
def create_ldap_connection(host):
connection = ldap.initialize(host)
connection.set_option(ldap.OPT_REFERRALS, 0)
connection.simple_bind_s()
return connection
| true
| true
|
790371466426bec98edb736c5dd24251f6ac2343
| 1,250
|
py
|
Python
|
test/test_data_protection_officer.py
|
My-Data-My-Consent/python-sdk
|
414640bcda6350e6f5e74e42442737eb8d5b7447
|
[
"Apache-2.0"
] | null | null | null |
test/test_data_protection_officer.py
|
My-Data-My-Consent/python-sdk
|
414640bcda6350e6f5e74e42442737eb8d5b7447
|
[
"Apache-2.0"
] | 5
|
2021-12-19T10:29:43.000Z
|
2022-03-31T22:15:37.000Z
|
test/test_data_protection_officer.py
|
mydatamyconsent/python-sdk
|
414640bcda6350e6f5e74e42442737eb8d5b7447
|
[
"Apache-2.0"
] | null | null | null |
"""
My Data My Consent - Developer API
Unleashing the power of data consent by establishing trust. The Platform Core Developer API defines a set of capabilities that can be used to request, issue, manage and update data, documents and credentials by organizations. The API can be used to request, manage and update Decentralised Identifiers, Financial Data, Health Data issue Documents, Credentials directly or using OpenID Connect flows, and verify Messages signed with DIDs and much more. # noqa: E501
The version of the OpenAPI document: v1
Contact: support@mydatamyconsent.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import com.mydatamyconsent
from com.mydatamyconsent.model.data_protection_officer import DataProtectionOfficer
class TestDataProtectionOfficer(unittest.TestCase):
"""DataProtectionOfficer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDataProtectionOfficer(self):
"""Test DataProtectionOfficer"""
# FIXME: construct object with mandatory attributes with example values
# model = DataProtectionOfficer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 33.783784
| 469
| 0.7448
|
import sys
import unittest
import com.mydatamyconsent
from com.mydatamyconsent.model.data_protection_officer import DataProtectionOfficer
class TestDataProtectionOfficer(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testDataProtectionOfficer(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
790371ae35706b91f27bf2d89e39770f3441a18b
| 4,910
|
py
|
Python
|
superset/sql_parse.py
|
gabe-lyons/incubator-superset
|
7669cdb8c51bcc3f298aff2a14cbfeea3cbf5f13
|
[
"Apache-2.0"
] | 4
|
2018-07-25T17:12:13.000Z
|
2020-12-28T10:26:53.000Z
|
superset/sql_parse.py
|
ksangeet9ap/incubator-superset
|
f417172071503e48bdbbe00d8254c204928a5d3e
|
[
"Apache-2.0"
] | 1
|
2018-02-22T23:29:06.000Z
|
2018-02-23T21:44:00.000Z
|
superset/sql_parse.py
|
ksangeet9ap/incubator-superset
|
f417172071503e48bdbbe00d8254c204928a5d3e
|
[
"Apache-2.0"
] | 4
|
2020-03-07T11:58:42.000Z
|
2020-05-26T02:07:27.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=C,R,W
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import Keyword, Name
RESULT_OPERATIONS = {'UNION', 'INTERSECT', 'EXCEPT'}
PRECEDES_TABLE_NAME = {'FROM', 'JOIN', 'DESC', 'DESCRIBE', 'WITH'}
# TODO: some sql_lab logic here.
class SupersetQuery(object):
def __init__(self, sql_statement):
self.sql = sql_statement
self._table_names = set()
self._alias_names = set()
# TODO: multistatement support
logging.info('Parsing with sqlparse statement {}'.format(self.sql))
self._parsed = sqlparse.parse(self.sql)
for statement in self._parsed:
self.__extract_from_token(statement)
self._table_names = self._table_names - self._alias_names
@property
def tables(self):
return self._table_names
def is_select(self):
return self._parsed[0].get_type() == 'SELECT'
def stripped(self):
sql = self.sql
if sql:
while sql[-1] in (' ', ';', '\n', '\t'):
sql = sql[:-1]
return sql
@staticmethod
def __precedes_table_name(token_value):
for keyword in PRECEDES_TABLE_NAME:
if keyword in token_value:
return True
return False
@staticmethod
def __get_full_name(identifier):
if len(identifier.tokens) > 1 and identifier.tokens[1].value == '.':
return '{}.{}'.format(identifier.tokens[0].value,
identifier.tokens[2].value)
return identifier.get_real_name()
@staticmethod
def __is_result_operation(keyword):
for operation in RESULT_OPERATIONS:
if operation in keyword.upper():
return True
return False
@staticmethod
def __is_identifier(token):
return (
isinstance(token, IdentifierList) or isinstance(token, Identifier))
def __process_identifier(self, identifier):
# exclude subselects
if '(' not in '{}'.format(identifier):
self._table_names.add(SupersetQuery.__get_full_name(identifier))
return
# store aliases
if hasattr(identifier, 'get_alias'):
self._alias_names.add(identifier.get_alias())
if hasattr(identifier, 'tokens'):
# some aliases are not parsed properly
if identifier.tokens[0].ttype == Name:
self._alias_names.add(identifier.tokens[0].value)
self.__extract_from_token(identifier)
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
        :param table_name: string, name of the table that will contain the
            results of the query execution
        :param overwrite: boolean, the table table_name will be dropped first if true
        :return: string, create table as query
"""
# TODO(bkyryliuk): enforce that all the columns have names.
# Presto requires it for the CTA operation.
# TODO(bkyryliuk): drop table if allowed, check the namespace and
# the permissions.
# TODO raise if multi-statement
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql.format(**locals())
def __extract_from_token(self, token):
if not hasattr(token, 'tokens'):
return
table_name_preceding_token = False
for item in token.tokens:
if item.is_group and not self.__is_identifier(item):
self.__extract_from_token(item)
if item.ttype in Keyword:
if SupersetQuery.__precedes_table_name(item.value.upper()):
table_name_preceding_token = True
continue
if not table_name_preceding_token:
continue
if item.ttype in Keyword:
if SupersetQuery.__is_result_operation(item.value):
table_name_preceding_token = False
continue
# FROM clause is over
break
if isinstance(item, Identifier):
self.__process_identifier(item)
if isinstance(item, IdentifierList):
for token in item.tokens:
if SupersetQuery.__is_identifier(token):
self.__process_identifier(token)
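A brief illustrative use of SupersetQuery; the SQL text and table names are made up, and the expected outputs assume a typical sqlparse result.
q = SupersetQuery('SELECT * FROM schema_a.table_a')
print(q.tables)          # expected: {'schema_a.table_a'}
print(q.is_select())     # True
print(q.as_create_table('tmp_table', overwrite=True))
# DROP TABLE IF EXISTS tmp_table;
# CREATE TABLE tmp_table AS
# SELECT * FROM schema_a.table_a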
| 34.822695
| 79
| 0.614868
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import Keyword, Name
RESULT_OPERATIONS = {'UNION', 'INTERSECT', 'EXCEPT'}
PRECEDES_TABLE_NAME = {'FROM', 'JOIN', 'DESC', 'DESCRIBE', 'WITH'}
class SupersetQuery(object):
def __init__(self, sql_statement):
self.sql = sql_statement
self._table_names = set()
self._alias_names = set()
logging.info('Parsing with sqlparse statement {}'.format(self.sql))
self._parsed = sqlparse.parse(self.sql)
for statement in self._parsed:
self.__extract_from_token(statement)
self._table_names = self._table_names - self._alias_names
@property
def tables(self):
return self._table_names
def is_select(self):
return self._parsed[0].get_type() == 'SELECT'
def stripped(self):
sql = self.sql
if sql:
while sql[-1] in (' ', ';', '\n', '\t'):
sql = sql[:-1]
return sql
@staticmethod
def __precedes_table_name(token_value):
for keyword in PRECEDES_TABLE_NAME:
if keyword in token_value:
return True
return False
@staticmethod
def __get_full_name(identifier):
if len(identifier.tokens) > 1 and identifier.tokens[1].value == '.':
return '{}.{}'.format(identifier.tokens[0].value,
identifier.tokens[2].value)
return identifier.get_real_name()
@staticmethod
def __is_result_operation(keyword):
for operation in RESULT_OPERATIONS:
if operation in keyword.upper():
return True
return False
@staticmethod
def __is_identifier(token):
return (
isinstance(token, IdentifierList) or isinstance(token, Identifier))
def __process_identifier(self, identifier):
if '(' not in '{}'.format(identifier):
self._table_names.add(SupersetQuery.__get_full_name(identifier))
return
if hasattr(identifier, 'get_alias'):
self._alias_names.add(identifier.get_alias())
if hasattr(identifier, 'tokens'):
if identifier.tokens[0].ttype == Name:
self._alias_names.add(identifier.tokens[0].value)
self.__extract_from_token(identifier)
def as_create_table(self, table_name, overwrite=False):
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql.format(**locals())
def __extract_from_token(self, token):
if not hasattr(token, 'tokens'):
return
table_name_preceding_token = False
for item in token.tokens:
if item.is_group and not self.__is_identifier(item):
self.__extract_from_token(item)
if item.ttype in Keyword:
if SupersetQuery.__precedes_table_name(item.value.upper()):
table_name_preceding_token = True
continue
if not table_name_preceding_token:
continue
if item.ttype in Keyword:
if SupersetQuery.__is_result_operation(item.value):
table_name_preceding_token = False
continue
break
if isinstance(item, Identifier):
self.__process_identifier(item)
if isinstance(item, IdentifierList):
for token in item.tokens:
if SupersetQuery.__is_identifier(token):
self.__process_identifier(token)
| true
| true
|
7903730eae04facba1aeb2dc3266fd3633c7ab6a
| 1,184
|
py
|
Python
|
api/tests/routes/test_detection.py
|
mzeidhassan/doctr
|
14b376e07d31b09b6bd31bceebf6ffb477c30f08
|
[
"Apache-2.0"
] | 1
|
2021-09-26T06:03:10.000Z
|
2021-09-26T06:03:10.000Z
|
api/tests/routes/test_detection.py
|
mzeidhassan/doctr
|
14b376e07d31b09b6bd31bceebf6ffb477c30f08
|
[
"Apache-2.0"
] | null | null | null |
api/tests/routes/test_detection.py
|
mzeidhassan/doctr
|
14b376e07d31b09b6bd31bceebf6ffb477c30f08
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import pytest
import numpy as np
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_text_detection(test_app_asyncio, mock_detection_image):
response = await test_app_asyncio.post("/detection", files={'file': mock_detection_image})
assert response.status_code == 200
json_response = response.json()
gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339
    # Check that IoU with GT is reasonable
assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]
pred_boxes = np.array([elt['box'] for elt in json_response])
iou_mat = box_iou(gt_boxes, pred_boxes)
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]
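As a standalone illustration of the matching step used above (independent of doctr's box_iou), linear_sum_assignment on the negated IoU matrix pairs each ground-truth box with the prediction that maximises total IoU; the matrix below is made up.
import numpy as np
from scipy.optimize import linear_sum_assignment
iou_mat = np.array([[0.92, 0.10],
                    [0.05, 0.88]])   # rows: GT boxes, columns: predicted boxes
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)  # maximise IoU = minimise -IoU
print(list(zip(gt_idxs, pred_idxs)))                  # [(0, 0), (1, 1)]
print(iou_mat[gt_idxs, pred_idxs] >= 0.8)             # [ True  True]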
| 39.466667
| 98
| 0.714527
|
import pytest
import numpy as np
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_text_detection(test_app_asyncio, mock_detection_image):
response = await test_app_asyncio.post("/detection", files={'file': mock_detection_image})
assert response.status_code == 200
json_response = response.json()
gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339
assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]
pred_boxes = np.array([elt['box'] for elt in json_response])
iou_mat = box_iou(gt_boxes, pred_boxes)
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]
| true
| true
|
790373ab0d7769df144ec513b502f523788f4b22
| 2,534
|
py
|
Python
|
mypy/infer.py
|
ooprathamm/mypy
|
1ac9c77bb0b5b95a9b3ee8936ac74a52e6e641ac
|
[
"PSF-2.0"
] | null | null | null |
mypy/infer.py
|
ooprathamm/mypy
|
1ac9c77bb0b5b95a9b3ee8936ac74a52e6e641ac
|
[
"PSF-2.0"
] | null | null | null |
mypy/infer.py
|
ooprathamm/mypy
|
1ac9c77bb0b5b95a9b3ee8936ac74a52e6e641ac
|
[
"PSF-2.0"
] | null | null | null |
"""Utilities for type argument inference."""
from typing import List, Optional, Sequence, NamedTuple
from mypy.constraints import (
infer_constraints, infer_constraints_for_callable, SUBTYPE_OF, SUPERTYPE_OF
)
from mypy.types import Type, TypeVarId, CallableType, Instance
from mypy.nodes import ArgKind
from mypy.solve import solve_constraints
class ArgumentInferContext(NamedTuple):
"""Type argument inference context.
We need this because we pass around ``Mapping`` and ``Iterable`` types.
These types are only known by ``TypeChecker`` itself.
It is required for ``*`` and ``**`` argument inference.
https://github.com/python/mypy/issues/11144
"""
mapping_type: Instance
iterable_type: Instance
def infer_function_type_arguments(callee_type: CallableType,
arg_types: Sequence[Optional[Type]],
arg_kinds: List[ArgKind],
formal_to_actual: List[List[int]],
context: ArgumentInferContext,
strict: bool = True) -> List[Optional[Type]]:
"""Infer the type arguments of a generic function.
Return an array of lower bound types for the type variables -1 (at
index 0), -2 (at index 1), etc. A lower bound is None if a value
could not be inferred.
Arguments:
callee_type: the target generic function
arg_types: argument types at the call site (each optional; if None,
we are not considering this argument in the current pass)
arg_kinds: nodes.ARG_* values for arg_types
formal_to_actual: mapping from formal to actual variable indices
"""
# Infer constraints.
constraints = infer_constraints_for_callable(
callee_type, arg_types, arg_kinds, formal_to_actual, context)
# Solve constraints.
type_vars = callee_type.type_var_ids()
return solve_constraints(type_vars, constraints, strict)
def infer_type_arguments(type_var_ids: List[TypeVarId],
template: Type, actual: Type,
is_supertype: bool = False) -> List[Optional[Type]]:
# Like infer_function_type_arguments, but only match a single type
# against a generic type.
constraints = infer_constraints(template, actual,
SUPERTYPE_OF if is_supertype else SUBTYPE_OF)
return solve_constraints(type_var_ids, constraints)
| 40.222222
| 82
| 0.649566
|
from typing import List, Optional, Sequence, NamedTuple
from mypy.constraints import (
infer_constraints, infer_constraints_for_callable, SUBTYPE_OF, SUPERTYPE_OF
)
from mypy.types import Type, TypeVarId, CallableType, Instance
from mypy.nodes import ArgKind
from mypy.solve import solve_constraints
class ArgumentInferContext(NamedTuple):
mapping_type: Instance
iterable_type: Instance
def infer_function_type_arguments(callee_type: CallableType,
arg_types: Sequence[Optional[Type]],
arg_kinds: List[ArgKind],
formal_to_actual: List[List[int]],
context: ArgumentInferContext,
strict: bool = True) -> List[Optional[Type]]:
constraints = infer_constraints_for_callable(
callee_type, arg_types, arg_kinds, formal_to_actual, context)
type_vars = callee_type.type_var_ids()
return solve_constraints(type_vars, constraints, strict)
def infer_type_arguments(type_var_ids: List[TypeVarId],
template: Type, actual: Type,
is_supertype: bool = False) -> List[Optional[Type]]:
constraints = infer_constraints(template, actual,
SUPERTYPE_OF if is_supertype else SUBTYPE_OF)
return solve_constraints(type_var_ids, constraints)
| true
| true
|
79037536069031e76e3ee7cee6b042cb0e25ffa4
| 2,897
|
py
|
Python
|
Broca/faq_engine/agent.py
|
lawRossi/Broca
|
7dcb4e1cb7087c4bd38ad01e73eb1fdab4c6d13d
|
[
"MIT"
] | 3
|
2021-05-10T06:36:21.000Z
|
2021-05-10T06:47:31.000Z
|
Broca/faq_engine/agent.py
|
lawRossi/Broca
|
7dcb4e1cb7087c4bd38ad01e73eb1fdab4c6d13d
|
[
"MIT"
] | null | null | null |
Broca/faq_engine/agent.py
|
lawRossi/Broca
|
7dcb4e1cb7087c4bd38ad01e73eb1fdab4c6d13d
|
[
"MIT"
] | null | null | null |
"""
@Author: Rossi
Created At: 2021-02-21
"""
import json
import time
from mako.template import Template
from Broca.faq_engine.index import ESIndex, VectorIndex
from Broca.message import BotMessage
class FAQAgent:
def __init__(self, agent_name, es_index, vector_index, threshold, topk, prompt_threshold,
template, prompt_template):
self.agent_name = agent_name
self.es_index = es_index
self.vector_index = vector_index
self.threshold = threshold
self.topk = topk
self.prompt_threshold = prompt_threshold
self.template = template
self.prompt_template = prompt_template
@classmethod
def from_config(cls, config):
agent_name = config["agent_name"]
es_config = config["es_index"]
es_index = ESIndex.from_config(es_config)
vector_index_config = config["vector_index"]
vector_index = VectorIndex.from_config(vector_index_config)
if config["build_index_at_start"]:
es_index.build_index_from_file(config["document_file"])
time.sleep(5) # wait until the es index gets ready
vector_index.build_index(es_index)
vector_index.load_index()
threshold = config["threshold"]
topk = config["topk"]
prompt_threshold = config["prompt_threshold"]
template = Template(filename=config["template"])
prompt_template = Template(filename=config["prompt_template"])
return cls(agent_name, es_index, vector_index, threshold, topk, prompt_threshold, template, prompt_template)
@classmethod
def from_config_file(cls, config_file):
with open(config_file, encoding="utf-8") as fi:
config = json.load(fi)
return cls.from_config(config)
def handle_message(self, message):
"""Respond to the user message by retriving documents from the knowledge base.
Args:
message ([type]): [description]
"""
query = message.text
candidates, similarities = self.vector_index.retrieve(query, self.topk)
selected = [candidate for candidate, similarity in zip(candidates, similarities) if similarity >= self.threshold]
result = {}
if selected:
documents = self.es_index.get_answer_by_question_ids(selected)
response = self.template.render(documents=documents)
result["response"] = BotMessage(message.sender_id, response.strip())
else:
selected = [candidate for candidate, similarity in zip(candidates, similarities) if similarity >= self.prompt_threshold]
if selected:
documents = self.es_index.get_documents_by_ids(selected)
prompt = self.prompt_template.render(documents=documents)
result["prompt"] = BotMessage(message.sender_id, prompt.strip())
return result
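A hypothetical usage sketch; the config path and message contents are placeholders, and the message object only needs the `text` and `sender_id` attributes that handle_message() reads.
from types import SimpleNamespace
agent = FAQAgent.from_config_file('faq_agent_config.json')
user_message = SimpleNamespace(text='How do I reset my password?', sender_id='user-123')
result = agent.handle_message(user_message)
if 'response' in result:
    print('answer:', result['response'])      # BotMessage rendered from the answer template
elif 'prompt' in result:
    print('did you mean:', result['prompt'])  # BotMessage rendered from the prompt template
else:
    print('no sufficiently similar question found')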
| 39.684932
| 132
| 0.669313
|
import json
import time
from mako.template import Template
from Broca.faq_engine.index import ESIndex, VectorIndex
from Broca.message import BotMessage
class FAQAgent:
def __init__(self, agent_name, es_index, vector_index, threshold, topk, prompt_threshold,
template, prompt_template):
self.agent_name = agent_name
self.es_index = es_index
self.vector_index = vector_index
self.threshold = threshold
self.topk = topk
self.prompt_threshold = prompt_threshold
self.template = template
self.prompt_template = prompt_template
@classmethod
def from_config(cls, config):
agent_name = config["agent_name"]
es_config = config["es_index"]
es_index = ESIndex.from_config(es_config)
vector_index_config = config["vector_index"]
vector_index = VectorIndex.from_config(vector_index_config)
if config["build_index_at_start"]:
es_index.build_index_from_file(config["document_file"])
time.sleep(5)
vector_index.build_index(es_index)
vector_index.load_index()
threshold = config["threshold"]
topk = config["topk"]
prompt_threshold = config["prompt_threshold"]
template = Template(filename=config["template"])
prompt_template = Template(filename=config["prompt_template"])
return cls(agent_name, es_index, vector_index, threshold, topk, prompt_threshold, template, prompt_template)
@classmethod
def from_config_file(cls, config_file):
with open(config_file, encoding="utf-8") as fi:
config = json.load(fi)
return cls.from_config(config)
def handle_message(self, message):
query = message.text
candidates, similarities = self.vector_index.retrieve(query, self.topk)
selected = [candidate for candidate, similarity in zip(candidates, similarities) if similarity >= self.threshold]
result = {}
if selected:
documents = self.es_index.get_answer_by_question_ids(selected)
response = self.template.render(documents=documents)
result["response"] = BotMessage(message.sender_id, response.strip())
else:
selected = [candidate for candidate, similarity in zip(candidates, similarities) if similarity >= self.prompt_threshold]
if selected:
documents = self.es_index.get_documents_by_ids(selected)
prompt = self.prompt_template.render(documents=documents)
result["prompt"] = BotMessage(message.sender_id, prompt.strip())
return result
| true
| true
|
790375eb79545b153373de236a12b78776be585b
| 578
|
py
|
Python
|
Python Aulas/Mundo 1/Aula 010c.py
|
rodrigobarbonifilho/Python
|
807bf01ddacd0a0f7f563ae5a65f8fb2dd22ca16
|
[
"MIT"
] | null | null | null |
Python Aulas/Mundo 1/Aula 010c.py
|
rodrigobarbonifilho/Python
|
807bf01ddacd0a0f7f563ae5a65f8fb2dd22ca16
|
[
"MIT"
] | null | null | null |
Python Aulas/Mundo 1/Aula 010c.py
|
rodrigobarbonifilho/Python
|
807bf01ddacd0a0f7f563ae5a65f8fb2dd22ca16
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Examples for understanding
"""nome = input('Qual seu nome?' )
if nome == 'Rodrigo' or nome == 'RAYANNE':
print('Que nome lindo vocé tem!')
else:
print('Que nome tão normal!!!')
print('Bom dia, {}'.format(nome))"""
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2) / 2
print('A sua média foi: {:.1f}'.format(m))
print('A sua media foi boa!' if m >= 6.0 else 'Sua media foi ruim,estude mais!')
"""if m >= 6.0:
print('Sua média foi boa!')
else:
print('A sua média foi ruim,estude mais!')"""
| 28.9
| 80
| 0.615917
|
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2) / 2
print('A sua média foi: {:.1f}'.format(m))
print('A sua media foi boa!' if m >= 6.0 else 'Sua media foi ruim,estude mais!')
| true
| true
|
7903760959e968ef65e34c134e43c14d89f97e93
| 540
|
py
|
Python
|
E_business_project/apps/goods/urls.py
|
ambushonallsides1/E_business_project
|
bf386391e58e0e82787ddc07fc678937e345a4cb
|
[
"MIT"
] | 1
|
2020-02-05T14:00:19.000Z
|
2020-02-05T14:00:19.000Z
|
E_business_project/apps/goods/urls.py
|
ambushonallsides1/E_business_project
|
bf386391e58e0e82787ddc07fc678937e345a4cb
|
[
"MIT"
] | 6
|
2020-05-11T20:34:17.000Z
|
2021-11-02T15:46:41.000Z
|
E_business_project/apps/goods/urls.py
|
ambushonallsides1/E_business_project
|
bf386391e58e0e82787ddc07fc678937e345a4cb
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
    # Product list page
url(r'^list/(?P<category_id>\d+)/(?P<page_num>\d+)/$', views.ListView.as_view(), name='list'),
    # Hot-sales ranking data
url(r'^hot/(?P<category_id>\d+)/$', views.HotGoodsView.as_view()),
    # Product detail page
url(r'^detail/(?P<sku_id>\d+)/$', views.DetailView.as_view(), name='detail'),
    # Record visit counts for a product category
url(r'^detail/visit/(?P<category_id>\d+)/$', views.DetailVisitView.as_view()),
    # Browsing history
url(r'^browse_histories/$', views.UserBrowseHistory.as_view()),
]
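For orientation, the requests below (IDs made up) show which view each pattern above dispatches to; real paths also carry whatever prefix this urlconf is included under.
# /list/115/1/        -> views.ListView          (category_id=115, page_num=1)
# /hot/115/           -> views.HotGoodsView      (category_id=115)
# /detail/3/          -> views.DetailView        (sku_id=3)
# /detail/visit/115/  -> views.DetailVisitView   (category_id=115)
# /browse_histories/  -> views.UserBrowseHistory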
| 31.764706
| 98
| 0.627778
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^list/(?P<category_id>\d+)/(?P<page_num>\d+)/$', views.ListView.as_view(), name='list'),
url(r'^hot/(?P<category_id>\d+)/$', views.HotGoodsView.as_view()),
url(r'^detail/(?P<sku_id>\d+)/$', views.DetailView.as_view(), name='detail'),
url(r'^detail/visit/(?P<category_id>\d+)/$', views.DetailVisitView.as_view()),
url(r'^browse_histories/$', views.UserBrowseHistory.as_view()),
]
| true
| true
|
7903762327cb4b6f5b580732681dcf05dcaaee2f
| 8,881
|
py
|
Python
|
solo/utils/classification_dataloader.py
|
fariasfc/solo-learn
|
f53ff40edbc7e96e06db5238d8c3a44f7b8965c1
|
[
"MIT"
] | null | null | null |
solo/utils/classification_dataloader.py
|
fariasfc/solo-learn
|
f53ff40edbc7e96e06db5238d8c3a44f7b8965c1
|
[
"MIT"
] | null | null | null |
solo/utils/classification_dataloader.py
|
fariasfc/solo-learn
|
f53ff40edbc7e96e06db5238d8c3a44f7b8965c1
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
    If you want to do exotic augmentations, you can just re-write this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
data_dir Optional[Union[str, Path]]: path where to download/locate the dataset.
train_dir Optional[Union[str, Path]]: subpath where the training data is located.
val_dir Optional[Union[str, Path]]: subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
dataset (str): dataset name.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
        Tuple[DataLoader, DataLoader]: prepared training and validation dataloaders.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
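An illustrative call putting the pieces above together; the dataset name and data_dir are example values (batch_size and num_workers follow the documented defaults).
train_loader, val_loader = prepare_data(
    'cifar10',
    data_dir='./datasets',
    batch_size=64,
    num_workers=4,
)
print(len(train_loader.dataset), len(val_loader.dataset))  # e.g. 50000 and 10000 for CIFAR-10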
| 31.492908
| 99
| 0.596329
|
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
| true
| true
|
7903777a50ff41a94bed60837d113e3a3fca6cc0
| 23,095
|
py
|
Python
|
sub_models.py
|
tmartin2/EnsembleSplice-Inactive
|
a161ff007b47ceadd3a21376f2eac2971bb81d90
|
[
"MIT"
] | null | null | null |
sub_models.py
|
tmartin2/EnsembleSplice-Inactive
|
a161ff007b47ceadd3a21376f2eac2971bb81d90
|
[
"MIT"
] | null | null | null |
sub_models.py
|
tmartin2/EnsembleSplice-Inactive
|
a161ff007b47ceadd3a21376f2eac2971bb81d90
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) 2021 Trevor P. Martin. All rights reserved.
# Distributed under the MIT License.
# -----------------------------------------------------------------------------
from Data import encode_data
# from utils import cross_validation
from Models import utils
from Models import build_models
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
import tensorflow as tf
import copy
class CNN01(tf.keras.Model):
@staticmethod
def build(rows, columns, channels, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns, channels)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=32,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(
filters=128,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN02(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv1D(
filters=64,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Conv1D(
filters=128,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN03(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Conv1D(
filters=64,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN04(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN05(tf.keras.Model):
@staticmethod
def build(rows, columns, channels, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns, channels)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=32,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(
filters=128,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN01(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(units=units//2, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.15))
model.add(tf.keras.layers.Dense(units=units//4, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN02(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.50))
model.add(tf.keras.layers.Dense(units=units//2, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN03(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units*2, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.50))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class RNN(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=units,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dropout(rate=0.20))
model.add(tf.keras.layers.LSTM(
units=units//2,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.20))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
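# A minimal sketch of how one of the sub-model builders above could be exercised on
# its own. The 90x4 one-hot acceptor input shape and the 2 output classes are
# illustrative assumptions (90 matches the nn269 acceptor row length used below),
# not values fixed by this file.
if __name__ == "__main__":
    _demo = CNN01.build(rows=90, columns=4, channels=1, classes=2)
    _demo.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    _demo.summary()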
def run(datasets,
splice_sites,
sub_models,
save,
vis,
iter,
metrics,
summary,
config,
num_folds,
bal,
imbal,
imbal_t,
imbal_f,
batch_size,
epochs
):
"""
Parameters
----------
dataset: a string {nn269, ce, hs3d} indicating which dataset to use
splice_site_type: a string {acceptor, donor} indicating which splice
site to train on
model_architecture: a string {cnn, dnn, rnn} indicating which model
architecture to use for training
save_model: boolean, whether to save the current model
bal: boolean, whether to balance the dataset
summary: boolean, whether to print out the model architecture summary
config: boolean, whether to print out the model's configuration
visualize: boolean, whether to save a performance graph of the model
metrics: boolean, whether to print out the evaluation metrics for the model
num_folds: int (default 10), the number of folds for k-fold cross validation
epochs: int (default 15), the number of epochs for the chosen model
batch_size: int (default 32), the model batch size
model_iter: integer, the iteration of the current model architecture (e.g.
if this is the third cnn architecture you are testing, use 3)
"""
# input sequence row length for each dataset, keyed by splice site type
network_rows = {
'acceptor':{
'nn269':90, 'ce':141,
'hs3d':140, 'hs2':602,
'ce2':602, 'dm':602,
'ar':602, 'or':602,
},
'donor':{
'nn269':15, 'ce':141,
'hs3d':140, 'hs2':602,
'ce2':602, 'dm':602,
'ar':602, 'or':602,
},
}
# initialize selected sub models
to_run = dict(
[
(sub_model,{
'nn269':'', 'ce':'',
'hs3d':'', 'hs2':'',
'ce2':'', 'dm':'',
'ar':'', 'or':''
}) for sub_model in sub_models
]
)
# results dictionary
results = copy.deepcopy(to_run)
# populate sub models with encoded data
for sub_model in sub_models:
for dataset in datasets:
# encode datasets -> return (acc_x, acc_y, don_x, don_y)
to_run[sub_model][dataset] = encode_data.encode(dataset, sub_model, bal)
# get a metrics dictionary
evals = dict(
[
(sub_model, {
'f1':'', 'precision':'',
'sensitivity':'', 'specificity':'',
'recall':'', 'mcc':'',
'err_rate':''
}) for sub_model in sub_models
]
)
# accumulate results from running cross validation
for sub_model in sub_models:
for dataset in datasets:
if to_run[sub_model][dataset] == '':
pass
else:
results[sub_model][dataset] = utils.cross_validation(
num_folds,
sub_model,
splice_sites,
dataset,
to_run[sub_model][dataset],# encoded data for dataset (ds)
network_rows, # donor, acceptor rows for ds
evals,
summary,
config,
batch_size,
epochs,
save,
)
# if vis:
print(results)
return results
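# Hypothetical invocation of run() (illustrative only, not executed here); the
# argument values are assumptions that mirror the parameter descriptions above:
#
#   results = run(datasets=['nn269'], splice_sites=['acceptor', 'donor'],
#                 sub_models=['cnn', 'dnn'], save=False, vis=False, iter=1,
#                 metrics=True, summary=False, config=False, num_folds=10,
#                 bal=True, imbal=False, imbal_t=None, imbal_f=None,
#                 batch_size=32, epochs=15)
#
# results is a nested dict keyed by sub-model and dataset, holding the output
# of utils.cross_validation for each combination that was run.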
# plot results
# loss_acc_sub_models(
# results,
# datasets,
# sub_models,
# epochs,
# num_folds,
# bal
# )
# # different by splice site type
# if splice_site_type == 'acceptor':
# cnn_X_train, cnn_y_train = cnn_acc_x, acc_y
# # same name to preserve for loop structure
# X_train, y_train = rd_acc_x, acc_y
# dataset_row_num = network_rows[dataset][0]
# if splice_site_type == 'donor':
# cnn_X_train, cnn_y_train = cnn_don_x, don_y
# X_train, y_train = rd_don_x, don_y
# dataset_row_num = network_rows[dataset][1]
#
#
# # if tune_rnn:
# # tune_rnn()
#
# # perform cross validation
# # general
# trn_fold_accs, trn_fold_losses = [], []
# val_fold_accs, val_fold_losses = [], []
# # esplice
# rnn_va, rnn_vl, cnn_vl, cnn_va, dnn_vl, dnn_va = [],[],[],[],[],[]
# rnn_ta, rnn_tl, cnn_tl, cnn_ta, dnn_tl, dnn_ta = [],[],[],[],[],[]
#
# # this loop inspired by https://www.machinecurve.com/
# #index.php/2020/02/18/how-to-use-k-fold-cross-validation-with-keras/
# k_fold = KFold(n_splits=num_folds, shuffle=False)
# fold = 1
# for train, test in k_fold.split(X_train, y_train):
# if model_architecture != 'esplice':
# X_trn, y_trn = X_train[train], y_train[train]
# X_val, y_val = X_train[test], y_train[test]
# if model_architecture=='cnn':
# history, model = build_cnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# if model_architecture=='dnn':
# history, model = build_dnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# if model_architecture=='rnn':
# history, model = build_rnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# # model.predict(X_trn)
# val_fold_accs.append(history.history['val_accuracy'])
# val_fold_losses.append(history.history['val_loss'])
# trn_fold_accs.append(history.history['accuracy'])
# trn_fold_losses.append(history.history['loss'])
# fold += 1
# else:
# # set up submodel datasets
# cnn_X_trn, cnn_y_trn = cnn_X_train[train], cnn_y_train[train]
# cnn_X_val, cnn_y_val = cnn_X_train[test], cnn_y_train[test]
# rd_X_trn, rd_y_trn = X_train[train], y_train[train]
# rd_X_val, rd_y_val = X_train[test], y_train[test]
# # build each submodel
# hist01, submodel_01 = build_cnn(
# dataset_row_num,
# summary,
# cnn_X_trn,
# cnn_y_trn,
# batch_size,
# epochs,
# cnn_X_val,
# cnn_y_val,
# fold,
# num_folds
# )
# hist02, submodel_02 = build_dnn(
# dataset_row_num,
# summary,
# rd_X_trn,
# rd_y_trn,
# batch_size,
# epochs,
# rd_X_val,
# rd_y_val,
# fold,
# num_folds
# )
# # hist03, submodel_03 = build_rnn(
# # dataset_row_num,
# # summary,
# # rd_X_trn,
# # rd_y_trn,
# # batch_size,
# # epochs,
# # rd_X_val,
# # rd_y_val,
# # fold,
# # num_folds
# # )
# models = [submodel_01, submodel_02]#, submodel_03]
# trn_scores, val_scores = EnsembleSplice.build(
# models,
# batch_size,
# cnn_X_trn,
# cnn_y_trn,
# cnn_X_val,
# cnn_y_val,
# rd_X_trn,
# rd_y_trn,
# rd_X_val,
# rd_y_val,
# )
# # get final epoch accuracy
# trn_fold_accs.append(trn_scores)
# val_fold_accs.append(val_scores)
# # rnn_va.append(hist03.history['val_accuracy'])
# # rnn_vl.append(hist03.history['val_loss'])
# # rnn_ta.append(hist03.history['accuracy'])
# # rnn_tl.append(hist03.history['loss'])
# # cnn_vl.append(hist01.history['val_loss'])
# # cnn_va.append(hist01.history['val_accuracy'])
# # cnn_tl.append(hist01.history['loss'])
# # cnn_ta.append(hist01.history['accuracy'])
# # dnn_vl.append(hist02.history['val_loss'])
# # dnn_va.append(hist02.history['val_accuracy'])
# # dnn_tl.append(hist02.history['loss'])
# # dnn_ta.append(hist02.history['accuracy'])
#
# # rnn_va.append(hist03.history['val_accuracy'][-1])
# # rnn_vl.append(hist03.history['val_loss'][-1])
# # rnn_ta.append(hist03.history['accuracy'][-1])
# # rnn_tl.append(hist03.history['loss'][-1])
# cnn_vl.append(hist01.history['val_loss'][-1])
# cnn_va.append(hist01.history['val_accuracy'][-1])
# cnn_tl.append(hist01.history['loss'][-1])
# cnn_ta.append(hist01.history['accuracy'][-1])
# dnn_vl.append(hist02.history['val_loss'][-1])
# dnn_va.append(hist02.history['val_accuracy'][-1])
# dnn_tl.append(hist02.history['loss'][-1])
# dnn_ta.append(hist02.history['accuracy'][-1])
#
# fold += 1
#
# # do something with predicted values and real values to get AUC-ROC scores
# # sklearn.metrics.roc_auc_score
# # also get f-score and other scores here
# # maybe connect tune_rnn and build_rnn -> get tuned parameters and plug them
# # in automatically to RNN
#
# if model_architecture != 'esplice':
#
# val_acc_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(val_fold_accs).T)
# val_loss_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(val_fold_losses).T)
# trn_acc_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(trn_fold_accs).T)
# trn_loss_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(trn_fold_losses).T)
#
# std_val_acc = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(val_fold_accs).T)
# std_val_loss = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(val_fold_losses).T)
# std_trn_acc = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(trn_fold_accs).T)
# std_trn_loss = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(trn_fold_losses).T)
#
# values = [
# val_acc_by_epoch,
# std_val_acc,
# trn_acc_by_epoch,
# std_trn_acc,
# val_loss_by_epoch,
# std_val_loss,
# trn_loss_by_epoch,
# std_trn_loss
# ]
#
# if model_architecture == 'esplice':
#
# # make a DICTIONARY AREY
# # ES_Val_ACc: (vacc, std_va)
# mean_good = lambda seq: np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(seq).T)
# std_good = lambda seq: np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(seq).T)
# vacc = val_fold_accs
# tacc = trn_fold_accs
# # std_va = val_fold_accs
# # std_ta = trn_fold_accs
#
# values = [
# val_fold_accs,
# trn_fold_accs,
# #rnn_va,
# # rnn_vl,
# #rnn_ta,
# # rnn_tl,
# # cnn_vl,
# cnn_va,
# # cnn_tl,
# cnn_ta,
# # dnn_vl,
# dnn_va,
# # dnn_tl,
# dnn_ta
# ]
#
# # cnn_mva = mean_good(cnn_va)
# # cnn_mvl = mean_good(cnn_vl)
# # cnn_mta = mean_good(cnn_ta)
# # cnn_mtl = mean_good(cnn_tl)
# # cnn_sva = std_good(cnn_va)
# # cnn_svl = std_good(cnn_vl)
# # cnn_sta = std_good(cnn_ta)
# # cnn_stl = std_good(cnn_tl)
# #
# # dnn_mva = mean_good(dnn_va)
# # dnn_mvl = mean_good(dnn_vl)
# # dnn_mta = mean_good(dnn_ta)
# # dnn_mtl = mean_good(dnn_tl)
# # dnn_sva = std_good(dnn_va)
# # dnn_svl = std_good(dnn_vl)
# # dnn_sta = std_good(dnn_ta)
# # dnn_stl = std_good(dnn_tl)
# #
# # rnn_mva = mean_good(rnn_va)
# # rnn_mvl = mean_good(rnn_vl)
# # rnn_mta = mean_good(rnn_ta)
# # rnn_mtl = mean_good(rnn_tl)
# # rnn_sva = std_good(rnn_va)
# # rnn_svl = std_good(rnn_vl)
# # rnn_sta = std_good(rnn_ta)
# # rnn_stl = std_good(rnn_tl)
#
# # values = [
# # vacc,
# # # std_va,
# # tacc,
# # # std_ta,
# # cnn_mva,
# # cnn_sva,
# # cnn_mvl,
# # cnn_svl,
# # cnn_mta,
# # cnn_sta,
# # cnn_mtl,
# # cnn_stl,
# # dnn_mva,
# # dnn_sva,
# # dnn_mvl,
# # dnn_svl,
# # dnn_mta,
# # dnn_sta,
# # dnn_mtl,
# # dnn_stl,
# # rnn_mva,
# # rnn_sva,
# # rnn_mvl,
# # rnn_svl,
# # rnn_mta,
# # rnn_sta,
# # rnn_mtl,
# # rnn_stl,
# # ]
# if config:
# print(model.get_config())
# if save_model:
# name = input('What would you like to name this model?: ')
# model.save(f'{name}')
# tf.keras.utils.plot_model(model, f'{name}.png', show_shapes=True)
# if visualize:
# loss_acc_esplice(
# values,
# model_architecture,
# dataset,
# splice_site_type,
# num_folds,
# epochs,
# bal,
# )
| 34.781627
| 126
| 0.525352
|
from Data import encode_data
from Models import utils
from Models import build_models
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
import tensorflow as tf
import copy
class CNN01(tf.keras.Model):
@staticmethod
def build(rows, columns, channels, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns, channels)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=32,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(
filters=128,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN02(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv1D(
filters=64,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Conv1D(
filters=128,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN03(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Conv1D(
filters=64,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN04(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN05(tf.keras.Model):
@staticmethod
def build(rows, columns, channels, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns, channels)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=32,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(
filters=128,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN01(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(units=units//2, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.15))
model.add(tf.keras.layers.Dense(units=units//4, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN02(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.50))
model.add(tf.keras.layers.Dense(units=units//2, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN03(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units*2, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.50))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class RNN(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=units,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dropout(rate=0.20))
model.add(tf.keras.layers.LSTM(
units=units//2,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.20))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
def run(datasets,
splice_sites,
sub_models,
save,
vis,
iter,
metrics,
summary,
config,
num_folds,
bal,
imbal,
imbal_t,
imbal_f,
batch_size,
epochs
):
network_rows = {
'acceptor':{
'nn269':90, 'ce':141,
'hs3d':140, 'hs2':602,
'ce2':602, 'dm':602,
'ar':602, 'or':602,
},
'donor':{
'nn269':15, 'ce':141,
'hs3d':140, 'hs2':602,
'ce2':602, 'dm':602,
'ar':602, 'or':602,
},
}
to_run = dict(
[
(sub_model,{
'nn269':'', 'ce':'',
'hs3d':'', 'hs2':'',
'ce2':'', 'dm':'',
'ar':'', 'or':''
}) for sub_model in sub_models
]
)
results = copy.deepcopy(to_run)
for sub_model in sub_models:
for dataset in datasets:
to_run[sub_model][dataset] = encode_data.encode(dataset, sub_model, bal)
evals = dict(
[
(sub_model, {
'f1':'', 'precision':'',
'sensitivity':'', 'specificity':'',
'recall':'', 'mcc':'',
'err_rate':''
}) for sub_model in sub_models
]
)
for sub_model in sub_models:
for dataset in datasets:
if to_run[sub_model][dataset] == '':
pass
else:
results[sub_model][dataset] = utils.cross_validation(
num_folds,
sub_model,
splice_sites,
dataset,
to_run[sub_model][dataset],
network_rows,
evals,
summary,
config,
batch_size,
epochs,
save,
)
print(results)
return results
| true
| true
|
79037789097e9f44c8718db1e462de1fd6ab1be8
| 7,156
|
py
|
Python
|
MyAlgorithm/addWordsToParadigms_old.py
|
oncebasun/seq2seq-theano
|
9d905ed2fb392193e28d67272d3e3f1b5da613ac
|
[
"MIT"
] | null | null | null |
MyAlgorithm/addWordsToParadigms_old.py
|
oncebasun/seq2seq-theano
|
9d905ed2fb392193e28d67272d3e3f1b5da613ac
|
[
"MIT"
] | null | null | null |
MyAlgorithm/addWordsToParadigms_old.py
|
oncebasun/seq2seq-theano
|
9d905ed2fb392193e28d67272d3e3f1b5da613ac
|
[
"MIT"
] | null | null | null |
# Usage: testWordsInCorpus.py [language] {corpus file}
# If no corpus file is named, the programme will try to load a corresponding cPickle file.
#
# German corpus: /mounts/data/proj/huiming/SIGMORPHON/dewiki-20151102-pages-articles-multistream.xml
#
# This script finds words that should belong to a paradigm in the corpus and adds them (for training?).
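# Example invocations (the script name follows the Usage line above; the language
# code and corpus path are placeholders, not the actual values used in the project):
#   python testWordsInCorpus.py german /path/to/dewiki-pages-articles.xml   (reads the corpus, pickles the word counts)
#   python testWordsInCorpus.py german                                      (reuses the previously pickled corpusWords file)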
from getEditTrees import editTreesByPos
from getEditTrees import applyOnlyTree
import sys
import pickle as cPickle
toAdd = {}  # lemma -> set of (edit tree, tag) pairs that should be auto-completed
uniquenessCheck = {}  # (lemma, form) -> set of generated words; used to avoid adding forms we are unsure about
# New autocomplete. Finds the union of two paradigms and checks whether one can
# complete the other: edit trees attested for lemma1 but missing for lemma2 are
# applied to lemma2 and kept if the resulting forms are frequent enough in the corpus.
# We suppose the union consists of at least 2 edit trees.
# TODO: account for umlauts.
# Updates the module-level toAdd dictionary (lemma -> set of (edit tree, tag) pairs)
# rather than returning anything. A small worked example follows the function below.
# TODO: something is still not right here; double-check this logic.
def autoComplete(lemma1, etTag1, lemma2, etTag2, corpusWords):
etAndTagToAdd = set()
notFound = 0
allRight = True
for (et, form) in etTag1.difference(etTag2):
result = applyOnlyTree(lemma2, et)
if result == '#error#':
allRight = False
break
if result not in corpusWords or corpusWords[result] <=3: # orig is 3
notFound += 1
if notFound == 2:
allRight = False
break
else:
etAndTagToAdd.add((et, form))
if allRight and etAndTagToAdd:
if lemma2 not in toAdd:
toAdd[lemma2] = set()
toAdd[lemma2] = toAdd[lemma2].union(etAndTagToAdd)
for (et, form) in etAndTagToAdd:
if (lemma2, form) not in uniquenessCheck:
uniquenessCheck[(lemma2, form)] = set()
else:
if applyOnlyTree(lemma2,et) not in uniquenessCheck[(lemma2, form)]:
print("yeay")
uniquenessCheck[(lemma2, form)].add(applyOnlyTree(lemma2, et))
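# A small worked example of the completion step above (the lemmas and tags are
# hypothetical): if lemma1 is attested with edit trees for the tags {INF, PST, PTCP}
# and lemma2 only with {INF, PST}, the PTCP edit tree is applied to lemma2; the
# generated form is accepted only if it occurs more than 3 times in the corpus, and
# at most one unattested form is tolerated before the whole completion is rejected.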
# Variant of autoComplete for the case where lemma 1 has more edit trees than lemma 2.
# Like autoComplete, it updates the module-level toAdd dictionary (lemma -> set of
# (edit tree, tag) pairs) rather than returning anything; it is not called anywhere in this script.
def autoComplete2(lemma1, etTag1, lemma2, etTag2, corpusWords):
etAndTagToAdd = set()
notFound = 0
allRight = True
for (et, form) in etTag1.difference(etTag2):
result = applyOnlyTree(lemma2, et)
if result == '#error#':
allRight = False
break
if result not in corpusWords or corpusWords[result] <=3: # orig is 3
notFound += 1
if notFound == 2:
allRight = False
break
else:
etAndTagToAdd.add((et, form))
if allRight and etAndTagToAdd:
if lemma2 not in toAdd:
toAdd[lemma2] = set()
toAdd[lemma2] = toAdd[lemma2].union(etAndTagToAdd)
for (et, form) in etAndTagToAdd:
if (lemma2, form) not in uniquenessCheck:
uniquenessCheck[(lemma2, form)] = set()
uniquenessCheck[(lemma2, form)].add(applyOnlyTree(lemma2, et))
# Test whether the set of (edit tree, tag) combinations for one lemma is a subset of the set for another lemma.
# If so, check whether the missing edit trees are applicable and whether the corresponding words appear in the corpus.
def getAdditionalWords(lemmaToEtAndTag, corpusWords):
isTrue = 0
isFalse = 0
for lemma1, etTag1 in lemmaToEtAndTag.items():
for lemma2, etTag2 in lemmaToEtAndTag.items():
if len(etTag1) <= 1 or len(etTag2) <= 1: # for now, don't complete things with 0 or only 1 entry. We are just not sure enough.
isFalse += 1
continue
maybeSame = False
if len(etTag1) > len(etTag2)+2:
if len(etTag1) >= 3 and len(etTag2.union(etTag1)) > 1 and etTag2.issubset(etTag1):
maybeSame = True
autoComplete(lemma1, etTag1, lemma2, etTag2, corpusWords)
isTrue += 1
else:
isFalse += 1
elif len(etTag2) > len(etTag1)+2:
if len(etTag2) >= 3 and len(etTag2.union(etTag1)) > 1 and etTag1.issubset(etTag2):
maybeSame = True
autoComplete(lemma2, etTag2, lemma1, etTag1, corpusWords)
isTrue += 1
else:
isFalse += 1
#print(str(len(toAdd)) + ' words have been added.')
#print("Is subset: " + str(isTrue))
#print("No subset: " + str(isFalse))
#sys.exit(0)
noWordsToAdd = 0
for lemma, aSet in toAdd.items():
noWordsToAdd += len(aSet)
'''
for (lemma, form), word in uniquenessCheck.items():
if len(word) > 1:
print(word)
sys.exit(0)
'''
return noWordsToAdd
def announce(*objs):
print("# ", *objs, file = sys.stderr)
if __name__ == "__main__":
lang = sys.argv[1]
if len(sys.argv) == 2:
usePickle = True
else:
usePickle = False
posToEt, lemmaToEtAndTag = editTreesByPos(lang)
for lemma, aSet in lemmaToEtAndTag.items():
for (et, form) in aSet:
if (lemma, form) not in uniquenessCheck:
uniquenessCheck[(lemma, form)] = set()
uniquenessCheck[(lemma, form)].add(applyOnlyTree(lemma, et))
#print(applyOnlyTree(lemma, et))
#sys.exit(0)
if not usePickle:
# Read the bonus corpus.
announce('Start reading corpus...')
corpusWords = {} # word to its frequency
with open(sys.argv[2], 'r') as corpus_file:
for line in corpus_file:
#tokens = tokenize.word_tokenize(line.strip())
tokens = line.strip().split(' ')
for token in tokens:
if token not in corpusWords:
corpusWords[token] = 0
corpusWords[token] += 1
announce('Done reading corpus.')
# Store the dictionary to a binary file.
print('Store the dictionary with the corpus words to a binary file...')
save_file = open('/mounts/data/proj/huiming/SIGMORPHON/corpusWords_' + lang, 'wb')
cPickle.dump(corpusWords, save_file, -1)
save_file.close()
print('Done.')
else:
# Load the corpusWords dictionary.
announce('Load the words with cPickle...')
vocListFile = open('/mounts/data/proj/huiming/SIGMORPHON/corpusWords_' + lang, 'rb')
corpusWords = cPickle.load(vocListFile)
vocListFile.close()
announce('Words loaded.')
lastNumber = 0
noWordsToAdd = 1
while noWordsToAdd > lastNumber:
lastNumber = noWordsToAdd
noWordsToAdd = getAdditionalWords(lemmaToEtAndTag, corpusWords)
for lemma, aSet in lemmaToEtAndTag.items():
if lemma in toAdd:
lemmaToEtAndTag[lemma] = lemmaToEtAndTag[lemma].union(toAdd[lemma])
announce('Number of words to add: ' + str(noWordsToAdd))
# The union did not work well for some reason. Therefore, use toAdd directly.
additionalWordsCounter = 0
with open('/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/' + lang + '-bigger-task1-train', 'w') as out_file:
with open('/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/' + lang + '-task1-train', 'r') as original_file:
for line in original_file:
out_file.write(line)
for lemma, etAndTagSet in toAdd.items():
for (et, form) in etAndTagSet:
if len(uniquenessCheck[(lemma, form)]) > 1:
continue
out_file.write(lemma + '\t' + form + '\t' + applyOnlyTree(lemma, et) + '\n')
additionalWordsCounter += 1
print(str(additionalWordsCounter) + ' words have been added.')
| 34.570048
| 132
| 0.654556
|
from getEditTrees import editTreesByPos
from getEditTrees import applyOnlyTree
import sys
import pickle as cPickle
toAdd = {}
uniquenessCheck = {}
def autoComplete(lemma1, etTag1, lemma2, etTag2, corpusWords):
etAndTagToAdd = set()
notFound = 0
allRight = True
for (et, form) in etTag1.difference(etTag2):
result = applyOnlyTree(lemma2, et)
if result == '#error#':
allRight = False
break
if result not in corpusWords or corpusWords[result] <=3:
notFound += 1
if notFound == 2:
allRight = False
break
else:
etAndTagToAdd.add((et, form))
if allRight and etAndTagToAdd:
if lemma2 not in toAdd:
toAdd[lemma2] = set()
toAdd[lemma2] = toAdd[lemma2].union(etAndTagToAdd)
for (et, form) in etAndTagToAdd:
if (lemma2, form) not in uniquenessCheck:
uniquenessCheck[(lemma2, form)] = set()
else:
if applyOnlyTree(lemma2,et) not in uniquenessCheck[(lemma2, form)]:
print("yeay")
uniquenessCheck[(lemma2, form)].add(applyOnlyTree(lemma2, et))
def autoComplete2(lemma1, etTag1, lemma2, etTag2, corpusWords):
etAndTagToAdd = set()
notFound = 0
allRight = True
for (et, form) in etTag1.difference(etTag2):
result = applyOnlyTree(lemma2, et)
if result == '#error#':
allRight = False
break
if result not in corpusWords or corpusWords[result] <=3:
notFound += 1
if notFound == 2:
allRight = False
break
else:
etAndTagToAdd.add((et, form))
if allRight and etAndTagToAdd:
if lemma2 not in toAdd:
toAdd[lemma2] = set()
toAdd[lemma2] = toAdd[lemma2].union(etAndTagToAdd)
for (et, form) in etAndTagToAdd:
if (lemma2, form) not in uniquenessCheck:
uniquenessCheck[(lemma2, form)] = set()
uniquenessCheck[(lemma2, form)].add(applyOnlyTree(lemma2, et))
def getAdditionalWords(lemmaToEtAndTag, corpusWords):
isTrue = 0
isFalse = 0
for lemma1, etTag1 in lemmaToEtAndTag.items():
for lemma2, etTag2 in lemmaToEtAndTag.items():
if len(etTag1) <= 1 or len(etTag2) <= 1:
isFalse += 1
continue
maybeSame = False
if len(etTag1) > len(etTag2)+2:
if len(etTag1) >= 3 and len(etTag2.union(etTag1)) > 1 and etTag2.issubset(etTag1):
maybeSame = True
autoComplete(lemma1, etTag1, lemma2, etTag2, corpusWords)
isTrue += 1
else:
isFalse += 1
elif len(etTag2) > len(etTag1)+2:
if len(etTag2) >= 3 and len(etTag2.union(etTag1)) > 1 and etTag1.issubset(etTag2):
maybeSame = True
autoComplete(lemma2, etTag2, lemma1, etTag1, corpusWords)
isTrue += 1
else:
isFalse += 1
#print(str(len(toAdd)) + ' words have been added.')
#print("Is subset: " + str(isTrue))
#print("No subset: " + str(isFalse))
#sys.exit(0)
noWordsToAdd = 0
for lemma, aSet in toAdd.items():
noWordsToAdd += len(aSet)
return noWordsToAdd
def announce(*objs):
print("# ", *objs, file = sys.stderr)
if __name__ == "__main__":
lang = sys.argv[1]
if len(sys.argv) == 2:
usePickle = True
else:
usePickle = False
posToEt, lemmaToEtAndTag = editTreesByPos(lang)
for lemma, aSet in lemmaToEtAndTag.items():
for (et, form) in aSet:
if (lemma, form) not in uniquenessCheck:
uniquenessCheck[(lemma, form)] = set()
uniquenessCheck[(lemma, form)].add(applyOnlyTree(lemma, et))
#print(applyOnlyTree(lemma, et))
#sys.exit(0)
if not usePickle:
# Read the bonus corpus.
announce('Start reading corpus...')
corpusWords = {} # word to its frequency
with open(sys.argv[2], 'r') as corpus_file:
for line in corpus_file:
#tokens = tokenize.word_tokenize(line.strip())
tokens = line.strip().split(' ')
for token in tokens:
if token not in corpusWords:
corpusWords[token] = 0
corpusWords[token] += 1
announce('Done reading corpus.')
# Store the dictionary to a binary file.
print('Store the dictionary with the corpus words to a binary file...')
save_file = open('/mounts/data/proj/huiming/SIGMORPHON/corpusWords_' + lang, 'wb')
cPickle.dump(corpusWords, save_file, -1)
save_file.close()
print('Done.')
else:
# Load the corpusWords dictionary.
announce('Load the words with cPickle...')
vocListFile = open('/mounts/data/proj/huiming/SIGMORPHON/corpusWords_' + lang, 'rb')
corpusWords = cPickle.load(vocListFile)
vocListFile.close()
announce('Words loaded.')
lastNumber = 0
noWordsToAdd = 1
while noWordsToAdd > lastNumber:
lastNumber = noWordsToAdd
noWordsToAdd = getAdditionalWords(lemmaToEtAndTag, corpusWords)
for lemma, aSet in lemmaToEtAndTag.items():
if lemma in toAdd:
lemmaToEtAndTag[lemma] = lemmaToEtAndTag[lemma].union(toAdd[lemma])
announce('Number of words to add: ' + str(noWordsToAdd))
# The union did not work well for some reason. Therefore, use toAdd directly.
additionalWordsCounter = 0
with open('/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/' + lang + '-bigger-task1-train', 'w') as out_file:
with open('/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/' + lang + '-task1-train', 'r') as original_file:
for line in original_file:
out_file.write(line)
for lemma, etAndTagSet in toAdd.items():
for (et, form) in etAndTagSet:
if len(uniquenessCheck[(lemma, form)]) > 1:
continue
out_file.write(lemma + '\t' + form + '\t' + applyOnlyTree(lemma, et) + '\n')
additionalWordsCounter += 1
print(str(additionalWordsCounter) + ' words have been added.')
| true
| true
|
79037853da791e43432d796bec456bd6930322d3
| 14,822
|
py
|
Python
|
ibmdbpy/tests/test_frame.py
|
marc-mclean1/ibmdbpy
|
46d885e793da52c58424885d74ab1a6668c391b3
|
[
"BSD-3-Clause"
] | 21
|
2016-02-18T13:10:48.000Z
|
2020-11-09T00:09:07.000Z
|
ibmdbpy/tests/test_frame.py
|
marc-mclean1/ibmdbpy
|
46d885e793da52c58424885d74ab1a6668c391b3
|
[
"BSD-3-Clause"
] | 57
|
2016-02-29T15:14:05.000Z
|
2021-07-23T07:19:41.000Z
|
ibmdbpy/tests/test_frame.py
|
marc-mclean1/ibmdbpy
|
46d885e793da52c58424885d74ab1a6668c391b3
|
[
"BSD-3-Clause"
] | 17
|
2016-01-04T07:11:37.000Z
|
2021-11-05T12:45:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2015, IBM Corp.
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
Test module for IdaDataFrameObjects
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import pandas
import pytest
import six
import ibmdbpy
from ibmdbpy import IdaDataBase
class Test_OpenDataFrameObject(object):
def test_idadf_attr_idadb(self, idadf):
assert isinstance(idadf._idadb, IdaDataBase)
def test_idadf_attr_name(self, idadf, df):
assert isinstance(idadf.name, six.string_types)
assert idadf.name == idadf.schema + "." + "TEST_IBMDBPY"
assert idadf.name == idadf.schema + "." + idadf.tablename
def test_idadf_attr_schema(self, idadf):
assert isinstance(idadf.schema, six.string_types)
def test_idadf_attr_indexer(self, idadf):
assert (isinstance(idadf.indexer, six.string_types)|(idadf.indexer is None))
# TODO: check the indexer more thoroughly
def test_idadf_attr_loc(self, idadf):
assert isinstance(idadf.loc, ibmdbpy.indexing.Loc)
def test_idadf_attr_internalstate(self, idadf):
assert isinstance(idadf.internal_state, ibmdbpy.internals.InternalState)
def test_idadf_attr_type(self, idadf):
assert isinstance(idadf.type, six.string_types)
assert idadf.type == "Table"
def test_idadf_atrr_dtypes(self, idadf, df):
assert isinstance(idadf.dtypes, pandas.core.frame.DataFrame)
assert len(idadf.dtypes) == len(idadf.columns)
assert len(idadf.dtypes) == len(df.columns)
def test_idadf_attr_index(self, idadf, df):
# Ok, but what do we do if too big ?
assert type(idadf.index) in [pandas.Int64Index, pandas.Index, pandas.RangeIndex] # Not sure here
assert list(idadf.index) == list(df.index)
def test_idadf_attr_columns(self, idadf, df):
assert isinstance(idadf.columns, pandas.core.index.Index)
assert idadf.columns.equals(df.columns)
def test_idadf_attr_axes(self, idadf):
assert isinstance(idadf.axes, list)
assert len(idadf.axes) == 2
assert idadf.axes[1].equals(idadf.columns)
assert list(idadf.axes[0]) == list(idadf.index)
def test_idadf_attr_shape(self, idadf, df):
assert isinstance(idadf.shape, tuple)
assert len(idadf.shape) == 2
assert idadf.shape[0] == len(idadf.index)
assert idadf.shape[1] == len(idadf.columns)
assert idadf.shape == df.shape
def test_idadf_empty(self, idadb, df):
idadb._create_table(df, "TEST_EMPTY_3496593727406047264076")
to_test = ibmdbpy.IdaDataFrame(idadb, "TEST_EMPTY_3496593727406047264076")
assert(to_test.empty is True)
idadb.drop_table("TEST_EMPTY_3496593727406047264076")
def test_idadf_len(self, idadf, df):
assert(len(idadf) == len(df))
def test_idadf_iter(self, idadf, df):
for idacol, col in zip(idadf, df):
assert(idacol == col)
class Test_IdaDataFrameBehavior(object):
def test_idadf_getitem_1_col_idadf(self, idadf):
if len(idadf.columns) >= 1:
newidadf = idadf[[idadf.columns[0]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 1)
assert(idadf.columns[0] == newidadf.columns[0])
# We don't check whether it is actually the corresponding column
newidadf = idadf[[idadf.columns[-1]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 1)
assert(idadf.columns[-1] == newidadf.columns[0])
def test_idadf_getitem_1_col_idadf_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf[["NOTEXISTING_COLUMN_455849820205"]]
def test_idadf_getitem_2_cols_idadf(self, idadf):
if len(idadf.columns) >= 2:
newidadf = idadf[[idadf.columns[0], idadf.columns[-1]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 2)
assert(idadf.columns[0] == newidadf.columns[0])
assert(idadf.columns[-1] == newidadf.columns[-1])
def test_idadf_getitem_2_cols_idadf_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf[[idadf.columns[0], "NOTEXISTING_COLUMN_455849820205"]]
# TODO: FIX - if you select the same column twice, only one will be taken into account
# (this is because columns are referenced in a dictionary; maybe force renaming the columns)
def test_idadf_getitem_all_cols_idadf(self, idadf):
if len(idadf.columns) >= 1:
newidadf = idadf[list(idadf.columns)]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == len(idadf.columns))
assert(newidadf.shape == idadf.shape)
def test_idadf_getitem_idaseries(self, idadf):
if len(idadf.columns) >= 1:
newidaseries = idadf[idadf.columns[0]]
assert(isinstance(newidaseries, ibmdbpy.IdaSeries))
assert(len(newidaseries.columns) == 1)
assert(idadf.columns[0] == newidaseries.columns[0])
newidaseries = idadf[idadf.columns[-1]]
assert(isinstance(newidaseries, ibmdbpy.IdaDataFrame))
assert(len(newidaseries.columns) == 1)
assert(idadf.columns[-1] == newidaseries.columns[0])
def test_idadf_getitem_idaseries_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf["NOTEXISTING_COLUMN_455849820205"]
def test_idadf_getitem_idaseries_keyerror_several_columns(self, idadf):
if len(idadf.columns) >= 2:
with pytest.raises(KeyError):
idadf[idadf.columns[0], idadf.columns[1]]
def test_idadf_getitem_slice(self, idadb, idadf, idadf_tmp):
if len(idadf) > 10:
newidadf = idadf[0:9]
assert(len(newidadf) == 10)
if len(idadf_tmp) > 10:
idadb.add_column_id(idadf_tmp, destructive = True)
newidadf_1 = idadf_tmp[0:9]
newidadf_2 = idadf_tmp[0:9]
assert(all(newidadf_1.head(10) == newidadf_2.head(10)))
def test_idaseries_getitem_slice(self, idadb, idadf, idadf_tmp):
# Set them as series first and do the same test as above
if len(idadf.columns) >= 1:
idadf = idadf[idadf.columns[0]]
idadf_tmp = idadf_tmp[idadf_tmp.columns[0]]
assert(isinstance(idadf, ibmdbpy.IdaDataFrame))
assert(isinstance(idadf_tmp, ibmdbpy.IdaSeries))
if len(idadf) > 10:
newidadf = idadf[0:9]
assert(len(newidadf) == 10)
def test_idadf_setitem(self, idadf):
pass
def test_idadf_delitem(self, idadf):
pass
def test_idadf_filter_lt(self, idadf):
pass
def test_idadf_filter_le(self, idadf):
pass
def test_idadf_filter_eq(self, idadf):
pass
def test_idadf_filter_ne(self, idadf):
pass
def test_idadf_filter_ge(self, idadf):
pass
def test_idadf_filter_gt(self, idadf):
pass
def test_idadf_feature_add(self, idadf):
pass
def test_idadf_feature_radd(self, idadf):
pass
def test_idadf_feature_div(self, idadf):
pass
def test_idadf_feature_rdiv(self, idadf):
pass
def test_idadf_feature_floordiv(self, idadf):
pass
def test_idadf_feature_rfloordiv(self, idadf):
pass
def test_idadf_feature_mod(self, idadf):
pass
def test_idadf_feature_rmod(self, idadf):
pass
def test_idadf_feature_mul(self, idadf):
pass
def test_idadf_feature_rmul(self, idadf):
pass
def test_idadf_feature_neg(self, idadf):
pass
def test_idadf_feature_rpos(self, idadf):
pass
def test_idadf_feature_pow(self, idadf):
pass
def test_idadf_feature_rpow(self, idadf):
pass
def test_idadf_feature_sub(self, idadf):
pass
def test_idadf_feature_rsub(self, idadf):
pass
class Test_DataBaseFeatures(object):
def test_idadf_exists(self, idadf):
assert(idadf.exists() is True)
pass
def test_idadf_is_view(self, idadf):
assert(idadf.is_view() is False)
pass
def test_idadf_is_table(self, idadf):
assert(idadf.exists() is True)
pass
def test_idadf_get_primary_key(self, idadf):
pass
def test_idadf_ida_query(self, idadf):
pass
def test_idadf_ida_scalar_query(self, idadf):
pass
class Test_DataExploration(object):
### head
# For head and tail we do not test if the rows match because
# the order is not guaranteed anyway
def test_idadf_head_default(self, idadb, idadf, df):
sortkey = idadf.columns[0]
if idadf._get_numerical_columns():
sortkey = idadf._get_numerical_columns()[0]
ida_head = idadf.head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
df_head = df.sort_values(sortkey).head()
assert (ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_10(self, idadb, idadf, df):
ida_head = idadf.head(10)
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 10
def test_idadf_head_10_sort(self, idadb, idadf, df):
ida_head = idadf.head(10, sort=False)
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 10
def test_idadf_head_with_indexer(self, idadb, idadf_indexer, df):
ida_head = idadf_indexer.head()
sortby = len(df.columns)-1
df_head = df.sort_values(df.columns[sortby]).head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[idadf_indexer.columns[sortby]].tolist() ==
df_head[df.columns[sortby]].tolist())
def test_idadf_head_projected_3col(self, idadf, df):
if len(idadf.columns) >= 4:
columns = idadf.columns[1:4].tolist()
newidadf = idadf[columns]
sortkey = newidadf.columns[0]
if newidadf._get_numerical_columns():
sortkey = newidadf._get_numerical_columns()[0]
ida_head = newidadf.head()
df_sorted = df.sort_values(sortkey)
df_head = df_sorted[columns].head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_sorted(self, idadf, df):
sortIdx = len(df.columns) - 1
sortkey = idadf.columns[sortIdx]
newidadf = idadf.sort(sortkey)
ida_head = newidadf.head()
df_head = df.sort_values(sortkey).head()
assert(" ORDER BY " in newidadf.internal_state.get_state())
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_0(self, idadf):
with pytest.raises(ValueError):
idadf.head(0)
def test_idadf_head_negative(self, idadf):
with pytest.raises(ValueError):
idadf.head(-1)
### tail
def test_idadf_tail_default(self, idadb, idadf, df):
sortkey = idadf.columns[0]
if idadf._get_numerical_columns():
sortkey = idadf._get_numerical_columns()[0]
ida_tail = idadf.tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
df_tail = df.sort_values(sortkey).tail()
assert (ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
def test_idadf_tail_10(self, idadb, idadf, df):
ida_tail = idadf.tail(10)
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 10
def test_idadf_tail_10_sort(self, idadb, idadf, df):
ida_tail = idadf.tail(10, sort=False)
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 10
def test_idadf_tail_with_indexer(self, idadb, idadf_indexer, df):
ida_tail = idadf_indexer.tail()
sortby = len(df.columns)-1
df_head = df.sort_values(df.columns[sortby]).tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[idadf_indexer.columns[sortby]].tolist() ==
df_head[df.columns[sortby]].tolist())
def test_idadf_tail_projected_3col(self, idadf, df):
if len(idadf.columns) >= 4:
columns = idadf.columns[1:4].tolist()
newidadf = idadf[columns]
sortkey = newidadf.columns[0]
if newidadf._get_numerical_columns():
sortkey = newidadf._get_numerical_columns()[0]
ida_tail = newidadf.tail()
df_sorted = df.sort_values(sortkey)
df_tail = df_sorted[columns].tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
@pytest.mark.skip(reason="tail on sorted dataframe fails in general, needs fixing first")
def test_idadf_tail_sorted(self, idadf, df):
sortIdx = len(df.columns) - 1
sortkey = idadf.columns[sortIdx]
newidadf = idadf.sort(sortkey)
ida_tail = newidadf.tail()
df_tail = df.sort_values(sortkey).tail()
assert(" ORDER BY " in newidadf.internal_state.get_state())
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
def test_idadf_tail_0(self, idadf):
with pytest.raises(ValueError):
idadf.tail(0)
def test_idadf_tail_negative(self, idadf):
with pytest.raises(ValueError):
idadf.tail(-1)
def test_idadf_pivot_table(self, idadf):
pass
def test_idadf_sort(self, idadf):
pass
# no test
#__enter__
#__exit__
| 34.152074
| 106
| 0.643773
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import pandas
import pytest
import six
import ibmdbpy
from ibmdbpy import IdaDataBase
class Test_OpenDataFrameObject(object):
def test_idadf_attr_idadb(self, idadf):
assert isinstance(idadf._idadb, IdaDataBase)
def test_idadf_attr_name(self, idadf, df):
assert isinstance(idadf.name, six.string_types)
assert idadf.name == idadf.schema + "." + "TEST_IBMDBPY"
assert idadf.name == idadf.schema + "." + idadf.tablename
def test_idadf_attr_schema(self, idadf):
assert isinstance(idadf.schema, six.string_types)
def test_idadf_attr_indexer(self, idadf):
assert (isinstance(idadf.indexer, six.string_types)|(idadf.indexer is None))
def test_idadf_attr_loc(self, idadf):
assert isinstance(idadf.loc, ibmdbpy.indexing.Loc)
def test_idadf_attr_internalstate(self, idadf):
assert isinstance(idadf.internal_state, ibmdbpy.internals.InternalState)
def test_idadf_attr_type(self, idadf):
assert isinstance(idadf.type, six.string_types)
assert idadf.type == "Table"
def test_idadf_atrr_dtypes(self, idadf, df):
assert isinstance(idadf.dtypes, pandas.core.frame.DataFrame)
assert len(idadf.dtypes) == len(idadf.columns)
assert len(idadf.dtypes) == len(df.columns)
def test_idadf_attr_index(self, idadf, df):
assert type(idadf.index) in [pandas.Int64Index, pandas.Index, pandas.RangeIndex]
assert list(idadf.index) == list(df.index)
def test_idadf_attr_columns(self, idadf, df):
assert isinstance(idadf.columns, pandas.core.index.Index)
assert idadf.columns.equals(df.columns)
def test_idadf_attr_axes(self, idadf):
assert isinstance(idadf.axes, list)
assert len(idadf.axes) == 2
assert idadf.axes[1].equals(idadf.columns)
assert list(idadf.axes[0]) == list(idadf.index)
def test_idadf_attr_shape(self, idadf, df):
assert isinstance(idadf.shape, tuple)
assert len(idadf.shape) == 2
assert idadf.shape[0] == len(idadf.index)
assert idadf.shape[1] == len(idadf.columns)
assert idadf.shape == df.shape
def test_idadf_empty(self, idadb, df):
idadb._create_table(df, "TEST_EMPTY_3496593727406047264076")
to_test = ibmdbpy.IdaDataFrame(idadb, "TEST_EMPTY_3496593727406047264076")
assert(to_test.empty is True)
idadb.drop_table("TEST_EMPTY_3496593727406047264076")
def test_idadf_len(self, idadf, df):
assert(len(idadf) == len(df))
def test_idadf_iter(self, idadf, df):
for idacol, col in zip(idadf, df):
assert(idacol == col)
class Test_IdaDataFrameBehavior(object):
def test_idadf_getitem_1_col_idadf(self, idadf):
if len(idadf.columns) >= 1:
newidadf = idadf[[idadf.columns[0]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 1)
assert(idadf.columns[0] == newidadf.columns[0])
newidadf = idadf[[idadf.columns[-1]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 1)
assert(idadf.columns[-1] == newidadf.columns[0])
def test_idadf_getitem_1_col_idadf_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf[["NOTEXISTING_COLUMN_455849820205"]]
def test_idadf_getitem_2_cols_idadf(self, idadf):
if len(idadf.columns) >= 2:
newidadf = idadf[[idadf.columns[0], idadf.columns[-1]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 2)
assert(idadf.columns[0] == newidadf.columns[0])
assert(idadf.columns[-1] == newidadf.columns[-1])
def test_idadf_getitem_2_cols_idadf_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf[[idadf.columns[0], "NOTEXISTING_COLUMN_455849820205"]]
# TODO: FIX - if you select the same column twice, only one will be taken into account
# (this is because columns are referenced in a dictionary; maybe force renaming the columns)
def test_idadf_getitem_all_cols_idadf(self, idadf):
if len(idadf.columns) >= 1:
newidadf = idadf[list(idadf.columns)]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == len(idadf.columns))
assert(newidadf.shape == idadf.shape)
def test_idadf_getitem_idaseries(self, idadf):
if len(idadf.columns) >= 1:
newidaseries = idadf[idadf.columns[0]]
assert(isinstance(newidaseries, ibmdbpy.IdaSeries))
assert(len(newidaseries.columns) == 1)
assert(idadf.columns[0] == newidaseries.columns[0])
newidaseries = idadf[idadf.columns[-1]]
assert(isinstance(newidaseries, ibmdbpy.IdaDataFrame))
assert(len(newidaseries.columns) == 1)
assert(idadf.columns[-1] == newidaseries.columns[0])
def test_idadf_getitem_idaseries_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf["NOTEXISTING_COLUMN_455849820205"]
def test_idadf_getitem_idaseries_keyerror_several_columns(self, idadf):
if len(idadf.columns) >= 2:
with pytest.raises(KeyError):
idadf[idadf.columns[0], idadf.columns[1]]
def test_idadf_getitem_slice(self, idadb, idadf, idadf_tmp):
if len(idadf) > 10:
newidadf = idadf[0:9]
assert(len(newidadf) == 10)
if len(idadf_tmp) > 10:
idadb.add_column_id(idadf_tmp, destructive = True)
newidadf_1 = idadf_tmp[0:9]
newidadf_2 = idadf_tmp[0:9]
assert(all(newidadf_1.head(10) == newidadf_2.head(10)))
def test_idaseries_getitem_slice(self, idadb, idadf, idadf_tmp):
# Set them as series first and do the same test as above
if len(idadf.columns) >= 1:
idadf = idadf[idadf.columns[0]]
idadf_tmp = idadf_tmp[idadf_tmp.columns[0]]
assert(isinstance(idadf, ibmdbpy.IdaDataFrame))
assert(isinstance(idadf_tmp, ibmdbpy.IdaSeries))
if len(idadf) > 10:
newidadf = idadf[0:9]
assert(len(newidadf) == 10)
def test_idadf_setitem(self, idadf):
pass
def test_idadf_delitem(self, idadf):
pass
def test_idadf_filter_lt(self, idadf):
pass
def test_idadf_filter_le(self, idadf):
pass
def test_idadf_filter_eq(self, idadf):
pass
def test_idadf_filter_ne(self, idadf):
pass
def test_idadf_filter_ge(self, idadf):
pass
def test_idadf_filter_gt(self, idadf):
pass
def test_idadf_feature_add(self, idadf):
pass
def test_idadf_feature_radd(self, idadf):
pass
def test_idadf_feature_div(self, idadf):
pass
def test_idadf_feature_rdiv(self, idadf):
pass
def test_idadf_feature_floordiv(self, idadf):
pass
def test_idadf_feature_rfloordiv(self, idadf):
pass
def test_idadf_feature_mod(self, idadf):
pass
def test_idadf_feature_rmod(self, idadf):
pass
def test_idadf_feature_mul(self, idadf):
pass
def test_idadf_feature_rmul(self, idadf):
pass
def test_idadf_feature_neg(self, idadf):
pass
def test_idadf_feature_rpos(self, idadf):
pass
def test_idadf_feature_pow(self, idadf):
pass
def test_idadf_feature_rpow(self, idadf):
pass
def test_idadf_feature_sub(self, idadf):
pass
def test_idadf_feature_rsub(self, idadf):
pass
class Test_DataBaseFeatures(object):
def test_idadf_exists(self, idadf):
assert(idadf.exists() is True)
pass
def test_idadf_is_view(self, idadf):
assert(idadf.is_view() is False)
pass
def test_idadf_is_table(self, idadf):
assert(idadf.exists() is True)
pass
def test_idadf_get_primary_key(self, idadf):
pass
def test_idadf_ida_query(self, idadf):
pass
def test_idadf_ida_scalar_query(self, idadf):
pass
class Test_DataExploration(object):
### head
# For head and tail we do not test if the rows match because
# the order is not guaranteed anyway
def test_idadf_head_default(self, idadb, idadf, df):
sortkey = idadf.columns[0]
if idadf._get_numerical_columns():
sortkey = idadf._get_numerical_columns()[0]
ida_head = idadf.head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
df_head = df.sort_values(sortkey).head()
assert (ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_10(self, idadb, idadf, df):
ida_head = idadf.head(10)
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 10
def test_idadf_head_10_sort(self, idadb, idadf, df):
ida_head = idadf.head(10, sort=False)
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 10
def test_idadf_head_with_indexer(self, idadb, idadf_indexer, df):
ida_head = idadf_indexer.head()
sortby = len(df.columns)-1
df_head = df.sort_values(df.columns[sortby]).head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[idadf_indexer.columns[sortby]].tolist() ==
df_head[df.columns[sortby]].tolist())
def test_idadf_head_projected_3col(self, idadf, df):
if len(idadf.columns) >= 4:
columns = idadf.columns[1:4].tolist()
newidadf = idadf[columns]
sortkey = newidadf.columns[0]
if newidadf._get_numerical_columns():
sortkey = newidadf._get_numerical_columns()[0]
ida_head = newidadf.head()
df_sorted = df.sort_values(sortkey)
df_head = df_sorted[columns].head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_sorted(self, idadf, df):
sortIdx = len(df.columns) - 1
sortkey = idadf.columns[sortIdx]
newidadf = idadf.sort(sortkey)
ida_head = newidadf.head()
df_head = df.sort_values(sortkey).head()
assert(" ORDER BY " in newidadf.internal_state.get_state())
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_0(self, idadf):
with pytest.raises(ValueError):
idadf.head(0)
def test_idadf_head_negative(self, idadf):
with pytest.raises(ValueError):
idadf.head(-1)
### tail
def test_idadf_tail_default(self, idadb, idadf, df):
sortkey = idadf.columns[0]
if idadf._get_numerical_columns():
sortkey = idadf._get_numerical_columns()[0]
ida_tail = idadf.tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
df_tail = df.sort_values(sortkey).tail()
assert (ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
def test_idadf_tail_10(self, idadb, idadf, df):
ida_tail = idadf.tail(10)
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 10
def test_idadf_tail_10_sort(self, idadb, idadf, df):
ida_tail = idadf.tail(10, sort=False)
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 10
def test_idadf_tail_with_indexer(self, idadb, idadf_indexer, df):
ida_tail = idadf_indexer.tail()
sortby = len(df.columns)-1
df_head = df.sort_values(df.columns[sortby]).tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[idadf_indexer.columns[sortby]].tolist() ==
df_head[df.columns[sortby]].tolist())
def test_idadf_tail_projected_3col(self, idadf, df):
if len(idadf.columns) >= 4:
columns = idadf.columns[1:4].tolist()
newidadf = idadf[columns]
sortkey = newidadf.columns[0]
if newidadf._get_numerical_columns():
sortkey = newidadf._get_numerical_columns()[0]
ida_tail = newidadf.tail()
df_sorted = df.sort_values(sortkey)
df_tail = df_sorted[columns].tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
@pytest.mark.skip(reason="tail on sorted dataframe fails in general, needs fixing first")
def test_idadf_tail_sorted(self, idadf, df):
sortIdx = len(df.columns) - 1
sortkey = idadf.columns[sortIdx]
newidadf = idadf.sort(sortkey)
ida_tail = newidadf.tail()
df_tail = df.sort_values(sortkey).tail()
assert(" ORDER BY " in newidadf.internal_state.get_state())
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
def test_idadf_tail_0(self, idadf):
with pytest.raises(ValueError):
idadf.tail(0)
def test_idadf_tail_negative(self, idadf):
with pytest.raises(ValueError):
idadf.tail(-1)
def test_idadf_pivot_table(self, idadf):
pass
def test_idadf_sort(self, idadf):
pass
# no test
#__enter__
#__exit__
| true
| true
|
79037932fc54e1c2f6ed6419f33b160d082a18e5
| 14,654
|
py
|
Python
|
modoboa_webmail/tests/test_views.py
|
mohamed-ghayyad/modoboa-webmail
|
4fefb3cfadc97e416a0f2b76356c7ec97b2b6040
|
[
"MIT"
] | 59
|
2015-06-02T10:12:31.000Z
|
2022-03-29T17:52:30.000Z
|
modoboa_webmail/tests/test_views.py
|
mohamed-ghayyad/modoboa-webmail
|
4fefb3cfadc97e416a0f2b76356c7ec97b2b6040
|
[
"MIT"
] | 222
|
2015-04-29T16:26:17.000Z
|
2022-02-28T08:05:25.000Z
|
modoboa_webmail/tests/test_views.py
|
mohamed-ghayyad/modoboa-webmail
|
4fefb3cfadc97e416a0f2b76356c7ec97b2b6040
|
[
"MIT"
] | 45
|
2015-03-19T11:14:51.000Z
|
2022-03-14T08:03:49.000Z
|
# coding: utf-8
"""Webmail tests."""
from __future__ import unicode_literals
import os
import shutil
import tempfile
try:
import mock
except ImportError:
from unittest import mock
from six import BytesIO
from django.core import mail
from django.urls import reverse
from modoboa.admin import factories as admin_factories
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoTestCase
from . import data as tests_data
BODYSTRUCTURE_SAMPLE_WITH_FLAGS = [
(b'19 (UID 19 FLAGS (\\Seen) RFC822.SIZE 100000 BODYSTRUCTURE (("text" "plain" ("charset" "ISO-8859-1" "format" "flowed") NIL NIL "7bit" 2 1 NIL NIL NIL NIL)("message" "rfc822" ("name*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74") NIL NIL "8bit" 3632 ("Wed, 13 Dec 2006 20:30:02 +0100" {70}', # noqa
b"[INSCRIPTION] R\xe9c\xe9ption de votre dossier d'inscription Free Haut D\xe9bit"), # noqa
(b' (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) ((NIL NIL "hautdebit" "freetelecom.fr")) ((NIL NIL "nguyen.antoine" "wanadoo.fr")) NIL NIL NIL "<20061213193125.9DA0919AC@dgroup2-2.proxad.net>") ("text" "plain" ("charset" "iso-8859-1") NIL NIL "8bit" 1428 38 NIL ("inline" NIL) NIL NIL) 76 NIL ("inline" ("filename*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74")) NIL NIL) "mixed" ("boundary" "------------040706080908000209030901") NIL NIL NIL) BODY[HEADER.FIELDS (DATE FROM TO CC SUBJECT)] {266}', # noqa
b'Date: Tue, 19 Dec 2006 19:50:13 +0100\r\nFrom: Antoine Nguyen <nguyen.antoine@wanadoo.fr>\r\nTo: Antoine Nguyen <tonio@koalabs.org>\r\nSubject: [Fwd: [INSCRIPTION] =?ISO-8859-1?Q?R=E9c=E9ption_de_votre_?=\r\n =?ISO-8859-1?Q?dossier_d=27inscription_Free_Haut_D=E9bit=5D?=\r\n\r\n'
),
b')'
]
def get_gif():
"""Return gif."""
gif = BytesIO(
b"GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00"
b"\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;")
gif.name = "image.gif"
return gif
class IMAP4Mock(object):
"""Fake IMAP4 client."""
def __init__(self, *args, **kwargs):
self.untagged_responses = {}
def _quote(self, data):
return data
def _simple_command(self, name, *args, **kwargs):
if name == "CAPABILITY":
self.untagged_responses["CAPABILITY"] = [b""]
elif name == "LIST":
self.untagged_responses["LIST"] = [b"() \".\" \"INBOX\""]
elif name == "NAMESPACE":
self.untagged_responses["NAMESPACE"] = [b'(("" "/")) NIL NIL']
return "OK", None
def append(self, *args, **kwargs):
pass
def create(self, name):
return "OK", None
def delete(self, name):
return "OK", None
def list(self):
return "OK", [b"() \".\" \"INBOX\""]
def rename(self, oldname, newname):
return "OK", None
def uid(self, command, *args):
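        # Illustrative note: each hard-coded UID below selects a fixture from the local
        # tests_data module so that FETCH returns a predictable, canned payload.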
if command == "SORT":
return "OK", [b"19"]
elif command == "FETCH":
uid = int(args[0])
data = BODYSTRUCTURE_SAMPLE_WITH_FLAGS
if uid == 46931:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_ONLY_4
elif "HEADER.FIELDS" in args[1]:
data = tests_data.BODYSTRUCTURE_SAMPLE_4
else:
data = tests_data.BODY_PLAIN_4
elif uid == 46932:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_ONLY_5
elif "HEADER.FIELDS" in args[1]:
data = tests_data.BODYSTRUCTURE_SAMPLE_9
else:
data = tests_data.BODYSTRUCTURE_SAMPLE_10
elif uid == 33:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_EMPTY_MAIL
else:
data = tests_data.EMPTY_BODY
elif uid == 133872:
data = tests_data.COMPLETE_MAIL
return "OK", data
elif command == "STORE":
return "OK", []
class WebmailTestCase(ModoTestCase):
"""Check webmail backend."""
@classmethod
def setUpTestData(cls): # noqa
"""Create some users."""
super(WebmailTestCase, cls).setUpTestData()
admin_factories.populate_database()
cls.user = core_models.User.objects.get(username="user@test.com")
def setUp(self):
"""Connect with a simpler user."""
patcher = mock.patch("imaplib.IMAP4")
self.mock_imap4 = patcher.start()
self.mock_imap4.return_value = IMAP4Mock()
self.addCleanup(patcher.stop)
self.set_global_parameter("imap_port", 1435)
self.workdir = tempfile.mkdtemp()
os.mkdir("{}/webmail".format(self.workdir))
self.set_global_parameter("update_scheme", False, app="core")
url = reverse("core:login")
data = {
"username": self.user.username, "password": "toto"
}
self.client.post(url, data)
def tearDown(self):
"""Cleanup."""
shutil.rmtree(self.workdir)
def test_listmailbox(self):
"""Check listmailbox action."""
url = reverse("modoboa_webmail:index")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"{}?action=listmailbox".format(url),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
self.assertIn(
"nguyen.antoine@wanadoo.fr", response.json()["listing"])
response = self.client.get(
"{}?action=listmailbox&pattern=Réception&criteria=Subject"
.format(url),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
self.assertIn(
"nguyen.antoine@wanadoo.fr", response.json()["listing"])
def test_attachments(self):
"""Check attachments."""
url = reverse("modoboa_webmail:index")
response = self.client.get("{}?action=compose".format(url))
self.assertEqual(response.status_code, 200)
self.assertIn("compose_mail", self.client.session)
url = reverse("modoboa_webmail:attachment_list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.set_global_parameters({"max_attachment_size": "10"})
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.post(url, {"attachment": get_gif()})
self.assertContains(response, "Attachment is too big")
self.set_global_parameters({"max_attachment_size": "10K"})
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.post(url, {"attachment": get_gif()})
self.assertContains(response, "upload_success")
self.assertEqual(
len(self.client.session["compose_mail"]["attachments"]), 1)
name = self.client.session["compose_mail"]["attachments"][0]["tmpname"]
path = "{}/webmail/{}".format(self.workdir, name)
self.assertTrue(os.path.exists(path))
url = reverse("modoboa_webmail:attachment_delete")
with self.settings(MEDIA_ROOT=self.workdir):
self.ajax_get("{}?name={}".format(url, name))
self.assertFalse(os.path.exists(path))
def test_delattachment_errors(self):
"""Check error cases."""
url = reverse("modoboa_webmail:index")
response = self.client.get("{}?action=compose".format(url))
self.assertEqual(response.status_code, 200)
self.assertIn("compose_mail", self.client.session)
url = reverse("modoboa_webmail:attachment_delete")
with self.settings(MEDIA_ROOT=self.workdir):
response = self.ajax_get("{}?name=".format(url))
self.assertEqual(response["status"], "ko")
self.assertEqual(response["respmsg"], "Bad query")
with self.settings(MEDIA_ROOT=self.workdir):
response = self.ajax_get("{}?name=test".format(url))
self.assertEqual(response["status"], "ko")
self.assertEqual(response["respmsg"], "Unknown attachment")
def test_send_mail(self):
"""Check compose form."""
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
url, {
"from_": self.user.email, "to": "test@example.test",
"subject": "test", "body": "Test"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, "user@test.com")
# Try to send an email using HTML format
self.user.first_name = "Antoine"
self.user.last_name = "Nguyen"
self.user.parameters.set_value("editor", "html")
self.user.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
mail.outbox = []
response = self.client.post(
url, {
"from_": self.user.email,
"to": "test@example.test", "subject": "test",
"body": "<p>Test</p>"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, '"Antoine Nguyen" <user@test.com>')
def test_signature(self):
"""Check signature in different formats."""
signature = "Antoine Nguyen"
self.user.parameters.set_value("signature", signature)
self.user.save()
response = self.client.get(reverse("modoboa_webmail:index"))
self.assertEqual(response.status_code, 200)
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.ajax_get(url)
self.assertIn(signature, response["listing"])
def test_custom_js_in_preferences(self):
"""Check that custom js is included."""
url = reverse("core:user_index")
response = self.client.get(url)
self.assertContains(response, "function toggleSignatureEditor()")
def test_send_mail_errors(self):
"""Check error cases."""
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.ajax_post(
url, {"to": "", "subject": "test", "body": "Test"}, 400
)
self.assertEqual(len(mail.outbox), 0)
def test_new_folder(self):
"""Test folder creation."""
url = reverse("modoboa_webmail:folder_add")
response = self.client.get(url)
self.assertContains(response, "Create a new folder")
response = self.ajax_post(url, {"name": "Test"})
self.assertIn("newmb", response)
def test_edit_folder(self):
"""Test folder edition."""
url = reverse("modoboa_webmail:folder_change")
response = self.client.get(url)
self.assertContains(response, "Invalid request")
url = "{}?name=Test".format(url)
response = self.client.get(url)
self.assertContains(response, "Edit folder")
session = self.client.session
session["webmail_navparams"] = {"inbox": "Test"}
session.save()
response = self.ajax_post(url, {"oldname": "Test", "name": "Toto"})
self.assertEqual(response["respmsg"], "Folder updated")
def test_delete_folder(self):
"""Test folder removal."""
url = reverse("modoboa_webmail:folder_delete")
self.ajax_get(url, status=400)
url = "{}?name=Test".format(url)
session = self.client.session
session["webmail_navparams"] = {"inbox": "Test"}
session.save()
self.ajax_get(url)
def test_reply_to_email(self):
"""Test reply form."""
url = "{}?action=reply&mbox=INBOX&mailid=46931".format(
reverse("modoboa_webmail:index"))
session = self.client.session
session["lastaction"] = "compose"
session.save()
response = self.ajax_get(url)
self.assertIn('id="id_origmsgid"', response["listing"])
response = self.client.post(
url, {
"from_": self.user.email, "to": "test@example.test",
"subject": "test", "body": "Test",
"origmsgid": "<id@localhost>"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, "user@test.com")
self.assertIn("References", mail.outbox[0].extra_headers)
def test_forward_email(self):
"""Test forward form."""
url = "{}?action=forward&mbox=INBOX&mailid=46932".format(
reverse("modoboa_webmail:index"))
session = self.client.session
session["lastaction"] = "compose"
session.save()
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.get(
url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
response = response.json()
self.assertIn('id="id_origmsgid"', response["listing"])
self.assertEqual(
len(self.client.session["compose_mail"]["attachments"]), 1)
response = self.client.post(
url, {
"from_": self.user.email, "to": "test@example.test",
"subject": "test", "body": "Test",
"origmsgid": "<id@localhost>"
}
)
self.assertEqual(len(mail.outbox), 1)
def test_getmailcontent_empty_mail(self):
"""Try to display an empty email."""
url = "{}?action=reply&mbox=INBOX&mailid=33".format(
reverse("modoboa_webmail:mailcontent_get"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_getmailsource(self):
"""Try to display a message's source."""
url = "{}?mbox=INBOX&mailid=133872".format(
reverse("modoboa_webmail:mailsource_get"))
response = self.client.get(url)
self.assertContains(response, "Message-ID")
| 39.392473
| 762
| 0.600792
|
from __future__ import unicode_literals
import os
import shutil
import tempfile
try:
import mock
except ImportError:
from unittest import mock
from six import BytesIO
from django.core import mail
from django.urls import reverse
from modoboa.admin import factories as admin_factories
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoTestCase
from . import data as tests_data
BODYSTRUCTURE_SAMPLE_WITH_FLAGS = [
(b'19 (UID 19 FLAGS (\\Seen) RFC822.SIZE 100000 BODYSTRUCTURE (("text" "plain" ("charset" "ISO-8859-1" "format" "flowed") NIL NIL "7bit" 2 1 NIL NIL NIL NIL)("message" "rfc822" ("name*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74") NIL NIL "8bit" 3632 ("Wed, 13 Dec 2006 20:30:02 +0100" {70}',
b"[INSCRIPTION] R\xe9c\xe9ption de votre dossier d'inscription Free Haut D\xe9bit"), # noqa
(b' (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) ((NIL NIL "hautdebit" "freetelecom.fr")) ((NIL NIL "nguyen.antoine" "wanadoo.fr")) NIL NIL NIL "<20061213193125.9DA0919AC@dgroup2-2.proxad.net>") ("text" "plain" ("charset" "iso-8859-1") NIL NIL "8bit" 1428 38 NIL ("inline" NIL) NIL NIL) 76 NIL ("inline" ("filename*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74")) NIL NIL) "mixed" ("boundary" "------------040706080908000209030901") NIL NIL NIL) BODY[HEADER.FIELDS (DATE FROM TO CC SUBJECT)] {266}', # noqa
b'Date: Tue, 19 Dec 2006 19:50:13 +0100\r\nFrom: Antoine Nguyen <nguyen.antoine@wanadoo.fr>\r\nTo: Antoine Nguyen <tonio@koalabs.org>\r\nSubject: [Fwd: [INSCRIPTION] =?ISO-8859-1?Q?R=E9c=E9ption_de_votre_?=\r\n =?ISO-8859-1?Q?dossier_d=27inscription_Free_Haut_D=E9bit=5D?=\r\n\r\n'
),
b')'
]
def get_gif():
gif = BytesIO(
b"GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00"
b"\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;")
gif.name = "image.gif"
return gif
class IMAP4Mock(object):
def __init__(self, *args, **kwargs):
self.untagged_responses = {}
def _quote(self, data):
return data
def _simple_command(self, name, *args, **kwargs):
if name == "CAPABILITY":
self.untagged_responses["CAPABILITY"] = [b""]
elif name == "LIST":
self.untagged_responses["LIST"] = [b"() \".\" \"INBOX\""]
elif name == "NAMESPACE":
self.untagged_responses["NAMESPACE"] = [b'(("" "/")) NIL NIL']
return "OK", None
def append(self, *args, **kwargs):
pass
def create(self, name):
return "OK", None
def delete(self, name):
return "OK", None
def list(self):
return "OK", [b"() \".\" \"INBOX\""]
def rename(self, oldname, newname):
return "OK", None
def uid(self, command, *args):
if command == "SORT":
return "OK", [b"19"]
elif command == "FETCH":
uid = int(args[0])
data = BODYSTRUCTURE_SAMPLE_WITH_FLAGS
if uid == 46931:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_ONLY_4
elif "HEADER.FIELDS" in args[1]:
data = tests_data.BODYSTRUCTURE_SAMPLE_4
else:
data = tests_data.BODY_PLAIN_4
elif uid == 46932:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_ONLY_5
elif "HEADER.FIELDS" in args[1]:
data = tests_data.BODYSTRUCTURE_SAMPLE_9
else:
data = tests_data.BODYSTRUCTURE_SAMPLE_10
elif uid == 33:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_EMPTY_MAIL
else:
data = tests_data.EMPTY_BODY
elif uid == 133872:
data = tests_data.COMPLETE_MAIL
return "OK", data
elif command == "STORE":
return "OK", []
class WebmailTestCase(ModoTestCase):
@classmethod
def setUpTestData(cls): # noqa
super(WebmailTestCase, cls).setUpTestData()
admin_factories.populate_database()
cls.user = core_models.User.objects.get(username="user@test.com")
def setUp(self):
patcher = mock.patch("imaplib.IMAP4")
self.mock_imap4 = patcher.start()
self.mock_imap4.return_value = IMAP4Mock()
self.addCleanup(patcher.stop)
self.set_global_parameter("imap_port", 1435)
self.workdir = tempfile.mkdtemp()
os.mkdir("{}/webmail".format(self.workdir))
self.set_global_parameter("update_scheme", False, app="core")
url = reverse("core:login")
data = {
"username": self.user.username, "password": "toto"
}
self.client.post(url, data)
def tearDown(self):
shutil.rmtree(self.workdir)
def test_listmailbox(self):
url = reverse("modoboa_webmail:index")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"{}?action=listmailbox".format(url),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
self.assertIn(
"nguyen.antoine@wanadoo.fr", response.json()["listing"])
response = self.client.get(
"{}?action=listmailbox&pattern=Réception&criteria=Subject"
.format(url),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
self.assertIn(
"nguyen.antoine@wanadoo.fr", response.json()["listing"])
def test_attachments(self):
url = reverse("modoboa_webmail:index")
response = self.client.get("{}?action=compose".format(url))
self.assertEqual(response.status_code, 200)
self.assertIn("compose_mail", self.client.session)
url = reverse("modoboa_webmail:attachment_list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.set_global_parameters({"max_attachment_size": "10"})
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.post(url, {"attachment": get_gif()})
self.assertContains(response, "Attachment is too big")
self.set_global_parameters({"max_attachment_size": "10K"})
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.post(url, {"attachment": get_gif()})
self.assertContains(response, "upload_success")
self.assertEqual(
len(self.client.session["compose_mail"]["attachments"]), 1)
name = self.client.session["compose_mail"]["attachments"][0]["tmpname"]
path = "{}/webmail/{}".format(self.workdir, name)
self.assertTrue(os.path.exists(path))
url = reverse("modoboa_webmail:attachment_delete")
with self.settings(MEDIA_ROOT=self.workdir):
self.ajax_get("{}?name={}".format(url, name))
self.assertFalse(os.path.exists(path))
def test_delattachment_errors(self):
url = reverse("modoboa_webmail:index")
response = self.client.get("{}?action=compose".format(url))
self.assertEqual(response.status_code, 200)
self.assertIn("compose_mail", self.client.session)
url = reverse("modoboa_webmail:attachment_delete")
with self.settings(MEDIA_ROOT=self.workdir):
response = self.ajax_get("{}?name=".format(url))
self.assertEqual(response["status"], "ko")
self.assertEqual(response["respmsg"], "Bad query")
with self.settings(MEDIA_ROOT=self.workdir):
response = self.ajax_get("{}?name=test".format(url))
self.assertEqual(response["status"], "ko")
self.assertEqual(response["respmsg"], "Unknown attachment")
def test_send_mail(self):
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
url, {
"from_": self.user.email, "to": "test@example.test",
"subject": "test", "body": "Test"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, "user@test.com")
# Try to send an email using HTML format
self.user.first_name = "Antoine"
self.user.last_name = "Nguyen"
self.user.parameters.set_value("editor", "html")
self.user.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
mail.outbox = []
response = self.client.post(
url, {
"from_": self.user.email,
"to": "test@example.test", "subject": "test",
"body": "<p>Test</p>"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, '"Antoine Nguyen" <user@test.com>')
def test_signature(self):
signature = "Antoine Nguyen"
self.user.parameters.set_value("signature", signature)
self.user.save()
response = self.client.get(reverse("modoboa_webmail:index"))
self.assertEqual(response.status_code, 200)
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.ajax_get(url)
self.assertIn(signature, response["listing"])
def test_custom_js_in_preferences(self):
url = reverse("core:user_index")
response = self.client.get(url)
self.assertContains(response, "function toggleSignatureEditor()")
def test_send_mail_errors(self):
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.ajax_post(
url, {"to": "", "subject": "test", "body": "Test"}, 400
)
self.assertEqual(len(mail.outbox), 0)
def test_new_folder(self):
url = reverse("modoboa_webmail:folder_add")
response = self.client.get(url)
self.assertContains(response, "Create a new folder")
response = self.ajax_post(url, {"name": "Test"})
self.assertIn("newmb", response)
def test_edit_folder(self):
url = reverse("modoboa_webmail:folder_change")
response = self.client.get(url)
self.assertContains(response, "Invalid request")
url = "{}?name=Test".format(url)
response = self.client.get(url)
self.assertContains(response, "Edit folder")
session = self.client.session
session["webmail_navparams"] = {"inbox": "Test"}
session.save()
response = self.ajax_post(url, {"oldname": "Test", "name": "Toto"})
self.assertEqual(response["respmsg"], "Folder updated")
def test_delete_folder(self):
url = reverse("modoboa_webmail:folder_delete")
self.ajax_get(url, status=400)
url = "{}?name=Test".format(url)
session = self.client.session
session["webmail_navparams"] = {"inbox": "Test"}
session.save()
self.ajax_get(url)
def test_reply_to_email(self):
url = "{}?action=reply&mbox=INBOX&mailid=46931".format(
reverse("modoboa_webmail:index"))
session = self.client.session
session["lastaction"] = "compose"
session.save()
response = self.ajax_get(url)
self.assertIn('id="id_origmsgid"', response["listing"])
response = self.client.post(
url, {
"from_": self.user.email, "to": "test@example.test",
"subject": "test", "body": "Test",
"origmsgid": "<id@localhost>"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, "user@test.com")
self.assertIn("References", mail.outbox[0].extra_headers)
def test_forward_email(self):
url = "{}?action=forward&mbox=INBOX&mailid=46932".format(
reverse("modoboa_webmail:index"))
session = self.client.session
session["lastaction"] = "compose"
session.save()
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.get(
url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
response = response.json()
self.assertIn('id="id_origmsgid"', response["listing"])
self.assertEqual(
len(self.client.session["compose_mail"]["attachments"]), 1)
response = self.client.post(
url, {
"from_": self.user.email, "to": "test@example.test",
"subject": "test", "body": "Test",
"origmsgid": "<id@localhost>"
}
)
self.assertEqual(len(mail.outbox), 1)
def test_getmailcontent_empty_mail(self):
url = "{}?action=reply&mbox=INBOX&mailid=33".format(
reverse("modoboa_webmail:mailcontent_get"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_getmailsource(self):
url = "{}?mbox=INBOX&mailid=133872".format(
reverse("modoboa_webmail:mailsource_get"))
response = self.client.get(url)
self.assertContains(response, "Message-ID")
| true
| true
|
79037ae08e84e6a9933934f02dbf2f6a09f5b19d
| 3,112
|
py
|
Python
|
configs/retinanet/traffic_sign/retinanet_r50_fpn_1x_traffic_sign.py
|
tuanphan09/mmdetection
|
ee63547c02c615f9c61a13e3f34747098a9cd90a
|
[
"Apache-2.0"
] | null | null | null |
configs/retinanet/traffic_sign/retinanet_r50_fpn_1x_traffic_sign.py
|
tuanphan09/mmdetection
|
ee63547c02c615f9c61a13e3f34747098a9cd90a
|
[
"Apache-2.0"
] | null | null | null |
configs/retinanet/traffic_sign/retinanet_r50_fpn_1x_traffic_sign.py
|
tuanphan09/mmdetection
|
ee63547c02c615f9c61a13e3f34747098a9cd90a
|
[
"Apache-2.0"
] | null | null | null |
# The new config inherits a base config to highlight the necessary modification
_base_ = '../retinanet_r50_fpn_1x_coco.py'
# We also need to change the num_classes in head to match the dataset's annotation
model = dict(
pretrained=None,
)
# Modify dataset related settings
dataset_type = 'COCODataset'
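# Traffic-sign class labels (Vietnamese); roughly: no entry, no stopping/parking,
# no turning, speed limit, other prohibitions, danger/warning, mandatory signs.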
classes = ('Cấm ngược chiều', 'Cấm dừng và đỗ', 'Cấm rẽ', 'Giới hạn tốc độ', 'Cấm còn lại', 'Nguy hiểm', 'Hiệu lệnh')
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
data = dict(
samples_per_gpu=2, # Batch size of a single GPU
workers_per_gpu=2, # Worker to pre-fetch data for each single GPU
train=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/train.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(1622, 622),
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
),
val=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/val.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1622, 622),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
),
test=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/test.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1622, 622),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
),
)
| 37.493976
| 117
| 0.54563
|
_base_ = '../retinanet_r50_fpn_1x_coco.py'
model = dict(
pretrained=None,
)
# Modify dataset related settings
dataset_type = 'COCODataset'
classes = ('Cấm ngược chiều', 'Cấm dừng và đỗ', 'Cấm rẽ', 'Giới hạn tốc độ', 'Cấm còn lại', 'Nguy hiểm', 'Hiệu lệnh')
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
data = dict(
samples_per_gpu=2, # Batch size of a single GPU
workers_per_gpu=2, # Worker to pre-fetch data for each single GPU
train=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/train.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(1622, 622),
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
),
val=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/val.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1622, 622),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
),
test=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/test.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1622, 622),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
),
)
| true
| true
|
79037ae19f271b14b11a38feecdea1226e2d05ad
| 327
|
py
|
Python
|
apps/store/migrations/0002_remove_payment_paystack_response.py
|
Joetib/jshop
|
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
|
[
"MIT"
] | 1
|
2021-09-29T18:48:00.000Z
|
2021-09-29T18:48:00.000Z
|
apps/store/migrations/0002_remove_payment_paystack_response.py
|
Joetib/jshop
|
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
|
[
"MIT"
] | null | null | null |
apps/store/migrations/0002_remove_payment_paystack_response.py
|
Joetib/jshop
|
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2021-09-28 13:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='paystack_response',
),
]
| 18.166667
| 47
| 0.590214
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='paystack_response',
),
]
| true
| true
|
79037b880dca6434ef7b651515969c8dcfcbb25b
| 5,446
|
py
|
Python
|
watson/frames.py
|
cazador481/Watson
|
6af92cba8f1b0eafba84025afc140ec3c38cd185
|
[
"MIT"
] | null | null | null |
watson/frames.py
|
cazador481/Watson
|
6af92cba8f1b0eafba84025afc140ec3c38cd185
|
[
"MIT"
] | null | null | null |
watson/frames.py
|
cazador481/Watson
|
6af92cba8f1b0eafba84025afc140ec3c38cd185
|
[
"MIT"
] | null | null | null |
import uuid
import arrow
from collections import namedtuple
HEADERS = ('start', 'stop', 'project', 'id', 'tags', 'updated_at')
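# A Frame keeps start/stop as timezone-aware Arrow objects in local time;
# dump() converts them back to UTC integer timestamps for serialization.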
class Frame(namedtuple('Frame', HEADERS)):
def __new__(cls, start, stop, project, id, tags=None, updated_at=None,):
try:
if not isinstance(start, arrow.Arrow):
start = arrow.get(start)
if not isinstance(stop, arrow.Arrow):
stop = arrow.get(stop)
if updated_at is None:
updated_at = arrow.utcnow()
elif not isinstance(updated_at, arrow.Arrow):
updated_at = arrow.get(updated_at)
except (ValueError, TypeError) as e:
from .watson import WatsonError
raise WatsonError("Error converting date: {}".format(e))
start = start.to('local')
stop = stop.to('local')
if tags is None:
tags = []
return super(Frame, cls).__new__(
cls, start, stop, project, id, tags, updated_at
)
def dump(self):
start = self.start.to('utc').int_timestamp
stop = self.stop.to('utc').int_timestamp
updated_at = self.updated_at.int_timestamp
return (start, stop, self.project, self.id, self.tags, updated_at)
@property
def day(self):
return self.start.floor('day')
def __lt__(self, other):
return self.start < other.start
def __lte__(self, other):
return self.start <= other.start
def __gt__(self, other):
return self.start > other.start
def __gte__(self, other):
return self.start >= other.start
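    # Note: __lte__ and __gte__ above are ordinary helper methods, not operator hooks;
    # Python's <= and >= operators only consult __le__ and __ge__.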
class Span(object):
def __init__(self, start, stop, timeframe='day'):
self.timeframe = timeframe
self.start = start.floor(self.timeframe)
self.stop = stop.ceil(self.timeframe)
def overlaps(self, frame):
return frame.start <= self.stop and frame.stop >= self.start
def __contains__(self, frame):
return frame.start >= self.start and frame.stop <= self.stop
class Frames(object):
def __init__(self, frames=None):
if not frames:
frames = []
rows = [Frame(*frame) for frame in frames]
self._rows = rows
self.changed = False
def __len__(self):
return len(self._rows)
def __getitem__(self, key):
if key in HEADERS:
return tuple(self._get_col(key))
elif isinstance(key, int):
return self._rows[key]
else:
return self._rows[self._get_index_by_id(key)]
def __setitem__(self, key, value):
self.changed = True
if isinstance(value, Frame):
frame = value
else:
frame = self.new_frame(*value)
if isinstance(key, int):
self._rows[key] = frame
else:
frame = frame._replace(id=key)
try:
self._rows[self._get_index_by_id(key)] = frame
except KeyError:
self._rows.append(frame)
def __delitem__(self, key):
self.changed = True
if isinstance(key, int):
del self._rows[key]
else:
del self._rows[self._get_index_by_id(key)]
def _get_index_by_id(self, id):
try:
return next(
i for i, v in enumerate(self['id']) if v.startswith(id)
)
except StopIteration:
raise KeyError("Frame with id {} not found.".format(id))
def _get_col(self, col):
index = HEADERS.index(col)
for row in self._rows:
yield row[index]
def add(self, *args, **kwargs):
self.changed = True
frame = self.new_frame(*args, **kwargs)
self._rows.append(frame)
return frame
def new_frame(self, project, start, stop, tags=None, id=None,
updated_at=None):
if not id:
id = uuid.uuid4().hex
return Frame(start, stop, project, id, tags=tags,
updated_at=updated_at)
def dump(self):
return tuple(frame.dump() for frame in self._rows)
def filter(
self,
projects=None,
tags=None,
ignore_projects=None,
ignore_tags=None,
span=None,
include_partial_frames=False,
):
for frame in self._rows:
if projects is not None and frame.project not in projects:
continue
if ignore_projects is not None and\
frame.project in ignore_projects:
continue
if tags is not None and not any(tag in frame.tags for tag in tags):
continue
if ignore_tags is not None and\
any(tag in frame.tags for tag in ignore_tags):
continue
if span is None:
yield frame
elif frame in span:
yield frame
elif include_partial_frames and span.overlaps(frame):
# If requested, return the part of the frame that is within the
# span, for frames that are *partially* within span or reaching
# over span
start = span.start if frame.start < span.start else frame.start
stop = span.stop if frame.stop > span.stop else frame.stop
yield frame._replace(start=start, stop=stop)
def span(self, start, stop):
return Span(start, stop)
| 29.27957
| 79
| 0.565736
|
import uuid
import arrow
from collections import namedtuple
HEADERS = ('start', 'stop', 'project', 'id', 'tags', 'updated_at')
class Frame(namedtuple('Frame', HEADERS)):
def __new__(cls, start, stop, project, id, tags=None, updated_at=None,):
try:
if not isinstance(start, arrow.Arrow):
start = arrow.get(start)
if not isinstance(stop, arrow.Arrow):
stop = arrow.get(stop)
if updated_at is None:
updated_at = arrow.utcnow()
elif not isinstance(updated_at, arrow.Arrow):
updated_at = arrow.get(updated_at)
except (ValueError, TypeError) as e:
from .watson import WatsonError
raise WatsonError("Error converting date: {}".format(e))
start = start.to('local')
stop = stop.to('local')
if tags is None:
tags = []
return super(Frame, cls).__new__(
cls, start, stop, project, id, tags, updated_at
)
def dump(self):
start = self.start.to('utc').int_timestamp
stop = self.stop.to('utc').int_timestamp
updated_at = self.updated_at.int_timestamp
return (start, stop, self.project, self.id, self.tags, updated_at)
@property
def day(self):
return self.start.floor('day')
def __lt__(self, other):
return self.start < other.start
def __lte__(self, other):
return self.start <= other.start
def __gt__(self, other):
return self.start > other.start
def __gte__(self, other):
return self.start >= other.start
class Span(object):
def __init__(self, start, stop, timeframe='day'):
self.timeframe = timeframe
self.start = start.floor(self.timeframe)
self.stop = stop.ceil(self.timeframe)
def overlaps(self, frame):
return frame.start <= self.stop and frame.stop >= self.start
def __contains__(self, frame):
return frame.start >= self.start and frame.stop <= self.stop
class Frames(object):
def __init__(self, frames=None):
if not frames:
frames = []
rows = [Frame(*frame) for frame in frames]
self._rows = rows
self.changed = False
def __len__(self):
return len(self._rows)
def __getitem__(self, key):
if key in HEADERS:
return tuple(self._get_col(key))
elif isinstance(key, int):
return self._rows[key]
else:
return self._rows[self._get_index_by_id(key)]
def __setitem__(self, key, value):
self.changed = True
if isinstance(value, Frame):
frame = value
else:
frame = self.new_frame(*value)
if isinstance(key, int):
self._rows[key] = frame
else:
frame = frame._replace(id=key)
try:
self._rows[self._get_index_by_id(key)] = frame
except KeyError:
self._rows.append(frame)
def __delitem__(self, key):
self.changed = True
if isinstance(key, int):
del self._rows[key]
else:
del self._rows[self._get_index_by_id(key)]
def _get_index_by_id(self, id):
try:
return next(
i for i, v in enumerate(self['id']) if v.startswith(id)
)
except StopIteration:
raise KeyError("Frame with id {} not found.".format(id))
def _get_col(self, col):
index = HEADERS.index(col)
for row in self._rows:
yield row[index]
def add(self, *args, **kwargs):
self.changed = True
frame = self.new_frame(*args, **kwargs)
self._rows.append(frame)
return frame
def new_frame(self, project, start, stop, tags=None, id=None,
updated_at=None):
if not id:
id = uuid.uuid4().hex
return Frame(start, stop, project, id, tags=tags,
updated_at=updated_at)
def dump(self):
return tuple(frame.dump() for frame in self._rows)
def filter(
self,
projects=None,
tags=None,
ignore_projects=None,
ignore_tags=None,
span=None,
include_partial_frames=False,
):
for frame in self._rows:
if projects is not None and frame.project not in projects:
continue
if ignore_projects is not None and\
frame.project in ignore_projects:
continue
if tags is not None and not any(tag in frame.tags for tag in tags):
continue
if ignore_tags is not None and\
any(tag in frame.tags for tag in ignore_tags):
continue
if span is None:
yield frame
elif frame in span:
yield frame
elif include_partial_frames and span.overlaps(frame):
start = span.start if frame.start < span.start else frame.start
stop = span.stop if frame.stop > span.stop else frame.stop
yield frame._replace(start=start, stop=stop)
def span(self, start, stop):
return Span(start, stop)
| true
| true
|
79037b9e891cf174cb460169069b116e1a5bd27f
| 332
|
py
|
Python
|
platform/src/main/python/dlpx/virtualization/platform/util.py
|
SumoSourabh/virtualization-sdk
|
d1c06e7aeb8adf48243599871423922d642d2c10
|
[
"Apache-2.0"
] | null | null | null |
platform/src/main/python/dlpx/virtualization/platform/util.py
|
SumoSourabh/virtualization-sdk
|
d1c06e7aeb8adf48243599871423922d642d2c10
|
[
"Apache-2.0"
] | null | null | null |
platform/src/main/python/dlpx/virtualization/platform/util.py
|
SumoSourabh/virtualization-sdk
|
d1c06e7aeb8adf48243599871423922d642d2c10
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2019, 2021 by Delphix. All rights reserved.
#
import dlpx.virtualization.api
from dlpx.virtualization.common.util import to_str
def get_virtualization_api_version():
"""Returns the Virutalization API version string.
:return: version string
"""
return to_str(dlpx.virtualization.api.__version__)
| 23.714286
| 59
| 0.756024
|
import dlpx.virtualization.api
from dlpx.virtualization.common.util import to_str
def get_virtualization_api_version():
return to_str(dlpx.virtualization.api.__version__)
| true
| true
|
79037bbe91970d86c9ed007208185ba6a65de400
| 1,168
|
py
|
Python
|
setup.py
|
Omarnabk/requests_tor
|
6a7e16942eca66945e783a9bb7acac0b9ea6f190
|
[
"MIT"
] | 1
|
2021-06-06T23:41:37.000Z
|
2021-06-06T23:41:37.000Z
|
setup.py
|
Omarnabk/requests_tor
|
6a7e16942eca66945e783a9bb7acac0b9ea6f190
|
[
"MIT"
] | null | null | null |
setup.py
|
Omarnabk/requests_tor
|
6a7e16942eca66945e783a9bb7acac0b9ea6f190
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from requests_tor import __version__
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="requests_tor",
version=__version__,
author="deedy5",
description="Multithreading requests via TOR with automatic TOR new identity",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deedy5/requests_tor",
license="MIT",
py_modules=["requests_tor"],
install_requires=["requests>=2.25.0", "stem>=1.8.0"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
],
python_requires=">=3.6",
zip_safe=False,
)
| 35.393939
| 82
| 0.638699
|
from setuptools import setup
from requests_tor import __version__
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="requests_tor",
version=__version__,
author="deedy5",
description="Multithreading requests via TOR with automatic TOR new identity",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deedy5/requests_tor",
license="MIT",
py_modules=["requests_tor"],
install_requires=["requests>=2.25.0", "stem>=1.8.0"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
],
python_requires=">=3.6",
zip_safe=False,
)
| true
| true
|
79037c714c309d5a2482e456f279f42d7b0982ed
| 2,060
|
py
|
Python
|
nyamuk/event.py
|
MasterScott/nyamuk
|
ac4c6028de288a4c8e0b332ae16eae889deb643d
|
[
"BSD-2-Clause"
] | 49
|
2015-01-27T15:06:31.000Z
|
2022-02-18T13:51:48.000Z
|
nyamuk/event.py
|
MasterScott/nyamuk
|
ac4c6028de288a4c8e0b332ae16eae889deb643d
|
[
"BSD-2-Clause"
] | 10
|
2015-03-19T13:24:33.000Z
|
2019-03-01T10:06:23.000Z
|
nyamuk/event.py
|
MasterScott/nyamuk
|
ac4c6028de288a4c8e0b332ae16eae889deb643d
|
[
"BSD-2-Clause"
] | 19
|
2015-01-27T15:13:29.000Z
|
2021-05-23T13:43:52.000Z
|
"""Nyamuk event."""
import socket
import nyamuk_const as NC
#mqtt event
EV_CONNACK = NC.CMD_CONNACK
EV_PUBLISH = NC.CMD_PUBLISH
EV_SUBACK = NC.CMD_SUBACK
#non mqtt event
EV_NET_ERR = 1000
class BaseEvent:
"""Event Base Class."""
def __init__(self, tipe):
self.type = tipe
class EventConnack(BaseEvent):
"""CONNACK received."""
def __init__(self, ret_code, session_present = 0):
BaseEvent.__init__(self, NC.CMD_CONNACK)
self.ret_code = ret_code
# v3.1.1 only
self.session_present = session_present
class EventPublish(BaseEvent):
"""PUBLISH received."""
def __init__(self, msg):
BaseEvent.__init__(self, NC.CMD_PUBLISH)
self.msg = msg
class EventSuback(BaseEvent):
"""SUBACK received."""
def __init__(self, mid, granted_qos):
BaseEvent.__init__(self, NC.CMD_SUBACK)
self.mid = mid
self.granted_qos = granted_qos
class EventUnsuback(BaseEvent):
"""UNSUBACK received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_UNSUBACK)
self.mid = mid
class EventPuback(BaseEvent):
"""PUBACK received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBACK)
self.mid = mid
class EventPubrec(BaseEvent):
"""PUBREC received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBREC)
self.mid = mid
class EventPubrel(BaseEvent):
"""PUBREL received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBREL)
self.mid = mid
class EventPubcomp(BaseEvent):
"""PUBCOMP received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBCOMP)
self.mid = mid
class EventNeterr(BaseEvent):
"""Network error event."""
def __init__(self, errnum, msg):
BaseEvent.__init__(self, EV_NET_ERR)
self.errnum = errnum
self.msg = msg
class EventPingResp(BaseEvent):
"""PINGRESP received."""
def __init__(self):
BaseEvent.__init__(self, NC.CMD_PINGRESP)
| 25.432099
| 54
| 0.653883
|
import socket
import nyamuk_const as NC
EV_CONNACK = NC.CMD_CONNACK
EV_PUBLISH = NC.CMD_PUBLISH
EV_SUBACK = NC.CMD_SUBACK
EV_NET_ERR = 1000
class BaseEvent:
def __init__(self, tipe):
self.type = tipe
class EventConnack(BaseEvent):
def __init__(self, ret_code, session_present = 0):
BaseEvent.__init__(self, NC.CMD_CONNACK)
self.ret_code = ret_code
self.session_present = session_present
class EventPublish(BaseEvent):
def __init__(self, msg):
BaseEvent.__init__(self, NC.CMD_PUBLISH)
self.msg = msg
class EventSuback(BaseEvent):
def __init__(self, mid, granted_qos):
BaseEvent.__init__(self, NC.CMD_SUBACK)
self.mid = mid
self.granted_qos = granted_qos
class EventUnsuback(BaseEvent):
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_UNSUBACK)
self.mid = mid
class EventPuback(BaseEvent):
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBACK)
self.mid = mid
class EventPubrec(BaseEvent):
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBREC)
self.mid = mid
class EventPubrel(BaseEvent):
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBREL)
self.mid = mid
class EventPubcomp(BaseEvent):
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBCOMP)
self.mid = mid
class EventNeterr(BaseEvent):
def __init__(self, errnum, msg):
BaseEvent.__init__(self, EV_NET_ERR)
self.errnum = errnum
self.msg = msg
class EventPingResp(BaseEvent):
def __init__(self):
BaseEvent.__init__(self, NC.CMD_PINGRESP)
| true
| true
|
79037d241ec08af839b4c82afef6de7b86a8c0ee
| 644
|
py
|
Python
|
wagtail_review/text.py
|
icanbwell/wagtail-review
|
4695f59c9feb94974ceb4a1b03ce8fd836e0ea3e
|
[
"BSD-3-Clause"
] | 44
|
2018-12-17T16:37:16.000Z
|
2022-03-06T15:09:23.000Z
|
wagtail_review/text.py
|
icanbwell/wagtail-review
|
4695f59c9feb94974ceb4a1b03ce8fd836e0ea3e
|
[
"BSD-3-Clause"
] | 33
|
2019-01-07T18:03:14.000Z
|
2021-12-15T08:46:57.000Z
|
wagtail_review/text.py
|
icanbwell/wagtail-review
|
4695f59c9feb94974ceb4a1b03ce8fd836e0ea3e
|
[
"BSD-3-Clause"
] | 19
|
2019-01-08T14:08:15.000Z
|
2021-10-19T03:16:30.000Z
|
def user_display_name(user):
"""
Returns the preferred display name for the given user object: the result of
user.get_full_name() if implemented and non-empty, or user.get_username() otherwise.
"""
try:
full_name = user.get_full_name().strip()
if full_name:
return full_name
except AttributeError:
pass
try:
return user.get_username()
except AttributeError:
# we were passed None or something else that isn't a valid user object; return
# empty string to replicate the behaviour of {{ user.get_full_name|default:user.get_username }}
return ''
| 33.894737
| 103
| 0.661491
|
def user_display_name(user):
try:
full_name = user.get_full_name().strip()
if full_name:
return full_name
except AttributeError:
pass
try:
return user.get_username()
except AttributeError:
# empty string to replicate the behaviour of {{ user.get_full_name|default:user.get_username }}
return ''
| true
| true
|
79037d48c657f80b96e603e33494b0d2e714af9a
| 2,989
|
py
|
Python
|
ssim.py
|
ebartrum/NovelViewSynthesis-TensorFlow
|
95be44737dd2f0b96cde61fbd9c1d3c88ae49830
|
[
"MIT"
] | 192
|
2018-09-06T21:27:11.000Z
|
2022-02-15T09:15:34.000Z
|
ssim.py
|
RealityTracer/Multiview2Novelview
|
a5e236f3c564bf287c8a09d855fd2134ba86b299
|
[
"MIT"
] | 18
|
2018-09-11T02:32:40.000Z
|
2020-12-03T08:54:00.000Z
|
ssim.py
|
RealityTracer/Multiview2Novelview
|
a5e236f3c564bf287c8a09d855fd2134ba86b299
|
[
"MIT"
] | 39
|
2018-09-07T01:28:20.000Z
|
2022-01-09T05:54:09.000Z
|
import tensorflow as tf
import numpy as np
def _tf_fspecial_gauss(size, sigma, ch=1):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
x_data = np.expand_dims(x_data, axis=-1)
x_data = np.expand_dims(x_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
g = tf.tile(g, [1, 1, ch, 1])
return g / tf.reduce_sum(g)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=0.5):
img1 = tf.image.rgb_to_grayscale(img1)
img2 = tf.image.rgb_to_grayscale(img2)
window = _tf_fspecial_gauss(size, sigma,
ch=img1.get_shape().as_list()[-1]) # window shape [size, size]
K1 = 0.01
K2 = 0.03
    L = 1 # depth of image (255 in case the image has a different scale)
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_sq
sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu2_sq
sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_mu2
if cs_map:
value = (
((2*mu1_mu2 + C1) * (2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
), (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
)
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
mssim = []
mcs = []
for l in range(level):
ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
mssim.append(tf.reduce_mean(ssim_map))
mcs.append(tf.reduce_mean(cs_map))
filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
img1 = filtered_im1
img2 = filtered_im2
# list to tensor of dim D+1
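    # Note: tf.pack was renamed to tf.stack in TensorFlow 1.0, so this function targets
    # a pre-1.0 TensorFlow API.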
mssim = tf.pack(mssim, axis=0)
mcs = tf.pack(mcs, axis=0)
value = (tf.reduce_prod(
mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))
if mean_metric:
value = tf.reduce_mean(value)
return value
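# Example usage (illustrative sketch, not part of the original file): in a TF1-style
# graph, img1 and img2 would be float tensors of shape [N, H, W, 3] scaled to [0, 1]:
#   ssim_value = tf_ssim(img1, img2)   # scalar mean SSIM over the batch
#   with tf.Session() as sess:
#       print(sess.run(ssim_value))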
| 35.583333
| 95
| 0.571429
|
import tensorflow as tf
import numpy as np
def _tf_fspecial_gauss(size, sigma, ch=1):
x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
x_data = np.expand_dims(x_data, axis=-1)
x_data = np.expand_dims(x_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
g = tf.tile(g, [1, 1, ch, 1])
return g / tf.reduce_sum(g)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=0.5):
img1 = tf.image.rgb_to_grayscale(img1)
img2 = tf.image.rgb_to_grayscale(img2)
window = _tf_fspecial_gauss(size, sigma,
ch=img1.get_shape().as_list()[-1])
K1 = 0.01
K2 = 0.03
L = 1
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_sq
sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu2_sq
sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_mu2
if cs_map:
value = (
((2*mu1_mu2 + C1) * (2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
), (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
)
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
mssim = []
mcs = []
for l in range(level):
ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
mssim.append(tf.reduce_mean(ssim_map))
mcs.append(tf.reduce_mean(cs_map))
filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
img1 = filtered_im1
img2 = filtered_im2
mssim = tf.pack(mssim, axis=0)
mcs = tf.pack(mcs, axis=0)
value = (tf.reduce_prod(
mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))
if mean_metric:
value = tf.reduce_mean(value)
return value
| true
| true
|
79037d6b66461bcf5551085f73168f1654c65ff5
| 45
|
py
|
Python
|
WebVisualizations/WeatherPy/config.py
|
IrinaUX/Web-Design-Challenge
|
3dac80633a56322eecd93f031bcf652b4c848969
|
[
"ADSL"
] | null | null | null |
WebVisualizations/WeatherPy/config.py
|
IrinaUX/Web-Design-Challenge
|
3dac80633a56322eecd93f031bcf652b4c848969
|
[
"ADSL"
] | null | null | null |
WebVisualizations/WeatherPy/config.py
|
IrinaUX/Web-Design-Challenge
|
3dac80633a56322eecd93f031bcf652b4c848969
|
[
"ADSL"
] | null | null | null |
api_key = "8d3fef753a916acc8df61a629cda8e70"
| 22.5
| 44
| 0.866667
|
api_key = "8d3fef753a916acc8df61a629cda8e70"
| true
| true
|
79037e1e17f1c6ca60aa131870960c78b72347d3
| 1,815
|
py
|
Python
|
assets/netlists/opsci_profit_sharing/SimStrategy.py
|
opscientia/darcspice
|
3f0602a45c829127d552593d2d4c7c5646629136
|
[
"Apache-2.0"
] | 6
|
2022-01-04T16:27:49.000Z
|
2022-03-19T02:57:20.000Z
|
assets/netlists/opsci_profit_sharing/SimStrategy.py
|
opscientia/tokenspice
|
3f0602a45c829127d552593d2d4c7c5646629136
|
[
"Apache-2.0"
] | 1
|
2022-01-04T18:09:44.000Z
|
2022-01-04T18:10:38.000Z
|
assets/netlists/opsci_profit_sharing/SimStrategy.py
|
opscientia/darcspice
|
3f0602a45c829127d552593d2d4c7c5646629136
|
[
"Apache-2.0"
] | null | null | null |
import math
from enforce_typing import enforce_types
from engine import SimStrategyBase
from util.constants import S_PER_HOUR
@enforce_types
class SimStrategy(SimStrategyBase.SimStrategyBase):
def __init__(self, no_researchers=2):
#===initialize self.time_step, max_ticks====
super().__init__()
#===set base-class values we want for this netlist====
self.setTimeStep(S_PER_HOUR)
self.setMaxTime(30, 'years') #typical runs: 10 years, 20 years, 150 years
#===new attributes specific to this netlist===
self.TICKS_BETWEEN_PROPOSALS = 6480
self.PRICE_OF_ASSETS = 1000 # OCEAN
self.RATIO_FUNDS_TO_PUBLISH = 0.4 # 40% of grant funding will go towards "doing work" & publishing
self.TRANSACTION_FEES = 0.1
self.FEES_TO_STAKERS = 0.2
self.NUMBER_OF_RESEARCHERS = no_researchers
self.FUNDING_BOUNDARY = 10000
'''
Some additional parameters that will enable more experimentation (not currently in use)
'''
self.FUNDING_TIME_DEPENDENCE = True # meaning that TICKS_BETWEEN_PROPOSALS should be used
self.PROPOSALS_FUNDED_AT_A_TIME = 1 # this would be used if FUNDING_TIME_DEPENDENCE = False, <=> funding as projects finish
self.PROPOSAL_SETUP = {'grant_requested': 1000, # can be used as a parameter in ResearcherAgent in SimState
'assets_generated': 1,
'no_researchers': 10}
self.TREASURY = 'dao_treasury'
# DT parameters
self.DT_init = 100.0
# DATA TOKEN COMPATIBILITY WIP
# # pool
# self.DT_stake = 20.0
# self.pool_weight_DT = 3.0
# self.pool_weight_OCEAN = 7.0
# assert (self.pool_weight_DT + self.pool_weight_OCEAN) == 10.0
| 42.209302
| 131
| 0.656749
|
import math
from enforce_typing import enforce_types
from engine import SimStrategyBase
from util.constants import S_PER_HOUR
@enforce_types
class SimStrategy(SimStrategyBase.SimStrategyBase):
def __init__(self, no_researchers=2):
super().__init__()
self.setTimeStep(S_PER_HOUR)
self.setMaxTime(30, 'years')
self.TICKS_BETWEEN_PROPOSALS = 6480
self.PRICE_OF_ASSETS = 1000
self.RATIO_FUNDS_TO_PUBLISH = 0.4
self.TRANSACTION_FEES = 0.1
self.FEES_TO_STAKERS = 0.2
self.NUMBER_OF_RESEARCHERS = no_researchers
self.FUNDING_BOUNDARY = 10000
self.FUNDING_TIME_DEPENDENCE = True
self.PROPOSALS_FUNDED_AT_A_TIME = 1
self.PROPOSAL_SETUP = {'grant_requested': 1000,
'assets_generated': 1,
'no_researchers': 10}
self.TREASURY = 'dao_treasury'
self.DT_init = 100.0
| true
| true
|
79037ee8f12f6bab377c6a3f1c28abdcb9147e8b
| 240
|
py
|
Python
|
DTOs/TopicDTO.py
|
AngelStoyanov33/Flask-Forum
|
055e4555dad8588437bf242bf9c6ea97941e69fe
|
[
"MIT"
] | null | null | null |
DTOs/TopicDTO.py
|
AngelStoyanov33/Flask-Forum
|
055e4555dad8588437bf242bf9c6ea97941e69fe
|
[
"MIT"
] | null | null | null |
DTOs/TopicDTO.py
|
AngelStoyanov33/Flask-Forum
|
055e4555dad8588437bf242bf9c6ea97941e69fe
|
[
"MIT"
] | null | null | null |
class TopicDTO:
name = str
description = str
popularity = int
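    # Note: these class-level assignments bind the built-in types themselves and serve
    # only as informal annotations; instance values are assigned in __init__ below.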
def __init__(self, name="", popularity=0, description = ""):
self.name=name
self.popularity=popularity
self.description = description
| 26.666667
| 64
| 0.625
|
class TopicDTO:
name = str
description = str
popularity = int
def __init__(self, name="", popularity=0, description = ""):
self.name=name
self.popularity=popularity
self.description = description
| true
| true
|
79037f5e28f26dbd8044bd57561c7e3094ce84b6
| 371
|
py
|
Python
|
experiments/5_norming_object_typicality_phrasing1/results/scripts/makeItemList.py
|
thegricean/overinformativeness
|
d20b66148c13af473b57cc4d1736191a49660349
|
[
"MIT"
] | 1
|
2016-10-27T18:41:57.000Z
|
2016-10-27T18:41:57.000Z
|
experiments/5_norming_object_typicality_phrasing1/results/scripts/makeItemList.py
|
thegricean/overinformativeness
|
d20b66148c13af473b57cc4d1736191a49660349
|
[
"MIT"
] | 9
|
2015-11-30T21:44:31.000Z
|
2020-04-21T01:26:05.000Z
|
experiments/5_norming_object_typicality_phrasing1/results/scripts/makeItemList.py
|
thegricean/overinformativeness
|
d20b66148c13af473b57cc4d1736191a49660349
|
[
"MIT"
] | 2
|
2015-11-25T09:53:20.000Z
|
2017-03-17T21:51:18.000Z
|
import os
imagedir = "/Users/titlis/cogsci/projects/stanford/projects/overinformativeness/experiments/5_norming_object_typicality/images"
for t in os.listdir(imagedir):
if not t.startswith("."):
for i in os.listdir(imagedir+"/"+t):
if not i.startswith("."):
print "{"
print "\"item\": \""+i[0:-4]+"\","
print "\"objecttype\": \""+t+"\""
print "},"
| 30.916667
| 127
| 0.638814
|
import os
imagedir = "/Users/titlis/cogsci/projects/stanford/projects/overinformativeness/experiments/5_norming_object_typicality/images"
for t in os.listdir(imagedir):
if not t.startswith("."):
for i in os.listdir(imagedir+"/"+t):
if not i.startswith("."):
print "{"
print "\"item\": \""+i[0:-4]+"\","
print "\"objecttype\": \""+t+"\""
print "},"
| false
| true
|
79038257e50b1885b97826cd892b492c07a4b5f2
| 16,449
|
py
|
Python
|
ViT-V-Net/models.py
|
junyuchen245/ViT-V-Net_for_3D_Image_Registration_Pytorch
|
f43bcdeef1d6712dfcaa3b4e18f69474e1eeaf73
|
[
"MIT"
] | 131
|
2021-04-07T03:30:08.000Z
|
2022-03-20T04:09:01.000Z
|
ViT-V-Net/models.py
|
junyuchen245/ViT-V-Net_for_3D_Image_Registration
|
f43bcdeef1d6712dfcaa3b4e18f69474e1eeaf73
|
[
"MIT"
] | 4
|
2021-04-26T09:09:26.000Z
|
2022-03-10T05:29:29.000Z
|
ViT-V-Net/models.py
|
junyuchen245/ViT-V-Net_for_3D_Image_Registration
|
f43bcdeef1d6712dfcaa3b4e18f69474e1eeaf73
|
[
"MIT"
] | 20
|
2021-04-15T02:19:24.000Z
|
2022-03-14T10:10:53.000Z
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as nnf
from torch.nn import Dropout, Softmax, Linear, Conv3d, LayerNorm
from torch.nn.modules.utils import _pair, _triple
import configs as configs
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class Attention(nn.Module):
def __init__(self, config, vis):
super(Attention, self).__init__()
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.out = Linear(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
def transpose_for_scores(self, x):
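        # Reshape (B, seq_len, hidden) into (B, num_heads, seq_len, head_dim) so attention
        # scores can be computed per head with batched matmuls.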
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
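        # Scaled dot-product attention: scores = Q·K^T / sqrt(head_dim), softmax over the key axis.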
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"])
self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size)
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
"""Construct the embeddings from patch, position embeddings.
"""
def __init__(self, config, img_size):
super(Embeddings, self).__init__()
self.config = config
down_factor = config.down_factor
patch_size = _triple(config.patches["size"])
n_patches = int((img_size[0]/2**down_factor// patch_size[0]) * (img_size[1]/2**down_factor// patch_size[1]) * (img_size[2]/2**down_factor// patch_size[2]))
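        # n_patches is the product over the three axes of (axis_size / 2**down_factor // patch_size):
        # each axis is divided by the encoder's downsampling factor and then tiled with
        # non-overlapping patches.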
self.hybrid_model = CNNEncoder(config, n_channels=2)
in_channels = config['encoder_channels'][-1]
self.patch_embeddings = Conv3d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
x, features = self.hybrid_model(x)
x = self.patch_embeddings(x) # (B, hidden. n_patches^(1/2), n_patches^(1/2))
x = x.flatten(2)
x = x.transpose(-1, -2) # (B, n_patches, hidden)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings, features
class Block(nn.Module):
def __init__(self, config, vis):
super(Block, self).__init__()
self.hidden_size = config.hidden_size
self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn = Mlp(config)
self.attn = Attention(config, vis)
def forward(self, x):
h = x
x = self.attention_norm(x)
x, weights = self.attn(x)
x = x + h
h = x
x = self.ffn_norm(x)
x = self.ffn(x)
x = x + h
return x, weights
class Encoder(nn.Module):
def __init__(self, config, vis):
super(Encoder, self).__init__()
self.vis = vis
self.layer = nn.ModuleList()
self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
for _ in range(config.transformer["num_layers"]):
layer = Block(config, vis)
self.layer.append(copy.deepcopy(layer))
def forward(self, hidden_states):
attn_weights = []
for layer_block in self.layer:
hidden_states, weights = layer_block(hidden_states)
if self.vis:
attn_weights.append(weights)
encoded = self.encoder_norm(hidden_states)
return encoded, attn_weights
class Transformer(nn.Module):
def __init__(self, config, img_size, vis):
super(Transformer, self).__init__()
self.embeddings = Embeddings(config, img_size=img_size)
self.encoder = Encoder(config, vis)
def forward(self, input_ids):
embedding_output, features = self.embeddings(input_ids)
encoded, attn_weights = self.encoder(embedding_output) # (B, n_patch, hidden)
return encoded, attn_weights, features
class Conv3dReLU(nn.Sequential):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
padding=0,
stride=1,
use_batchnorm=True,
):
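        # The convolution bias is disabled when use_batchnorm is set, since the following
        # BatchNorm shift makes it redundant.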
conv = nn.Conv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=not (use_batchnorm),
)
relu = nn.ReLU(inplace=True)
bn = nn.BatchNorm3d(out_channels)
super(Conv3dReLU, self).__init__(conv, bn, relu)
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
skip_channels=0,
use_batchnorm=True,
):
super().__init__()
self.conv1 = Conv3dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.conv2 = Conv3dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
def forward(self, x, skip=None):
x = self.up(x)
if skip is not None:
x = torch.cat([x, skip], dim=1)
x = self.conv1(x)
x = self.conv2(x)
return x
class DecoderCup(nn.Module):
def __init__(self, config, img_size):
super().__init__()
self.config = config
self.down_factor = config.down_factor
head_channels = config.conv_first_channel
self.img_size = img_size
self.conv_more = Conv3dReLU(
config.hidden_size,
head_channels,
kernel_size=3,
padding=1,
use_batchnorm=True,
)
decoder_channels = config.decoder_channels
in_channels = [head_channels] + list(decoder_channels[:-1])
out_channels = decoder_channels
self.patch_size = _triple(config.patches["size"])
skip_channels = self.config.skip_channels
blocks = [
DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in zip(in_channels, out_channels, skip_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, hidden_states, features=None):
B, n_patch, hidden = hidden_states.size() # reshape from (B, n_patch, hidden) to (B, h, w, hidden)
l, h, w = (self.img_size[0]//2**self.down_factor//self.patch_size[0]), (self.img_size[1]//2**self.down_factor//self.patch_size[1]), (self.img_size[2]//2**self.down_factor//self.patch_size[2])
x = hidden_states.permute(0, 2, 1)
x = x.contiguous().view(B, hidden, l, h, w)
x = self.conv_more(x)
for i, decoder_block in enumerate(self.blocks):
if features is not None:
skip = features[i] if (i < self.config.n_skip) else None
#print(skip.shape)
else:
skip = None
x = decoder_block(x, skip=skip)
return x
class SpatialTransformer(nn.Module):
"""
N-D Spatial Transformer
Obtained from https://github.com/voxelmorph/voxelmorph
"""
def __init__(self, size, mode='bilinear'):
super().__init__()
self.mode = mode
# create sampling grid
vectors = [torch.arange(0, s) for s in size]
grids = torch.meshgrid(vectors)
grid = torch.stack(grids)
grid = torch.unsqueeze(grid, 0)
grid = grid.type(torch.FloatTensor)
# registering the grid as a buffer cleanly moves it to the GPU, but it also
# adds it to the state dict. this is annoying since everything in the state dict
# is included when saving weights to disk, so the model files are way bigger
# than they need to be. so far, there does not appear to be an elegant solution.
# see: https://discuss.pytorch.org/t/how-to-register-buffer-without-polluting-state-dict
self.register_buffer('grid', grid)
def forward(self, src, flow):
# new locations
new_locs = self.grid + flow
shape = flow.shape[2:]
# need to normalize grid values to [-1, 1] for resampler
for i in range(len(shape)):
new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)
# move channels dim to last position
# also not sure why, but the channels need to be reversed
if len(shape) == 2:
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
elif len(shape) == 3:
new_locs = new_locs.permute(0, 2, 3, 4, 1)
new_locs = new_locs[..., [2, 1, 0]]
return nnf.grid_sample(src, new_locs, align_corners=True, mode=self.mode)
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv3d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv3d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool3d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class CNNEncoder(nn.Module):
def __init__(self, config, n_channels=2):
super(CNNEncoder, self).__init__()
self.n_channels = n_channels
decoder_channels = config.decoder_channels
encoder_channels = config.encoder_channels
self.down_num = config.down_num
self.inc = DoubleConv(n_channels, encoder_channels[0])
self.down1 = Down(encoder_channels[0], encoder_channels[1])
self.down2 = Down(encoder_channels[1], encoder_channels[2])
self.width = encoder_channels[-1]
def forward(self, x):
features = []
x1 = self.inc(x)
features.append(x1)
x2 = self.down1(x1)
features.append(x2)
feats = self.down2(x2)
features.append(feats)
feats_down = feats
for i in range(self.down_num):
feats_down = nn.MaxPool3d(2)(feats_down)
features.append(feats_down)
return feats, features[::-1]
class RegistrationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1):
conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2)
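        # Near-zero weight and zero bias initialization keeps the predicted flow field close to
        # the identity transform at the start of training.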
conv3d.weight = nn.Parameter(Normal(0, 1e-5).sample(conv3d.weight.shape))
conv3d.bias = nn.Parameter(torch.zeros(conv3d.bias.shape))
super().__init__(conv3d)
class ViTVNet(nn.Module):
def __init__(self, config, img_size=(64, 256, 256), int_steps=7, vis=False):
super(ViTVNet, self).__init__()
self.transformer = Transformer(config, img_size, vis)
self.decoder = DecoderCup(config, img_size)
self.reg_head = RegistrationHead(
in_channels=config.decoder_channels[-1],
out_channels=config['n_dims'],
kernel_size=3,
)
self.spatial_trans = SpatialTransformer(img_size)
self.config = config
#self.integrate = VecInt(img_size, int_steps)
def forward(self, x):
source = x[:,0:1,:,:]
x, attn_weights, features = self.transformer(x) # (B, n_patch, hidden)
x = self.decoder(x, features)
flow = self.reg_head(x)
#flow = self.integrate(flow)
out = self.spatial_trans(source, flow)
return out, flow
class VecInt(nn.Module):
"""
Integrates a vector field via scaling and squaring.
Obtained from https://github.com/voxelmorph/voxelmorph
"""
def __init__(self, inshape, nsteps):
super().__init__()
assert nsteps >= 0, 'nsteps should be >= 0, found: %d' % nsteps
self.nsteps = nsteps
self.scale = 1.0 / (2 ** self.nsteps)
self.transformer = SpatialTransformer(inshape)
def forward(self, vec):
vec = vec * self.scale
for _ in range(self.nsteps):
vec = vec + self.transformer(vec, vec)
return vec
CONFIGS = {
'ViT-V-Net': configs.get_3DReg_config(),
}
| 36.311258
| 200
| 0.610615
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as nnf
from torch.nn import Dropout, Softmax, Linear, Conv3d, LayerNorm
from torch.nn.modules.utils import _pair, _triple
import configs as configs
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
def np2th(weights, conv=False):
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class Attention(nn.Module):
def __init__(self, config, vis):
super(Attention, self).__init__()
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.out = Linear(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"])
self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size)
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
def __init__(self, config, img_size):
super(Embeddings, self).__init__()
self.config = config
down_factor = config.down_factor
patch_size = _triple(config.patches["size"])
n_patches = int((img_size[0]/2**down_factor// patch_size[0]) * (img_size[1]/2**down_factor// patch_size[1]) * (img_size[2]/2**down_factor// patch_size[2]))
self.hybrid_model = CNNEncoder(config, n_channels=2)
in_channels = config['encoder_channels'][-1]
self.patch_embeddings = Conv3d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
x, features = self.hybrid_model(x)
x = self.patch_embeddings(x)
x = x.flatten(2)
x = x.transpose(-1, -2)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings, features
class Block(nn.Module):
def __init__(self, config, vis):
super(Block, self).__init__()
self.hidden_size = config.hidden_size
self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn = Mlp(config)
self.attn = Attention(config, vis)
def forward(self, x):
h = x
x = self.attention_norm(x)
x, weights = self.attn(x)
x = x + h
h = x
x = self.ffn_norm(x)
x = self.ffn(x)
x = x + h
return x, weights
class Encoder(nn.Module):
def __init__(self, config, vis):
super(Encoder, self).__init__()
self.vis = vis
self.layer = nn.ModuleList()
self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
for _ in range(config.transformer["num_layers"]):
layer = Block(config, vis)
self.layer.append(copy.deepcopy(layer))
def forward(self, hidden_states):
attn_weights = []
for layer_block in self.layer:
hidden_states, weights = layer_block(hidden_states)
if self.vis:
attn_weights.append(weights)
encoded = self.encoder_norm(hidden_states)
return encoded, attn_weights
class Transformer(nn.Module):
def __init__(self, config, img_size, vis):
super(Transformer, self).__init__()
self.embeddings = Embeddings(config, img_size=img_size)
self.encoder = Encoder(config, vis)
def forward(self, input_ids):
embedding_output, features = self.embeddings(input_ids)
encoded, attn_weights = self.encoder(embedding_output)
return encoded, attn_weights, features
class Conv3dReLU(nn.Sequential):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
padding=0,
stride=1,
use_batchnorm=True,
):
conv = nn.Conv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=not (use_batchnorm),
)
relu = nn.ReLU(inplace=True)
bn = nn.BatchNorm3d(out_channels)
super(Conv3dReLU, self).__init__(conv, bn, relu)
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
skip_channels=0,
use_batchnorm=True,
):
super().__init__()
self.conv1 = Conv3dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.conv2 = Conv3dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
def forward(self, x, skip=None):
x = self.up(x)
if skip is not None:
x = torch.cat([x, skip], dim=1)
x = self.conv1(x)
x = self.conv2(x)
return x
class DecoderCup(nn.Module):
def __init__(self, config, img_size):
super().__init__()
self.config = config
self.down_factor = config.down_factor
head_channels = config.conv_first_channel
self.img_size = img_size
self.conv_more = Conv3dReLU(
config.hidden_size,
head_channels,
kernel_size=3,
padding=1,
use_batchnorm=True,
)
decoder_channels = config.decoder_channels
in_channels = [head_channels] + list(decoder_channels[:-1])
out_channels = decoder_channels
self.patch_size = _triple(config.patches["size"])
skip_channels = self.config.skip_channels
blocks = [
DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in zip(in_channels, out_channels, skip_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, hidden_states, features=None):
B, n_patch, hidden = hidden_states.size()
l, h, w = (self.img_size[0]//2**self.down_factor//self.patch_size[0]), (self.img_size[1]//2**self.down_factor//self.patch_size[1]), (self.img_size[2]//2**self.down_factor//self.patch_size[2])
x = hidden_states.permute(0, 2, 1)
x = x.contiguous().view(B, hidden, l, h, w)
x = self.conv_more(x)
for i, decoder_block in enumerate(self.blocks):
if features is not None:
skip = features[i] if (i < self.config.n_skip) else None
else:
skip = None
x = decoder_block(x, skip=skip)
return x
class SpatialTransformer(nn.Module):
def __init__(self, size, mode='bilinear'):
super().__init__()
self.mode = mode
vectors = [torch.arange(0, s) for s in size]
grids = torch.meshgrid(vectors)
grid = torch.stack(grids)
grid = torch.unsqueeze(grid, 0)
grid = grid.type(torch.FloatTensor)
self.register_buffer('grid', grid)
def forward(self, src, flow):
new_locs = self.grid + flow
shape = flow.shape[2:]
for i in range(len(shape)):
new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)
if len(shape) == 2:
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
elif len(shape) == 3:
new_locs = new_locs.permute(0, 2, 3, 4, 1)
new_locs = new_locs[..., [2, 1, 0]]
return nnf.grid_sample(src, new_locs, align_corners=True, mode=self.mode)
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv3d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv3d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool3d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class CNNEncoder(nn.Module):
def __init__(self, config, n_channels=2):
super(CNNEncoder, self).__init__()
self.n_channels = n_channels
decoder_channels = config.decoder_channels
encoder_channels = config.encoder_channels
self.down_num = config.down_num
self.inc = DoubleConv(n_channels, encoder_channels[0])
self.down1 = Down(encoder_channels[0], encoder_channels[1])
self.down2 = Down(encoder_channels[1], encoder_channels[2])
self.width = encoder_channels[-1]
def forward(self, x):
features = []
x1 = self.inc(x)
features.append(x1)
x2 = self.down1(x1)
features.append(x2)
feats = self.down2(x2)
features.append(feats)
feats_down = feats
for i in range(self.down_num):
feats_down = nn.MaxPool3d(2)(feats_down)
features.append(feats_down)
return feats, features[::-1]
class RegistrationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1):
conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2)
conv3d.weight = nn.Parameter(Normal(0, 1e-5).sample(conv3d.weight.shape))
conv3d.bias = nn.Parameter(torch.zeros(conv3d.bias.shape))
super().__init__(conv3d)
class ViTVNet(nn.Module):
def __init__(self, config, img_size=(64, 256, 256), int_steps=7, vis=False):
super(ViTVNet, self).__init__()
self.transformer = Transformer(config, img_size, vis)
self.decoder = DecoderCup(config, img_size)
self.reg_head = RegistrationHead(
in_channels=config.decoder_channels[-1],
out_channels=config['n_dims'],
kernel_size=3,
)
self.spatial_trans = SpatialTransformer(img_size)
self.config = config
def forward(self, x):
source = x[:,0:1,:,:]
x, attn_weights, features = self.transformer(x)
x = self.decoder(x, features)
flow = self.reg_head(x)
out = self.spatial_trans(source, flow)
return out, flow
class VecInt(nn.Module):
def __init__(self, inshape, nsteps):
super().__init__()
assert nsteps >= 0, 'nsteps should be >= 0, found: %d' % nsteps
self.nsteps = nsteps
self.scale = 1.0 / (2 ** self.nsteps)
self.transformer = SpatialTransformer(inshape)
def forward(self, vec):
vec = vec * self.scale
for _ in range(self.nsteps):
vec = vec + self.transformer(vec, vec)
return vec
CONFIGS = {
'ViT-V-Net': configs.get_3DReg_config(),
}
| true
| true
|
7903828c021b859d78b58676fd998ed8a29a8d64
| 8,651
|
py
|
Python
|
corehq/apps/data_interfaces/tasks.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/data_interfaces/tasks.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/data_interfaces/tasks.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime, timedelta
from typing import List, Optional
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import ugettext as _
from celery.schedules import crontab
from celery.task import periodic_task, task
from celery.utils.log import get_task_logger
from dimagi.utils.couch import CriticalSection
from corehq.apps.domain.models import Domain
from corehq.apps.domain_migration_flags.api import any_migrations_in_progress
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.motech.repeaters.dbaccessors import (
get_couch_repeat_record_ids_by_payload_id,
get_sql_repeat_records_by_payload_id,
iter_repeat_record_ids_by_repeater,
)
from corehq.motech.repeaters.models import SQLRepeatRecord
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from corehq.toggles import CASE_DEDUPE, DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK
from corehq.util.celery_utils import no_result_task
from corehq.util.decorators import serial_task
from .deduplication import reset_deduplicate_rule, backfill_deduplicate_rule
from .interfaces import FormManagementMode
from .models import (
AUTO_UPDATE_XMLNS,
AutomaticUpdateRule,
CaseDuplicate,
CaseRuleSubmission,
DomainCaseRuleRun,
)
from .utils import (
add_cases_to_case_group,
archive_or_restore_forms,
iter_cases_and_run_rules,
operate_on_payloads,
run_rules_for_case,
)
logger = get_task_logger('data_interfaces')
ONE_HOUR = 60 * 60
def _get_upload_progress_tracker(upload_id):
def _progress_tracker(current, total):
cache.set(upload_id, {
'inProgress': True,
'current': current,
'total': total,
}, ONE_HOUR)
return _progress_tracker
@no_result_task(queue='case_rule_queue', acks_late=True,
soft_time_limit=15 * settings.CELERY_TASK_SOFT_TIME_LIMIT)
def reset_and_backfill_deduplicate_rule_task(domain, rule_id):
if not CASE_DEDUPE.enabled(domain):
return
try:
rule = AutomaticUpdateRule.objects.get(
id=rule_id,
domain=domain,
workflow=AutomaticUpdateRule.WORKFLOW_DEDUPLICATE,
active=True,
deleted=False,
)
except AutomaticUpdateRule.DoesNotExist:
return
AutomaticUpdateRule.clear_caches(rule.domain, AutomaticUpdateRule.WORKFLOW_DEDUPLICATE)
reset_deduplicate_rule(rule)
backfill_deduplicate_rule(domain, rule)
@task(queue='background_queue')
def delete_duplicates_for_cases(case_ids):
CaseDuplicate.bulk_remove_unique_cases(case_ids)
CaseDuplicate.remove_duplicates_for_case_ids(case_ids)
@task(serializer='pickle', ignore_result=True)
def bulk_upload_cases_to_group(upload_id, domain, case_group_id, cases):
results = add_cases_to_case_group(
domain,
case_group_id,
cases,
progress_tracker=_get_upload_progress_tracker(upload_id)
)
cache.set(upload_id, results, ONE_HOUR)
@task(serializer='pickle')
def bulk_form_management_async(archive_or_restore, domain, couch_user, form_ids):
task = bulk_form_management_async
mode = FormManagementMode(archive_or_restore, validate=True)
if not form_ids:
return {'messages': {'errors': [_('No Forms are supplied')]}}
response = archive_or_restore_forms(domain, couch_user.user_id, couch_user.username, form_ids, mode, task)
return response
@periodic_task(serializer='pickle',
run_every=crontab(hour='*', minute=0),
queue=settings.CELERY_PERIODIC_QUEUE,
ignore_result=True
)
def run_case_update_rules(now=None):
domains = (AutomaticUpdateRule
.objects
.filter(active=True, deleted=False, workflow=AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
.values_list('domain', flat=True)
.distinct()
.order_by('domain'))
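    # Only dispatch the per-domain task when the hour being processed matches the domain's
    # configured auto-update hour (settings.RULE_UPDATE_HOUR when the domain has none set).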
hour_to_run = now.hour if now else datetime.utcnow().hour
for domain in domains:
if not any_migrations_in_progress(domain) and not DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK.enabled(domain):
domain_obj = Domain.get_by_name(domain)
if domain_obj.auto_case_update_hour is None:
domain_hour = settings.RULE_UPDATE_HOUR
else:
domain_hour = domain_obj.auto_case_update_hour
if hour_to_run == domain_hour:
run_case_update_rules_for_domain.delay(domain, now)
@task(serializer='pickle', queue='case_rule_queue')
def run_case_update_rules_for_domain(domain, now=None):
now = now or datetime.utcnow()
domain_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
all_rule_case_types = set(domain_rules.values_list('case_type', flat=True))
for case_type in all_rule_case_types:
run_record = DomainCaseRuleRun.objects.create(
domain=domain,
started_on=datetime.utcnow(),
status=DomainCaseRuleRun.STATUS_RUNNING,
case_type=case_type
)
for db in get_db_aliases_for_partitioned_query():
run_case_update_rules_for_domain_and_db.delay(domain, now, run_record.pk, case_type, db=db)
@serial_task(
'{domain}-{case_type}-{db}',
timeout=36 * 60 * 60,
max_retries=0,
queue='case_rule_queue',
)
def run_case_update_rules_for_domain_and_db(domain, now, run_id, case_type, db=None):
all_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
rules = list(all_rules.filter(case_type=case_type))
boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
iterator = AutomaticUpdateRule.iter_cases(domain, case_type, boundary_date, db=db)
run = iter_cases_and_run_rules(domain, iterator, rules, now, run_id, case_type, db)
if run.status == DomainCaseRuleRun.STATUS_FINISHED:
for rule in rules:
AutomaticUpdateRule.objects.filter(pk=rule.pk).update(last_run=now)
@task(serializer='pickle', queue='background_queue', acks_late=True, ignore_result=True)
def run_case_update_rules_on_save(case):
key = 'case-update-on-save-case-{case}'.format(case=case.case_id)
with CriticalSection([key]):
update_case = True
if case.xform_ids:
last_form = FormAccessors(case.domain).get_form(case.xform_ids[-1])
update_case = last_form.xmlns != AUTO_UPDATE_XMLNS
if update_case:
rules = AutomaticUpdateRule.by_domain(case.domain,
AutomaticUpdateRule.WORKFLOW_CASE_UPDATE).filter(case_type=case.type)
now = datetime.utcnow()
run_rules_for_case(case, rules, now)
@periodic_task(run_every=crontab(hour=0, minute=0), queue='case_rule_queue', ignore_result=True)
def delete_old_rule_submission_logs():
start = datetime.utcnow()
max_age = start - timedelta(days=90)
CaseRuleSubmission.objects.filter(created_on__lt=max_age).delete()
@task(serializer='pickle')
def task_operate_on_payloads(
record_ids: List[str],
domain: str,
action, # type: Literal['resend', 'cancel', 'requeue'] # 3.8+
use_sql: bool,
):
return operate_on_payloads(record_ids, domain, action, use_sql,
task=task_operate_on_payloads)
@task(serializer='pickle')
def task_generate_ids_and_operate_on_payloads(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
action, # type: Literal['resend', 'cancel', 'requeue'] # 3.8+
use_sql: bool,
) -> dict:
repeat_record_ids = _get_repeat_record_ids(payload_id, repeater_id, domain,
use_sql)
return operate_on_payloads(repeat_record_ids, domain, action, use_sql,
task=task_generate_ids_and_operate_on_payloads)
def _get_repeat_record_ids(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
use_sql: bool,
) -> List[str]:
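    # payload_id takes precedence over repeater_id; each branch queries either the SQL or the
    # Couch backend depending on use_sql.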
if not payload_id and not repeater_id:
return []
if payload_id:
if use_sql:
records = get_sql_repeat_records_by_payload_id(domain, payload_id)
return [r.id for r in records]
else:
return get_couch_repeat_record_ids_by_payload_id(domain, payload_id)
else:
if use_sql:
queryset = SQLRepeatRecord.objects.filter(
domain=domain,
repeater__repeater_id=repeater_id,
)
return [r['id'] for r in queryset.values('id')]
else:
return list(iter_repeat_record_ids_by_repeater(domain, repeater_id))
| 35.600823
| 114
| 0.715871
|
from datetime import datetime, timedelta
from typing import List, Optional
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import ugettext as _
from celery.schedules import crontab
from celery.task import periodic_task, task
from celery.utils.log import get_task_logger
from dimagi.utils.couch import CriticalSection
from corehq.apps.domain.models import Domain
from corehq.apps.domain_migration_flags.api import any_migrations_in_progress
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.motech.repeaters.dbaccessors import (
get_couch_repeat_record_ids_by_payload_id,
get_sql_repeat_records_by_payload_id,
iter_repeat_record_ids_by_repeater,
)
from corehq.motech.repeaters.models import SQLRepeatRecord
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from corehq.toggles import CASE_DEDUPE, DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK
from corehq.util.celery_utils import no_result_task
from corehq.util.decorators import serial_task
from .deduplication import reset_deduplicate_rule, backfill_deduplicate_rule
from .interfaces import FormManagementMode
from .models import (
AUTO_UPDATE_XMLNS,
AutomaticUpdateRule,
CaseDuplicate,
CaseRuleSubmission,
DomainCaseRuleRun,
)
from .utils import (
add_cases_to_case_group,
archive_or_restore_forms,
iter_cases_and_run_rules,
operate_on_payloads,
run_rules_for_case,
)
logger = get_task_logger('data_interfaces')
ONE_HOUR = 60 * 60
def _get_upload_progress_tracker(upload_id):
def _progress_tracker(current, total):
cache.set(upload_id, {
'inProgress': True,
'current': current,
'total': total,
}, ONE_HOUR)
return _progress_tracker
@no_result_task(queue='case_rule_queue', acks_late=True,
soft_time_limit=15 * settings.CELERY_TASK_SOFT_TIME_LIMIT)
def reset_and_backfill_deduplicate_rule_task(domain, rule_id):
if not CASE_DEDUPE.enabled(domain):
return
try:
rule = AutomaticUpdateRule.objects.get(
id=rule_id,
domain=domain,
workflow=AutomaticUpdateRule.WORKFLOW_DEDUPLICATE,
active=True,
deleted=False,
)
except AutomaticUpdateRule.DoesNotExist:
return
AutomaticUpdateRule.clear_caches(rule.domain, AutomaticUpdateRule.WORKFLOW_DEDUPLICATE)
reset_deduplicate_rule(rule)
backfill_deduplicate_rule(domain, rule)
@task(queue='background_queue')
def delete_duplicates_for_cases(case_ids):
CaseDuplicate.bulk_remove_unique_cases(case_ids)
CaseDuplicate.remove_duplicates_for_case_ids(case_ids)
@task(serializer='pickle', ignore_result=True)
def bulk_upload_cases_to_group(upload_id, domain, case_group_id, cases):
results = add_cases_to_case_group(
domain,
case_group_id,
cases,
progress_tracker=_get_upload_progress_tracker(upload_id)
)
cache.set(upload_id, results, ONE_HOUR)
@task(serializer='pickle')
def bulk_form_management_async(archive_or_restore, domain, couch_user, form_ids):
task = bulk_form_management_async
mode = FormManagementMode(archive_or_restore, validate=True)
if not form_ids:
return {'messages': {'errors': [_('No Forms are supplied')]}}
response = archive_or_restore_forms(domain, couch_user.user_id, couch_user.username, form_ids, mode, task)
return response
@periodic_task(serializer='pickle',
run_every=crontab(hour='*', minute=0),
queue=settings.CELERY_PERIODIC_QUEUE,
ignore_result=True
)
def run_case_update_rules(now=None):
domains = (AutomaticUpdateRule
.objects
.filter(active=True, deleted=False, workflow=AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
.values_list('domain', flat=True)
.distinct()
.order_by('domain'))
hour_to_run = now.hour if now else datetime.utcnow().hour
for domain in domains:
if not any_migrations_in_progress(domain) and not DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK.enabled(domain):
domain_obj = Domain.get_by_name(domain)
if domain_obj.auto_case_update_hour is None:
domain_hour = settings.RULE_UPDATE_HOUR
else:
domain_hour = domain_obj.auto_case_update_hour
if hour_to_run == domain_hour:
run_case_update_rules_for_domain.delay(domain, now)
@task(serializer='pickle', queue='case_rule_queue')
def run_case_update_rules_for_domain(domain, now=None):
now = now or datetime.utcnow()
domain_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
all_rule_case_types = set(domain_rules.values_list('case_type', flat=True))
for case_type in all_rule_case_types:
run_record = DomainCaseRuleRun.objects.create(
domain=domain,
started_on=datetime.utcnow(),
status=DomainCaseRuleRun.STATUS_RUNNING,
case_type=case_type
)
for db in get_db_aliases_for_partitioned_query():
run_case_update_rules_for_domain_and_db.delay(domain, now, run_record.pk, case_type, db=db)
@serial_task(
'{domain}-{case_type}-{db}',
timeout=36 * 60 * 60,
max_retries=0,
queue='case_rule_queue',
)
def run_case_update_rules_for_domain_and_db(domain, now, run_id, case_type, db=None):
all_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
rules = list(all_rules.filter(case_type=case_type))
boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
iterator = AutomaticUpdateRule.iter_cases(domain, case_type, boundary_date, db=db)
run = iter_cases_and_run_rules(domain, iterator, rules, now, run_id, case_type, db)
if run.status == DomainCaseRuleRun.STATUS_FINISHED:
for rule in rules:
AutomaticUpdateRule.objects.filter(pk=rule.pk).update(last_run=now)
@task(serializer='pickle', queue='background_queue', acks_late=True, ignore_result=True)
def run_case_update_rules_on_save(case):
key = 'case-update-on-save-case-{case}'.format(case=case.case_id)
with CriticalSection([key]):
update_case = True
if case.xform_ids:
last_form = FormAccessors(case.domain).get_form(case.xform_ids[-1])
update_case = last_form.xmlns != AUTO_UPDATE_XMLNS
if update_case:
rules = AutomaticUpdateRule.by_domain(case.domain,
AutomaticUpdateRule.WORKFLOW_CASE_UPDATE).filter(case_type=case.type)
now = datetime.utcnow()
run_rules_for_case(case, rules, now)
@periodic_task(run_every=crontab(hour=0, minute=0), queue='case_rule_queue', ignore_result=True)
def delete_old_rule_submission_logs():
start = datetime.utcnow()
max_age = start - timedelta(days=90)
CaseRuleSubmission.objects.filter(created_on__lt=max_age).delete()
@task(serializer='pickle')
def task_operate_on_payloads(
record_ids: List[str],
domain: str,
    action,
    use_sql: bool,
):
return operate_on_payloads(record_ids, domain, action, use_sql,
task=task_operate_on_payloads)
@task(serializer='pickle')
def task_generate_ids_and_operate_on_payloads(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
    action,
    use_sql: bool,
) -> dict:
repeat_record_ids = _get_repeat_record_ids(payload_id, repeater_id, domain,
use_sql)
return operate_on_payloads(repeat_record_ids, domain, action, use_sql,
task=task_generate_ids_and_operate_on_payloads)
def _get_repeat_record_ids(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
use_sql: bool,
) -> List[str]:
if not payload_id and not repeater_id:
return []
if payload_id:
if use_sql:
records = get_sql_repeat_records_by_payload_id(domain, payload_id)
return [r.id for r in records]
else:
return get_couch_repeat_record_ids_by_payload_id(domain, payload_id)
else:
if use_sql:
queryset = SQLRepeatRecord.objects.filter(
domain=domain,
repeater__repeater_id=repeater_id,
)
return [r['id'] for r in queryset.values('id')]
else:
return list(iter_repeat_record_ids_by_repeater(domain, repeater_id))
| true
| true
|
790383512eea98d223ed4ed7d48e9d8d7c50f2b1
| 1,254
|
py
|
Python
|
_Sensation0/DeltaTime.py
|
Geson-anko/JARVIS3
|
bc599a352401a7e135ebaabead4d8e6d8835747e
|
[
"MIT"
] | null | null | null |
_Sensation0/DeltaTime.py
|
Geson-anko/JARVIS3
|
bc599a352401a7e135ebaabead4d8e6d8835747e
|
[
"MIT"
] | null | null | null |
_Sensation0/DeltaTime.py
|
Geson-anko/JARVIS3
|
bc599a352401a7e135ebaabead4d8e6d8835747e
|
[
"MIT"
] | null | null | null |
import os
import torch
import os
import random
from torch.nn import(
Module,Linear,LayerNorm
)
import math
from .AutoEncoder import Encoder
class DeltaT(Module):
def __init__(self):
super().__init__()
self.reset_seed()
self.elem = math.prod(Encoder().output_size)
self.input_size = (1,self.elem)
self.output_size = (1,1)
## Model layers
self.dense1 = Linear(self.elem,512)
self.norm1= LayerNorm(512)
self.dense2 = Linear(512,256)
self.norm2 = LayerNorm(256)
self.dense3 = Linear(256,1)
def forward(self,x1,x2):
#x1,x2 = x1.unsqueeze(1),x2.unsqueeze(1)
#x = torch.cat([x1,x2],dim=1)
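        # The two encodings are combined by elementwise subtraction (the lines above are a
        # commented-out concatenation variant).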
x = x1 - x2
x = torch.relu(self.norm1(self.dense1(x)))
x = x.view(x.size(0),-1)
x = torch.relu(self.norm2(self.dense2(x)))
x = torch.relu(self.dense3(x))
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if __name__ == '__main__':
from torchsummaryX import summary
model = DeltaT()
dummy = torch.randn(model.input_size)
print(summary(model,dummy,dummy))
| 27.26087
| 52
| 0.605263
|
import os
import torch
import os
import random
from torch.nn import(
Module,Linear,LayerNorm
)
import math
from .AutoEncoder import Encoder
class DeltaT(Module):
def __init__(self):
super().__init__()
self.reset_seed()
self.elem = math.prod(Encoder().output_size)
self.input_size = (1,self.elem)
self.output_size = (1,1)
        self.dense1 = Linear(self.elem,512)
self.norm1= LayerNorm(512)
self.dense2 = Linear(512,256)
self.norm2 = LayerNorm(256)
self.dense3 = Linear(256,1)
def forward(self,x1,x2):
x = x1 - x2
x = torch.relu(self.norm1(self.dense1(x)))
x = x.view(x.size(0),-1)
x = torch.relu(self.norm2(self.dense2(x)))
x = torch.relu(self.dense3(x))
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if __name__ == '__main__':
from torchsummaryX import summary
model = DeltaT()
dummy = torch.randn(model.input_size)
print(summary(model,dummy,dummy))
| true
| true
|
79038418e5bde28dec129e68980705856ef4a7fe
| 2,622
|
py
|
Python
|
nicenquickplotlib/config_types.py
|
SengerM/nicenquickplotlib
|
9aeebcd07b581598c418dd47593d3c218bca6ebb
|
[
"MIT"
] | 2
|
2018-11-27T18:45:42.000Z
|
2019-02-20T20:53:17.000Z
|
nicenquickplotlib/config_types.py
|
SengerM/nicenquickplotlib
|
9aeebcd07b581598c418dd47593d3c218bca6ebb
|
[
"MIT"
] | null | null | null |
nicenquickplotlib/config_types.py
|
SengerM/nicenquickplotlib
|
9aeebcd07b581598c418dd47593d3c218bca6ebb
|
[
"MIT"
] | 1
|
2021-11-16T06:01:41.000Z
|
2021-11-16T06:01:41.000Z
|
from numbers import Number
import yaml
from .color_tools import hex2rgb
def __default_grid__(ax):
"""This is a temporary function"""
ax.grid(b=True, which='major', color='#000000', alpha=0.2, linestyle='-', linewidth=0.5)
ax.grid(b=True, which='minor', color='#000000', alpha=0.1, linestyle='-', linewidth=0.25)
ax.minorticks_on() # Enables minor ticks without text, only the ticks.
class FigStyle:
def __init__(self, config_file):
self.__width = None
self.__ratio = None
self.__hspace = None
self.__colors = [None]
self.__linestyles = [None]
self.__markers = [None]
self.__grid = __default_grid__
self.__main_color = None
self.read_config_file(config_file) # This is what actually initializes the values.
@property
def colors(self):
return self.__colors
@property
def width(self):
return self.__width
@property
def ratio(self):
return self.__ratio
@property
def hspace(self):
return self.__hspace
@property
def grid(self):
return self.__grid
@property
def linestyles(self):
return self.__linestyles
@property
def markers(self):
return self.__markers
@property
def main_color(self):
return self.__main_color
def read_config_file(self, filename):
if not isinstance(filename, str):
raise ValueError('"file_name" must be a string')
with open(filename, 'r') as stream:
try:
data = yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
if 'width' not in data:
raise ValueError('The "figstyle" file must have a "width" field')
self.__width = float(data['width'])
if 'ratio' not in data:
raise ValueError('The "figstyle" file must have a "ratio" field')
if isinstance(data['ratio'], list) and len(data['ratio']) == 2 and isinstance(data['ratio'][0], Number) and isinstance(data['ratio'][1], Number):
self.__ratio = data['ratio']
else:
raise ValueError('Error reading "' + filename + '": ratio must be a list of two numbers [x_ratio, y_ratio]')
if 'hspace' not in data:
raise ValueError('The "figstyle" file must have a "hspace" field')
self.__hspace = float(data['hspace'])
if isinstance(data['colors'], list):
self.__colors = [None]*len(data['colors'])
for k in range(len(data['colors'])):
self.__colors[k] = hex2rgb(data['colors'][k])
if 'linestyles' in data:
if isinstance(data['linestyles'], list):
self.__linestyles = data['linestyles']
if 'markers' in data:
if isinstance(data['markers'], list):
self.__markers = data['markers']
if 'main_color' in data:
if isinstance(data['main_color'], str):
self.__main_color = hex2rgb(data['main_color'])
| 29.460674
| 147
| 0.691838
|
from numbers import Number
import yaml
from .color_tools import hex2rgb
def __default_grid__(ax):
ax.grid(b=True, which='major', color='#000000', alpha=0.2, linestyle='-', linewidth=0.5)
ax.grid(b=True, which='minor', color='#000000', alpha=0.1, linestyle='-', linewidth=0.25)
ax.minorticks_on()
class FigStyle:
def __init__(self, config_file):
self.__width = None
self.__ratio = None
self.__hspace = None
self.__colors = [None]
self.__linestyles = [None]
self.__markers = [None]
self.__grid = __default_grid__
self.__main_color = None
self.read_config_file(config_file)
@property
def colors(self):
return self.__colors
@property
def width(self):
return self.__width
@property
def ratio(self):
return self.__ratio
@property
def hspace(self):
return self.__hspace
@property
def grid(self):
return self.__grid
@property
def linestyles(self):
return self.__linestyles
@property
def markers(self):
return self.__markers
@property
def main_color(self):
return self.__main_color
def read_config_file(self, filename):
if not isinstance(filename, str):
raise ValueError('"file_name" must be a string')
with open(filename, 'r') as stream:
try:
data = yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
if 'width' not in data:
raise ValueError('The "figstyle" file must have a "width" field')
self.__width = float(data['width'])
if 'ratio' not in data:
raise ValueError('The "figstyle" file must have a "ratio" field')
if isinstance(data['ratio'], list) and len(data['ratio']) == 2 and isinstance(data['ratio'][0], Number) and isinstance(data['ratio'][1], Number):
self.__ratio = data['ratio']
else:
raise ValueError('Error reading "' + filename + '": ratio must be a list of two numbers [x_ratio, y_ratio]')
if 'hspace' not in data:
raise ValueError('The "figstyle" file must have a "hspace" field')
self.__hspace = float(data['hspace'])
if isinstance(data['colors'], list):
self.__colors = [None]*len(data['colors'])
for k in range(len(data['colors'])):
self.__colors[k] = hex2rgb(data['colors'][k])
if 'linestyles' in data:
if isinstance(data['linestyles'], list):
self.__linestyles = data['linestyles']
if 'markers' in data:
if isinstance(data['markers'], list):
self.__markers = data['markers']
if 'main_color' in data:
if isinstance(data['main_color'], str):
self.__main_color = hex2rgb(data['main_color'])
| true
| true
|
7903850178306759b658bcc32156d19cd337843b
| 19,386
|
py
|
Python
|
CNN/CNNProcessData.py
|
soybase/DroneImageScripts
|
c077325a868237569592bd3820b3d873eddb4f83
|
[
"MIT"
] | 3
|
2019-08-04T06:11:15.000Z
|
2021-01-20T11:48:05.000Z
|
CNN/CNNProcessData.py
|
soybase/DroneImageScripts
|
c077325a868237569592bd3820b3d873eddb4f83
|
[
"MIT"
] | null | null | null |
CNN/CNNProcessData.py
|
soybase/DroneImageScripts
|
c077325a868237569592bd3820b3d873eddb4f83
|
[
"MIT"
] | 3
|
2019-08-04T06:11:18.000Z
|
2021-02-18T13:21:58.000Z
|
# import the necessary packages
import sys
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
class CNNProcessData:
def __init__(self):
pass
def get_imagedatagenerator(self):
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
#rotation_range=20,
#width_shift_range=0.05,
#height_shift_range=0.05,
#horizontal_flip=True,
# vertical_flip=True,
#brightness_range=[0.8,1.2]
)
return datagen
def generate_croppings(self, testX, testY, image_size, number):
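        # Produce 11 fixed crops per image (the four corners, the center, and six windows centered
        # on the left/right edges at three heights), then repeat each label 11 times so X and Y
        # stay aligned.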
if number != 11:
raise Exception("Only implemented for number = 11 right now")
augmented_testX_1 = []
augmented_testX_2 = []
augmented_testX_3 = []
augmented_testX_4 = []
augmented_testX_5 = []
augmented_testX_6 = []
augmented_testX_7 = []
augmented_testX_8 = []
augmented_testX_9 = []
augmented_testX_10 = []
augmented_testX_11 = []
mid_image_size = int(round(image_size/2))
for img in testX:
height = img.shape[0]
small_height = int(round(height*0.1))
mid_height = int(round(height/2))
width = img.shape[1]
mid_width = int(round(width/2))
crop_img1 = img[height-image_size:height, 0:image_size]
crop_img2 = img[height-image_size:height, width-image_size:width]
crop_img3 = img[0:image_size, width-image_size:width]
crop_img4 = img[0:image_size, 0:image_size]
crop_img5 = img[mid_height-mid_image_size:mid_height+mid_image_size, mid_width-mid_image_size:mid_width+mid_image_size]
crop_img6 = img[mid_height-mid_image_size:mid_height+mid_image_size, 0:image_size]
crop_img7 = img[mid_height-mid_image_size:mid_height+mid_image_size, width-image_size:width]
crop_img8 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, 0:image_size]
crop_img9 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, width-image_size:width]
crop_img10 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, 0:image_size]
crop_img11 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, width-image_size:width]
augmented_testX_1.append(crop_img1)
augmented_testX_2.append(crop_img2)
augmented_testX_3.append(crop_img3)
augmented_testX_4.append(crop_img4)
augmented_testX_5.append(crop_img5)
augmented_testX_6.append(crop_img6)
augmented_testX_7.append(crop_img7)
augmented_testX_8.append(crop_img8)
augmented_testX_9.append(crop_img9)
augmented_testX_10.append(crop_img10)
augmented_testX_11.append(crop_img11)
augmented_testX_1 = np.array(augmented_testX_1)
augmented_testX_2 = np.array(augmented_testX_2)
augmented_testX_3 = np.array(augmented_testX_3)
augmented_testX_4 = np.array(augmented_testX_4)
augmented_testX_5 = np.array(augmented_testX_5)
augmented_testX_6 = np.array(augmented_testX_6)
augmented_testX_7 = np.array(augmented_testX_7)
augmented_testX_8 = np.array(augmented_testX_8)
augmented_testX_9 = np.array(augmented_testX_9)
augmented_testX_10 = np.array(augmented_testX_10)
augmented_testX_11 = np.array(augmented_testX_11)
testX = np.concatenate((augmented_testX_1, augmented_testX_2, augmented_testX_3, augmented_testX_4, augmented_testX_5, augmented_testX_6, augmented_testX_7, augmented_testX_8, augmented_testX_9, augmented_testX_10, augmented_testX_11))
# testXflipped = []
# for img in testX:
# horizontal_flip = cv2.flip( img, 0 )
# testXflipped.append(horizontal_flip)
# testXflipped = np.array(testXflipped)
# testX = np.concatenate((testX, testXflipped))
testY = np.repeat(testY, number)
return (testX, testY)
def create_montages(self, images, montage_image_number, image_size, full_montage_image_size):
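        # Tile each group of 4 consecutive images into one 2x2 montage of size
        # full_montage_image_size x full_montage_image_size (only groups of 4 are supported).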
output = []
if montage_image_number == 4:
data = images.reshape(int(len(images)/montage_image_number), montage_image_number, image_size, image_size, 3)
for iter in range(len(data)):
img_set = data[iter]
outputImage = np.zeros((full_montage_image_size, full_montage_image_size, 3))
outputImage[0:image_size, 0:image_size, :] = img_set[0]
outputImage[0:image_size, image_size:2*image_size, :] = img_set[1]
outputImage[image_size:2*image_size, 0:image_size, :] = img_set[2]
outputImage[image_size:2*image_size, image_size:2*image_size, :] = img_set[3]
# cv2.imshow("Result", outputImage)
# cv2.waitKey(0)
# raise Exception('Exit')
output.append(outputImage)
else:
raise Exception('Only implemented to montage 4 images into one image')
return np.array(output)
def process_cnn_data(self, images, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, data_augmentation, data_augmentation_test, montage_image_number, full_montage_image_size, output_autoencoder_model_file_path, log_file_path):
if log_file_path is not None:
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(images)
images = datagen.standardize(images)
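        # Featurewise statistics are fit on the full image set and applied in place before any
        # montage construction or train/test split.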
aux_data["value"] = aux_data["value"].astype(float)
output_image_file = aux_data["output_image_file"].tolist()
# LSTM models group images by time, but are still ties to a single label e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'densenet121_lstm_imagenet':
images = images.reshape(num_unique_stock_ids * num_unique_image_types, num_unique_time_days, input_image_size, input_image_size, 3)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
trainX_length = len(train_images)
testX_length = len(test_images)
train_images = train_images.reshape(trainX_length * num_unique_time_days, input_image_size, input_image_size, 3)
test_images = test_images.reshape(testX_length * num_unique_time_days, input_image_size, input_image_size, 3)
trainX_length_flat = len(train_images)
test_images = datagen.standardize(test_images)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
testX_resized = []
for img in test_images:
testX_resized.append(cv2.resize(img, (image_size, image_size)))
test_images = np.array(testX_resized)
test_images = test_images.reshape(data_augmentation_test * testX_length, num_unique_time_days, image_size, image_size, 3)
# trainX_aug = []
# trainY_aug = []
# augmented = datagen.flow(train_images, train_aux_data, batch_size=trainX_length_flat)
# for i in range(0, data_augmentation):
# X, y = augmented.next()
# if len(trainX_aug) == 0:
# trainX_aug = X
# trainY_aug = y
# else:
# trainX_aug = np.concatenate((trainX_aug, X))
# trainY_aug = np.concatenate((trainY_aug, y))
#
# trainX = trainX_aug
# trainY = trainY_aug
trainX_resized = []
for img in train_images:
trainX_resized.append(cv2.resize(img, (image_size, image_size)))
train_images = np.array(trainX_resized)
train_images = train_images.reshape(data_augmentation * trainX_length, num_unique_time_days, image_size, image_size, 3)
else:
images = self.create_montages(images, montage_image_number, image_size, full_montage_image_size)
(encoder, decoder, autoencoder) = self.build_autoencoder(full_montage_image_size, full_montage_image_size, 3)
opt = Adam(lr=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
checkpoint = ModelCheckpoint(filepath=output_autoencoder_model_file_path, monitor='loss', verbose=1, save_best_only=True, mode='min', save_frequency=1, save_weights_only=False)
callbacks_list = [checkpoint]
# train the convolutional autoencoder
H = autoencoder.fit(
train_images, train_images,
validation_data=(test_images, test_images),
epochs=25,
batch_size=32,
callbacks=callbacks_list
)
decoded = autoencoder.predict(images)
output_image_counter = 0
for image in decoded:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, decoded, test_size=0.2)
# testY_length = len(testY)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
# testY = testY.reshape(data_augmentation_test * testY_length, 1)
# augmented = datagen.flow(trainX, trainY, batch_size=len(trainX))
# for i in range(0, data_augmentation):
# X, y = augmented.next()
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
train_stock_id_categorical = stock_id_binarizer.transform(train_aux_data["stock_id"])
test_stock_id_categorical = stock_id_binarizer.transform(test_aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
train_accession_id_categorical = accession_id_binarizer.transform(train_aux_data["accession_id"])
test_accession_id_categorical = accession_id_binarizer.transform(test_aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
train_female_id_categorical = female_id_binarizer.transform(train_aux_data["female_id"])
test_female_id_categorical = female_id_binarizer.transform(test_aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
train_male_id_categorical = male_id_binarizer.transform(train_aux_data["male_id"])
test_male_id_categorical = male_id_binarizer.transform(test_aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
trainContinuous = cs.fit_transform(train_aux_data[continuous])
testContinuous = cs.transform(test_aux_data[continuous])
#trainX = np.hstack((train_stock_id_categorical, train_accession_id_categorical, train_female_id_categorical, train_male_id_categorical, trainContinuous))
#testX = np.hstack((test_stock_id_categorical, test_accession_id_categorical, test_female_id_categorical, test_male_id_categorical, testContinuous))
trainX = trainContinuous
testX = testContinuous
else:
trainX = []
testX = []
        trainX = np.array(trainX)
        testX = np.array(testX)
max_label = aux_data["value"].max()
trainY = train_aux_data["value"]/max_label
testY = test_aux_data["value"]/max_label
train_genotype_files = train_aux_data["genotype_file"].tolist()
test_genotype_files = test_aux_data["genotype_file"].tolist()
train_genotype_data = []
for f in train_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
train_genotype_data.append(np.array(geno_data.iloc[:,0]))
test_genotype_data = []
for f in test_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
test_genotype_data.append(np.array(geno_data.iloc[:,0]))
train_genotype_data = np.array(train_genotype_data)
test_genotype_data = np.array(test_genotype_data)
eprint(train_genotype_data)
eprint(testX)
eprint(trainX)
return (test_images, np.array(testX), testY.to_numpy(), test_genotype_data, train_images, np.array(trainX), trainY.to_numpy(), train_genotype_data)
def process_cnn_data_predictions(self, data, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, input_autoencoder_model_file_path, training_data, data_augmentation_test, montage_image_number, full_montage_image_size):
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(training_data)
data = datagen.standardize(data)
output_image_file = aux_data["output_image_file"].tolist()
data = self.create_montages(data, montage_image_number, image_size, full_montage_image_size)
autoencoder_model = load_model(input_autoencoder_model_file_path)
data = autoencoder_model.predict(data)
#ret = self.generate_croppings(data, None, image_size, data_augmentation_test)
#augmented_data = ret[0]
        # LSTM models group images by time, but are still tied to a single label, e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'KerasCNNLSTMDenseNet121ImageNetWeights':
data = data.reshape(data_augmentation_test * num_unique_stock_ids * num_unique_image_types, num_unique_time_days, image_size, image_size, 3)
output_image_counter = 0
for image in data:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
stock_id_categorical = stock_id_binarizer.transform(aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
accession_id_categorical = accession_id_binarizer.transform(aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
female_id_categorical = female_id_binarizer.transform(aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
male_id_categorical = male_id_binarizer.transform(aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
fitContinuous = cs.fit_transform(aux_data[continuous])
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical, fitContinuous])
fitX = fitContinuous
else:
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical])
fitX = []
fitX = np.array(fitX)
max_label = aux_data["value"].max()
fitY = aux_data["value"]/max_label
genotype_files = aux_data["genotype_file"].tolist()
genotype_data = []
for f in genotype_files:
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
genotype_data.append(np.array(geno_data.iloc[:,0]))
genotype_data = np.array(genotype_data)
return (data, fitX, genotype_data, fitY.to_numpy())
def build_autoencoder(self, width, height, depth, filters=(32, 64), latentDim=16):
inputShape = (height, width, depth)
chanDim = -1
# define the input to the encoder
inputs = Input(shape=inputShape)
x = inputs
# loop over the number of filters
for f in filters:
# apply a CONV => RELU => BN operation
x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# flatten the network and then construct our latent vector
volumeSize = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latentDim)(x)
# build the encoder model
encoder = Model(inputs, latent, name="encoder")
# start building the decoder model which will accept the
# output of the encoder as its inputs
latentInputs = Input(shape=(latentDim,))
x = Dense(np.prod(volumeSize[1:]))(latentInputs)
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)
# loop over our number of filters again, but this time in
# reverse order
for f in filters[::-1]:
# apply a CONV_TRANSPOSE => RELU => BN operation
x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# apply a single CONV_TRANSPOSE layer used to recover the
# original depth of the image
x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
outputs = Activation("sigmoid")(x)
# build the decoder model
decoder = Model(latentInputs, outputs, name="decoder")
# our autoencoder is the encoder + decoder
autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
# return a 3-tuple of the encoder, decoder, and autoencoder
return (encoder, decoder, autoencoder)
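# Minimal usage sketch (illustrative only): builds and compiles the convolutional
# autoencoder defined above. The 48x48 input size and the Adam learning rate are
# placeholder assumptions for demonstration, not values taken from the pipeline above.
if __name__ == "__main__":
    processor = CNNProcessData()
    encoder, decoder, autoencoder = processor.build_autoencoder(48, 48, 3)
    autoencoder.compile(loss="mse", optimizer=Adam(lr=1e-3))
    autoencoder.summary()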
| 46.939467
| 288
| 0.666254
|
import sys
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
class CNNProcessData:
def __init__(self):
pass
def get_imagedatagenerator(self):
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
)
return datagen
def generate_croppings(self, testX, testY, image_size, number):
if number != 11:
raise Exception("Only implemented for number = 11 right now")
augmented_testX_1 = []
augmented_testX_2 = []
augmented_testX_3 = []
augmented_testX_4 = []
augmented_testX_5 = []
augmented_testX_6 = []
augmented_testX_7 = []
augmented_testX_8 = []
augmented_testX_9 = []
augmented_testX_10 = []
augmented_testX_11 = []
mid_image_size = int(round(image_size/2))
for img in testX:
height = img.shape[0]
small_height = int(round(height*0.1))
mid_height = int(round(height/2))
width = img.shape[1]
mid_width = int(round(width/2))
crop_img1 = img[height-image_size:height, 0:image_size]
crop_img2 = img[height-image_size:height, width-image_size:width]
crop_img3 = img[0:image_size, width-image_size:width]
crop_img4 = img[0:image_size, 0:image_size]
crop_img5 = img[mid_height-mid_image_size:mid_height+mid_image_size, mid_width-mid_image_size:mid_width+mid_image_size]
crop_img6 = img[mid_height-mid_image_size:mid_height+mid_image_size, 0:image_size]
crop_img7 = img[mid_height-mid_image_size:mid_height+mid_image_size, width-image_size:width]
crop_img8 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, 0:image_size]
crop_img9 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, width-image_size:width]
crop_img10 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, 0:image_size]
crop_img11 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, width-image_size:width]
augmented_testX_1.append(crop_img1)
augmented_testX_2.append(crop_img2)
augmented_testX_3.append(crop_img3)
augmented_testX_4.append(crop_img4)
augmented_testX_5.append(crop_img5)
augmented_testX_6.append(crop_img6)
augmented_testX_7.append(crop_img7)
augmented_testX_8.append(crop_img8)
augmented_testX_9.append(crop_img9)
augmented_testX_10.append(crop_img10)
augmented_testX_11.append(crop_img11)
augmented_testX_1 = np.array(augmented_testX_1)
augmented_testX_2 = np.array(augmented_testX_2)
augmented_testX_3 = np.array(augmented_testX_3)
augmented_testX_4 = np.array(augmented_testX_4)
augmented_testX_5 = np.array(augmented_testX_5)
augmented_testX_6 = np.array(augmented_testX_6)
augmented_testX_7 = np.array(augmented_testX_7)
augmented_testX_8 = np.array(augmented_testX_8)
augmented_testX_9 = np.array(augmented_testX_9)
augmented_testX_10 = np.array(augmented_testX_10)
augmented_testX_11 = np.array(augmented_testX_11)
testX = np.concatenate((augmented_testX_1, augmented_testX_2, augmented_testX_3, augmented_testX_4, augmented_testX_5, augmented_testX_6, augmented_testX_7, augmented_testX_8, augmented_testX_9, augmented_testX_10, augmented_testX_11))
testY = np.repeat(testY, number)
return (testX, testY)
def create_montages(self, images, montage_image_number, image_size, full_montage_image_size):
output = []
if montage_image_number == 4:
data = images.reshape(int(len(images)/montage_image_number), montage_image_number, image_size, image_size, 3)
for iter in range(len(data)):
img_set = data[iter]
outputImage = np.zeros((full_montage_image_size, full_montage_image_size, 3))
outputImage[0:image_size, 0:image_size, :] = img_set[0]
outputImage[0:image_size, image_size:2*image_size, :] = img_set[1]
outputImage[image_size:2*image_size, 0:image_size, :] = img_set[2]
outputImage[image_size:2*image_size, image_size:2*image_size, :] = img_set[3]
output.append(outputImage)
else:
raise Exception('Only implemented to montage 4 images into one image')
return np.array(output)
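    # Shape sketch (illustrative): with montage_image_number=4, image_size=96 and
    # full_montage_image_size=192 (assumed example values), an input stack of shape
    # (4*N, 96, 96, 3) is regrouped into N sets of four consecutive images, each
    # tiled 2x2 into a single (192, 192, 3) montage, giving an output of shape
    # (N, 192, 192, 3).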
def process_cnn_data(self, images, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, data_augmentation, data_augmentation_test, montage_image_number, full_montage_image_size, output_autoencoder_model_file_path, log_file_path):
if log_file_path is not None:
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(images)
images = datagen.standardize(images)
aux_data["value"] = aux_data["value"].astype(float)
output_image_file = aux_data["output_image_file"].tolist()
if keras_model_type == 'densenet121_lstm_imagenet':
images = images.reshape(num_unique_stock_ids * num_unique_image_types, num_unique_time_days, input_image_size, input_image_size, 3)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
trainX_length = len(train_images)
testX_length = len(test_images)
train_images = train_images.reshape(trainX_length * num_unique_time_days, input_image_size, input_image_size, 3)
test_images = test_images.reshape(testX_length * num_unique_time_days, input_image_size, input_image_size, 3)
trainX_length_flat = len(train_images)
test_images = datagen.standardize(test_images)
testX_resized = []
for img in test_images:
testX_resized.append(cv2.resize(img, (image_size, image_size)))
test_images = np.array(testX_resized)
test_images = test_images.reshape(data_augmentation_test * testX_length, num_unique_time_days, image_size, image_size, 3)
trainX_resized = []
for img in train_images:
trainX_resized.append(cv2.resize(img, (image_size, image_size)))
train_images = np.array(trainX_resized)
train_images = train_images.reshape(data_augmentation * trainX_length, num_unique_time_days, image_size, image_size, 3)
else:
images = self.create_montages(images, montage_image_number, image_size, full_montage_image_size)
(encoder, decoder, autoencoder) = self.build_autoencoder(full_montage_image_size, full_montage_image_size, 3)
opt = Adam(lr=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
checkpoint = ModelCheckpoint(filepath=output_autoencoder_model_file_path, monitor='loss', verbose=1, save_best_only=True, mode='min', save_frequency=1, save_weights_only=False)
callbacks_list = [checkpoint]
H = autoencoder.fit(
train_images, train_images,
validation_data=(test_images, test_images),
epochs=25,
batch_size=32,
callbacks=callbacks_list
)
decoded = autoencoder.predict(images)
output_image_counter = 0
for image in decoded:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, decoded, test_size=0.2)
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
train_stock_id_categorical = stock_id_binarizer.transform(train_aux_data["stock_id"])
test_stock_id_categorical = stock_id_binarizer.transform(test_aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
train_accession_id_categorical = accession_id_binarizer.transform(train_aux_data["accession_id"])
test_accession_id_categorical = accession_id_binarizer.transform(test_aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
train_female_id_categorical = female_id_binarizer.transform(train_aux_data["female_id"])
test_female_id_categorical = female_id_binarizer.transform(test_aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
train_male_id_categorical = male_id_binarizer.transform(train_aux_data["male_id"])
test_male_id_categorical = male_id_binarizer.transform(test_aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
trainContinuous = cs.fit_transform(train_aux_data[continuous])
testContinuous = cs.transform(test_aux_data[continuous])
trainX = trainContinuous
testX = testContinuous
else:
trainX = []
testX = []
        trainX = np.array(trainX)
        testX = np.array(testX)
max_label = aux_data["value"].max()
trainY = train_aux_data["value"]/max_label
testY = test_aux_data["value"]/max_label
train_genotype_files = train_aux_data["genotype_file"].tolist()
test_genotype_files = test_aux_data["genotype_file"].tolist()
train_genotype_data = []
for f in train_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
train_genotype_data.append(np.array(geno_data.iloc[:,0]))
test_genotype_data = []
for f in test_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
test_genotype_data.append(np.array(geno_data.iloc[:,0]))
train_genotype_data = np.array(train_genotype_data)
test_genotype_data = np.array(test_genotype_data)
eprint(train_genotype_data)
eprint(testX)
eprint(trainX)
return (test_images, np.array(testX), testY.to_numpy(), test_genotype_data, train_images, np.array(trainX), trainY.to_numpy(), train_genotype_data)
def process_cnn_data_predictions(self, data, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, input_autoencoder_model_file_path, training_data, data_augmentation_test, montage_image_number, full_montage_image_size):
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(training_data)
data = datagen.standardize(data)
output_image_file = aux_data["output_image_file"].tolist()
data = self.create_montages(data, montage_image_number, image_size, full_montage_image_size)
autoencoder_model = load_model(input_autoencoder_model_file_path)
data = autoencoder_model.predict(data)
if keras_model_type == 'KerasCNNLSTMDenseNet121ImageNetWeights':
data = data.reshape(data_augmentation_test * num_unique_stock_ids * num_unique_image_types, num_unique_time_days, image_size, image_size, 3)
output_image_counter = 0
for image in data:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
stock_id_categorical = stock_id_binarizer.transform(aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
accession_id_categorical = accession_id_binarizer.transform(aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
female_id_categorical = female_id_binarizer.transform(aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
male_id_categorical = male_id_binarizer.transform(aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
fitContinuous = cs.fit_transform(aux_data[continuous])
fitX = fitContinuous
else:
fitX = []
fitX = np.array(fitX)
max_label = aux_data["value"].max()
fitY = aux_data["value"]/max_label
genotype_files = aux_data["genotype_file"].tolist()
genotype_data = []
for f in genotype_files:
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
genotype_data.append(np.array(geno_data.iloc[:,0]))
genotype_data = np.array(genotype_data)
return (data, fitX, genotype_data, fitY.to_numpy())
def build_autoencoder(self, width, height, depth, filters=(32, 64), latentDim=16):
inputShape = (height, width, depth)
chanDim = -1
inputs = Input(shape=inputShape)
x = inputs
for f in filters:
x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
volumeSize = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latentDim)(x)
encoder = Model(inputs, latent, name="encoder")
latentInputs = Input(shape=(latentDim,))
x = Dense(np.prod(volumeSize[1:]))(latentInputs)
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)
for f in filters[::-1]:
x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
outputs = Activation("sigmoid")(x)
decoder = Model(latentInputs, outputs, name="decoder")
autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
return (encoder, decoder, autoencoder)
| true
| true
|
790387b7cc575a1aa9ebe6c0b52b791947041c8f
| 7,931
|
py
|
Python
|
napari_animation/_qt/animation_widget.py
|
tlambert-forks/napari-animation
|
8c7119e69933bcba8f0263d3cab966f373a7cc24
|
[
"BSD-3-Clause"
] | null | null | null |
napari_animation/_qt/animation_widget.py
|
tlambert-forks/napari-animation
|
8c7119e69933bcba8f0263d3cab966f373a7cc24
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T23:29:26.000Z
|
2021-05-26T23:29:26.000Z
|
napari_animation/_qt/animation_widget.py
|
tlambert-forks/napari-animation
|
8c7119e69933bcba8f0263d3cab966f373a7cc24
|
[
"BSD-3-Clause"
] | null | null | null |
from pathlib import Path
from napari import Viewer
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QErrorMessage,
QFileDialog,
QPushButton,
QVBoxLayout,
QWidget,
)
from ..animation import Animation
from .animationslider_widget import AnimationSliderWidget
from .frame_widget import FrameWidget
from .keyframelistcontrol_widget import KeyFrameListControlWidget
from .keyframeslist_widget import KeyFramesListWidget
class AnimationWidget(QWidget):
"""Widget for interatviely making animations using the napari viewer.
Parameters
----------
viewer : napari.Viewer
napari viewer.
Attributes
----------
viewer : napari.Viewer
napari viewer.
animation : napari_animation.Animation
napari-animation animation in sync with the GUI.
"""
def __init__(self, viewer: Viewer, parent=None):
super().__init__(parent=parent)
# Store reference to viewer and create animation
self.viewer = viewer
self.animation = Animation(self.viewer)
# Initialise UI
self._init_ui()
# establish key bindings and callbacks
self._add_keybind_callbacks()
self._add_callbacks()
def _init_ui(self):
"""Initialise user interface"""
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._init_keyframes_list_control_widget()
self._init_keyframes_list_widget()
self._init_frame_widget()
self._init_save_button()
self._init_animation_slider_widget()
def _add_keybind_callbacks(self):
"""Bind keys"""
self.animation.viewer.bind_key(
"Alt-f", self._capture_keyframe_callback
)
self.animation.viewer.bind_key(
"Alt-r", self._replace_keyframe_callback
)
self.animation.viewer.bind_key("Alt-d", self._delete_keyframe_callback)
self.animation.viewer.bind_key("Alt-a", self._key_adv_frame)
self.animation.viewer.bind_key("Alt-b", self._key_back_frame)
def _add_callbacks(self):
"""Establish callbacks"""
self.keyframesListControlWidget.deleteButton.clicked.connect(
self._delete_keyframe_callback
)
self.keyframesListControlWidget.captureButton.clicked.connect(
self._capture_keyframe_callback
)
self.saveButton.clicked.connect(self._save_callback)
self.animationsliderWidget.valueChanged.connect(
self._move_animationslider_callback
)
self.viewer.events.theme.connect(
lambda e: self.keyframesListWidget._update_theme(e.value)
)
def _release_callbacks(self):
"""Release keys"""
self.animation.viewer.bind_key("Alt-f", None)
self.animation.viewer.bind_key("Alt-r", None)
self.animation.viewer.bind_key("Alt-d", None)
self.animation.viewer.bind_key("Alt-a", None)
self.animation.viewer.bind_key("Alt-b", None)
def _init_frame_widget(self):
self.frameWidget = FrameWidget(parent=self)
self._layout.addWidget(self.frameWidget)
def _init_keyframes_list_control_widget(self):
self.keyframesListControlWidget = KeyFrameListControlWidget(
animation=self.animation, parent=self
)
self._layout.addWidget(self.keyframesListControlWidget)
def _init_keyframes_list_widget(self):
self.keyframesListWidget = KeyFramesListWidget(
self.animation, parent=self
)
self.keyframesListWidget._update_theme(self.viewer.theme)
self._layout.addWidget(self.keyframesListWidget)
def _init_save_button(self):
self.saveButton = QPushButton("Save Animation", parent=self)
self._layout.addWidget(self.saveButton)
def _init_animation_slider_widget(self):
self.animationsliderWidget = AnimationSliderWidget(
self.animation, orientation=Qt.Horizontal, parent=self
)
self._layout.addWidget(self.animationsliderWidget)
def _get_interpolation_steps(self):
return int(self.frameWidget.stepsSpinBox.value())
def _get_easing_function(self):
return self.frameWidget.get_easing_func()
def _capture_keyframe_callback(self, event=None):
"""Record current key-frame"""
self.animation.capture_keyframe(
steps=self._get_interpolation_steps(),
ease=self._get_easing_function(),
)
if len(self.animation.key_frames) == 1:
self.keyframesListControlWidget.deleteButton.setEnabled(True)
self.keyframesListWidget.setEnabled(True)
self.frameWidget.setEnabled(True)
self.animationsliderWidget.requires_update = True
def _update_frame_widget_from_animation(self):
self.frameWidget.update_from_animation()
def _replace_keyframe_callback(self, event=None):
"""Replace current key-frame with new view"""
self.animation.capture_keyframe(
steps=self._get_interpolation_steps(),
ease=self._get_easing_function(),
insert=False,
)
self.animationsliderWidget.requires_update = True
def _delete_keyframe_callback(self, event=None):
"""Delete current key-frame"""
if len(self.animation.key_frames) > 0:
self.animation.key_frames.pop(self.animation.frame)
if len(self.animation.key_frames) == 0:
self.keyframesListControlWidget.deleteButton.setEnabled(False)
self.keyframesListWidget.setEnabled(False)
self.frameWidget.setEnabled(False)
self.animationsliderWidget.requires_update = True
def _key_adv_frame(self, event=None):
"""Go forwards in key-frame list"""
new_frame = (self.animation.frame + 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self.keyframesListWidget.setCurrentRow(new_frame)
def _key_back_frame(self, event=None):
"""Go backwards in key-frame list"""
new_frame = (self.animation.frame - 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self.keyframesListWidget.setCurrentRow(new_frame)
def _save_callback(self, event=None):
if len(self.animation.key_frames) < 2:
error_dialog = QErrorMessage()
error_dialog.showMessage(
f"You need at least two key frames to generate \
                an animation. You only have {len(self.animation.key_frames)}"
)
error_dialog.exec_()
else:
filters = (
"Video files (*.mp4 *.gif *.mov *.avi *.mpg *.mpeg *.mkv *.wmv)"
";;Folder of PNGs (*)" # sep filters with ";;"
)
filename, _filter = QFileDialog.getSaveFileName(
self, "Save animation", str(Path.home()), filters
)
if filename:
self.animation.animate(filename)
def _move_animationslider_callback(self, event=None):
"""Scroll through interpolated states. Computes states if key-frames changed"""
self.animationsliderWidget.synchronise()
new_frame = self.animationsliderWidget.value()
self.animation._set_viewer_state(
self.animationsliderWidget.interpol_states[new_frame]
)
# This gets the index of the first key frame whose frame count is above new_frame
new_key_frame = (
self.animationsliderWidget.cumulative_frame_count > new_frame
).argmax()
new_key_frame -= 1 # to get the previous key frame
new_key_frame = int(new_key_frame) # to enable slicing a list with it
self.keyframesListWidget.setCurrentRowBlockingSignals(new_key_frame)
self.animation.frame = new_key_frame
def close(self):
self._release_callbacks()
super().close()
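# Minimal usage sketch (illustrative only): docks the widget into a napari viewer.
# `add_dock_widget` and `napari.run` are standard napari APIs; the dock area chosen
# here is an arbitrary example.
if __name__ == "__main__":
    import napari

    viewer = napari.Viewer()
    viewer.window.add_dock_widget(AnimationWidget(viewer), area="right")
    napari.run()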
| 35.565022
| 89
| 0.668012
|
from pathlib import Path
from napari import Viewer
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QErrorMessage,
QFileDialog,
QPushButton,
QVBoxLayout,
QWidget,
)
from ..animation import Animation
from .animationslider_widget import AnimationSliderWidget
from .frame_widget import FrameWidget
from .keyframelistcontrol_widget import KeyFrameListControlWidget
from .keyframeslist_widget import KeyFramesListWidget
class AnimationWidget(QWidget):
def __init__(self, viewer: Viewer, parent=None):
super().__init__(parent=parent)
self.viewer = viewer
self.animation = Animation(self.viewer)
self._init_ui()
self._add_keybind_callbacks()
self._add_callbacks()
def _init_ui(self):
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._init_keyframes_list_control_widget()
self._init_keyframes_list_widget()
self._init_frame_widget()
self._init_save_button()
self._init_animation_slider_widget()
def _add_keybind_callbacks(self):
self.animation.viewer.bind_key(
"Alt-f", self._capture_keyframe_callback
)
self.animation.viewer.bind_key(
"Alt-r", self._replace_keyframe_callback
)
self.animation.viewer.bind_key("Alt-d", self._delete_keyframe_callback)
self.animation.viewer.bind_key("Alt-a", self._key_adv_frame)
self.animation.viewer.bind_key("Alt-b", self._key_back_frame)
def _add_callbacks(self):
self.keyframesListControlWidget.deleteButton.clicked.connect(
self._delete_keyframe_callback
)
self.keyframesListControlWidget.captureButton.clicked.connect(
self._capture_keyframe_callback
)
self.saveButton.clicked.connect(self._save_callback)
self.animationsliderWidget.valueChanged.connect(
self._move_animationslider_callback
)
self.viewer.events.theme.connect(
lambda e: self.keyframesListWidget._update_theme(e.value)
)
def _release_callbacks(self):
self.animation.viewer.bind_key("Alt-f", None)
self.animation.viewer.bind_key("Alt-r", None)
self.animation.viewer.bind_key("Alt-d", None)
self.animation.viewer.bind_key("Alt-a", None)
self.animation.viewer.bind_key("Alt-b", None)
def _init_frame_widget(self):
self.frameWidget = FrameWidget(parent=self)
self._layout.addWidget(self.frameWidget)
def _init_keyframes_list_control_widget(self):
self.keyframesListControlWidget = KeyFrameListControlWidget(
animation=self.animation, parent=self
)
self._layout.addWidget(self.keyframesListControlWidget)
def _init_keyframes_list_widget(self):
self.keyframesListWidget = KeyFramesListWidget(
self.animation, parent=self
)
self.keyframesListWidget._update_theme(self.viewer.theme)
self._layout.addWidget(self.keyframesListWidget)
def _init_save_button(self):
self.saveButton = QPushButton("Save Animation", parent=self)
self._layout.addWidget(self.saveButton)
def _init_animation_slider_widget(self):
self.animationsliderWidget = AnimationSliderWidget(
self.animation, orientation=Qt.Horizontal, parent=self
)
self._layout.addWidget(self.animationsliderWidget)
def _get_interpolation_steps(self):
return int(self.frameWidget.stepsSpinBox.value())
def _get_easing_function(self):
return self.frameWidget.get_easing_func()
def _capture_keyframe_callback(self, event=None):
self.animation.capture_keyframe(
steps=self._get_interpolation_steps(),
ease=self._get_easing_function(),
)
if len(self.animation.key_frames) == 1:
self.keyframesListControlWidget.deleteButton.setEnabled(True)
self.keyframesListWidget.setEnabled(True)
self.frameWidget.setEnabled(True)
self.animationsliderWidget.requires_update = True
def _update_frame_widget_from_animation(self):
self.frameWidget.update_from_animation()
def _replace_keyframe_callback(self, event=None):
self.animation.capture_keyframe(
steps=self._get_interpolation_steps(),
ease=self._get_easing_function(),
insert=False,
)
self.animationsliderWidget.requires_update = True
def _delete_keyframe_callback(self, event=None):
if len(self.animation.key_frames) > 0:
self.animation.key_frames.pop(self.animation.frame)
if len(self.animation.key_frames) == 0:
self.keyframesListControlWidget.deleteButton.setEnabled(False)
self.keyframesListWidget.setEnabled(False)
self.frameWidget.setEnabled(False)
self.animationsliderWidget.requires_update = True
def _key_adv_frame(self, event=None):
new_frame = (self.animation.frame + 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self.keyframesListWidget.setCurrentRow(new_frame)
def _key_back_frame(self, event=None):
new_frame = (self.animation.frame - 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self.keyframesListWidget.setCurrentRow(new_frame)
def _save_callback(self, event=None):
if len(self.animation.key_frames) < 2:
error_dialog = QErrorMessage()
error_dialog.showMessage(
f"You need at least two key frames to generate \
                an animation. You only have {len(self.animation.key_frames)}"
)
error_dialog.exec_()
else:
filters = (
"Video files (*.mp4 *.gif *.mov *.avi *.mpg *.mpeg *.mkv *.wmv)"
";;Folder of PNGs (*)"
)
filename, _filter = QFileDialog.getSaveFileName(
self, "Save animation", str(Path.home()), filters
)
if filename:
self.animation.animate(filename)
def _move_animationslider_callback(self, event=None):
self.animationsliderWidget.synchronise()
new_frame = self.animationsliderWidget.value()
self.animation._set_viewer_state(
self.animationsliderWidget.interpol_states[new_frame]
)
new_key_frame = (
self.animationsliderWidget.cumulative_frame_count > new_frame
).argmax()
new_key_frame -= 1
new_key_frame = int(new_key_frame)
self.keyframesListWidget.setCurrentRowBlockingSignals(new_key_frame)
self.animation.frame = new_key_frame
def close(self):
self._release_callbacks()
super().close()
| true
| true
|
79038800ff62b848428dce8a67a9e44b4699bb5e
| 1,284
|
py
|
Python
|
imapfw/testing/libcore.py
|
paralax/imapfw
|
740a4fed1a1de28e4134a115a1dd9c6e90e29ec1
|
[
"MIT"
] | 492
|
2015-10-12T18:18:48.000Z
|
2022-02-14T11:46:46.000Z
|
imapfw/testing/libcore.py
|
paralax/imapfw
|
740a4fed1a1de28e4134a115a1dd9c6e90e29ec1
|
[
"MIT"
] | 21
|
2015-11-10T00:49:07.000Z
|
2021-12-30T07:51:25.000Z
|
imapfw/testing/libcore.py
|
paralax/imapfw
|
740a4fed1a1de28e4134a115a1dd9c6e90e29ec1
|
[
"MIT"
] | 40
|
2015-10-15T13:27:31.000Z
|
2021-12-30T07:52:24.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2015, Nicolas Sebrecht & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
def testingPath():
return os.path.join(
os.path.abspath(sys.modules['imapfw'].__path__[0]),
'testing')
| 41.419355
| 79
| 0.759346
|
import os
import sys
def testingPath():
return os.path.join(
os.path.abspath(sys.modules['imapfw'].__path__[0]),
'testing')
| true
| true
|
7903881baf28fb04948dceaf26f6f1e7b726da74
| 417
|
py
|
Python
|
polyaxon/api/repos/serializers.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/api/repos/serializers.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/api/repos/serializers.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
from rest_framework import fields, serializers
from db.models.repos import Repo
class RepoSerializer(serializers.ModelSerializer):
project = fields.SerializerMethodField()
class Meta:
model = Repo
fields = ('project', 'created_at', 'updated_at', 'is_public', )
def get_user(self, obj):
return obj.user.username
def get_project(self, obj):
return obj.project.name
| 23.166667
| 71
| 0.688249
|
from rest_framework import fields, serializers
from db.models.repos import Repo
class RepoSerializer(serializers.ModelSerializer):
project = fields.SerializerMethodField()
class Meta:
model = Repo
fields = ('project', 'created_at', 'updated_at', 'is_public', )
def get_user(self, obj):
return obj.user.username
def get_project(self, obj):
return obj.project.name
| true
| true
|
79038875f90d1870f897fdb2f00e9d30483286bc
| 1,834
|
py
|
Python
|
xchainpy/xchainpy_crypto/xchainpy_crypto/models/CryptoStruct.py
|
tirinox/xchainpy-lib
|
e01f146993c45ca0dad3ca40f07e7b45ed65653e
|
[
"MIT"
] | null | null | null |
xchainpy/xchainpy_crypto/xchainpy_crypto/models/CryptoStruct.py
|
tirinox/xchainpy-lib
|
e01f146993c45ca0dad3ca40f07e7b45ed65653e
|
[
"MIT"
] | null | null | null |
xchainpy/xchainpy_crypto/xchainpy_crypto/models/CryptoStruct.py
|
tirinox/xchainpy-lib
|
e01f146993c45ca0dad3ca40f07e7b45ed65653e
|
[
"MIT"
] | null | null | null |
from .KdfParams import KdfParams
from .CipherParams import CipherParams
class CryptoStruct:
def __init__(
self,
cipher: int,
ciphertext: str,
cipherparams: CipherParams,
kdf: str,
kdfparams: KdfParams,
mac: str,
):
self._cipher = cipher
self._ciphertext = ciphertext
self._cipherparams = cipherparams
self._kdf = kdf
self._kdfparams = kdfparams
self._mac = mac
@classmethod
def from_dict(cls, crypto):
new_crypto = cls.__new__(cls)
for key in crypto:
setattr(new_crypto, key, crypto[key])
return new_crypto
@property
def cipher(self):
return self._cipher
@cipher.setter
def cipher(self, cipher):
self._cipher = cipher
@property
def ciphertext(self):
return self._ciphertext
@ciphertext.setter
def ciphertext(self, ciphertext):
self._ciphertext = ciphertext
@property
def cipherparams(self):
return self._cipherparams
@cipherparams.setter
def cipherparams(self, cipherparams):
if isinstance(cipherparams, dict):
self._cipherparams = CipherParams.from_dict(cipherparams)
else:
self._cipherparams = cipherparams
@property
def kdf(self):
return self._kdf
@kdf.setter
def kdf(self, kdf):
self._kdf = kdf
@property
def kdfparams(self):
return self._kdfparams
@kdfparams.setter
def kdfparams(self, kdfparams):
if isinstance(kdfparams, dict):
self._kdfparams = KdfParams.from_dict(kdfparams)
else:
self._kdfparams = kdfparams
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, mac):
self._mac = mac
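# Minimal usage sketch (illustrative only): round-trips a keystore-style dict through
# from_dict. All field values below are made-up placeholders, and the nested dicts are
# assumed to be accepted by CipherParams.from_dict and KdfParams.from_dict respectively.
if __name__ == "__main__":
    sample = {
        "cipher": "aes-128-ctr",
        "ciphertext": "deadbeef",
        "cipherparams": {"iv": "0123456789abcdef"},
        "kdf": "pbkdf2",
        "kdfparams": {"prf": "hmac-sha256", "dklen": 32, "salt": "00ff", "c": 262144},
        "mac": "cafebabe",
    }
    crypto = CryptoStruct.from_dict(sample)
    print(crypto.kdf, type(crypto.cipherparams).__name__)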
| 22.641975
| 69
| 0.609051
|
from .KdfParams import KdfParams
from .CipherParams import CipherParams
class CryptoStruct:
def __init__(
self,
cipher: int,
ciphertext: str,
cipherparams: CipherParams,
kdf: str,
kdfparams: KdfParams,
mac: str,
):
self._cipher = cipher
self._ciphertext = ciphertext
self._cipherparams = cipherparams
self._kdf = kdf
self._kdfparams = kdfparams
self._mac = mac
@classmethod
def from_dict(cls, crypto):
new_crypto = cls.__new__(cls)
for key in crypto:
setattr(new_crypto, key, crypto[key])
return new_crypto
@property
def cipher(self):
return self._cipher
@cipher.setter
def cipher(self, cipher):
self._cipher = cipher
@property
def ciphertext(self):
return self._ciphertext
@ciphertext.setter
def ciphertext(self, ciphertext):
self._ciphertext = ciphertext
@property
def cipherparams(self):
return self._cipherparams
@cipherparams.setter
def cipherparams(self, cipherparams):
if isinstance(cipherparams, dict):
self._cipherparams = CipherParams.from_dict(cipherparams)
else:
self._cipherparams = cipherparams
@property
def kdf(self):
return self._kdf
@kdf.setter
def kdf(self, kdf):
self._kdf = kdf
@property
def kdfparams(self):
return self._kdfparams
@kdfparams.setter
def kdfparams(self, kdfparams):
if isinstance(kdfparams, dict):
self._kdfparams = KdfParams.from_dict(kdfparams)
else:
self._kdfparams = kdfparams
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, mac):
self._mac = mac
| true
| true
|
790388f30ead9eb8675e63e93d59d9cd81670aea
| 3,437
|
py
|
Python
|
python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | 2
|
2022-03-30T09:55:45.000Z
|
2022-03-30T09:55:49.000Z
|
python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import unittest
import numpy as np
from enum import Enum
import paddle
import paddle.static
map_np_dtype_to_fluid_dtype = {
'bool': "bool",
'int8': "int8",
'uint8': "uint8",
"int32": "int32",
"int64": "int64",
"float16": "float16",
"float32": "float32",
"float64": "float64",
}
class ExecutionMode(Enum):
CPU_FP32 = 1
IPU_FP32 = 2
# enable_fp16 through ipu_strategy.enable_fp16
IPU_POPART_FP16 = 3
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
return map_np_dtype_to_fluid_dtype[dtype.name]
class IPUOpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get random seeds
cls._np_rand_state = np.random.get_state()
cls._py_rand_state = random.getstate()
cls.SEED = 2021
np.random.seed(cls.SEED)
random.seed(cls.SEED)
# Enable paddle static graph mode
paddle.enable_static()
@classmethod
def tearDownClass(cls):
"""Restore random seeds"""
np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state)
@classmethod
def use_ipumodel(cls):
if 'POPLAR_IPUMODEL' not in os.environ:
return False
else:
flag = os.environ['POPLAR_IPUMODEL']
if flag.upper() in ['1', "TRUE"]:
return True
def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_training(self):
self.is_training = False
self.epoch = 1
def check(self, outputs, check_shape=False):
cpu_fp32 = outputs[ExecutionMode.CPU_FP32]
ipu_fp32 = outputs[ExecutionMode.IPU_FP32]
max_diff = np.abs(cpu_fp32 - ipu_fp32).max()
fp32_flag = np.allclose(
cpu_fp32, ipu_fp32, rtol=self.rtol, atol=self.atol)
self.assertTrue(fp32_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
ipu_popart_fp16 = None
if ExecutionMode.IPU_POPART_FP16 in outputs.keys():
ipu_popart_fp16 = outputs[ExecutionMode.IPU_POPART_FP16]
max_diff = np.abs(ipu_popart_fp16.astype(np.float32) -
cpu_fp32).max()
fp16_flag = np.allclose(
ipu_popart_fp16.astype(np.float32),
cpu_fp32,
rtol=self.rtol_fp16,
atol=self.atol_fp16)
self.assertTrue(fp16_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(ipu_popart_fp16.shape == cpu_fp32.shape)
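    # Usage sketch (illustrative): a concrete op test would collect one result per
    # execution mode and pass the dict to check(), e.g.
    #   outputs = {ExecutionMode.CPU_FP32: cpu_res, ExecutionMode.IPU_FP32: ipu_res}
    #   self.check(outputs)
    # where cpu_res / ipu_res are numpy arrays produced by the test's own run logic
    # (hypothetical names used here for illustration).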
| 29.376068
| 74
| 0.635438
|
import os
import random
import unittest
import numpy as np
from enum import Enum
import paddle
import paddle.static
map_np_dtype_to_fluid_dtype = {
'bool': "bool",
'int8': "int8",
'uint8': "uint8",
"int32": "int32",
"int64": "int64",
"float16": "float16",
"float32": "float32",
"float64": "float64",
}
class ExecutionMode(Enum):
CPU_FP32 = 1
IPU_FP32 = 2
IPU_POPART_FP16 = 3
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
return map_np_dtype_to_fluid_dtype[dtype.name]
class IPUOpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._np_rand_state = np.random.get_state()
cls._py_rand_state = random.getstate()
cls.SEED = 2021
np.random.seed(cls.SEED)
random.seed(cls.SEED)
paddle.enable_static()
@classmethod
def tearDownClass(cls):
np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state)
@classmethod
def use_ipumodel(cls):
if 'POPLAR_IPUMODEL' not in os.environ:
return False
else:
flag = os.environ['POPLAR_IPUMODEL']
if flag.upper() in ['1', "TRUE"]:
return True
def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_training(self):
self.is_training = False
self.epoch = 1
def check(self, outputs, check_shape=False):
cpu_fp32 = outputs[ExecutionMode.CPU_FP32]
ipu_fp32 = outputs[ExecutionMode.IPU_FP32]
max_diff = np.abs(cpu_fp32 - ipu_fp32).max()
fp32_flag = np.allclose(
cpu_fp32, ipu_fp32, rtol=self.rtol, atol=self.atol)
self.assertTrue(fp32_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
ipu_popart_fp16 = None
if ExecutionMode.IPU_POPART_FP16 in outputs.keys():
ipu_popart_fp16 = outputs[ExecutionMode.IPU_POPART_FP16]
max_diff = np.abs(ipu_popart_fp16.astype(np.float32) -
cpu_fp32).max()
fp16_flag = np.allclose(
ipu_popart_fp16.astype(np.float32),
cpu_fp32,
rtol=self.rtol_fp16,
atol=self.atol_fp16)
self.assertTrue(fp16_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(ipu_popart_fp16.shape == cpu_fp32.shape)
| true
| true
|
79038907b73bfe51adef149fbff6c6d5dc7f702a
| 53,715
|
py
|
Python
|
cwltool/main.py
|
suecharo/cwltool
|
997bddafe9837c551ff7681e7bbc5f3dea1b3096
|
[
"Apache-2.0"
] | null | null | null |
cwltool/main.py
|
suecharo/cwltool
|
997bddafe9837c551ff7681e7bbc5f3dea1b3096
|
[
"Apache-2.0"
] | 11
|
2022-02-17T03:20:41.000Z
|
2022-03-30T10:54:02.000Z
|
cwltool/main.py
|
suecharo/cwltool
|
997bddafe9837c551ff7681e7bbc5f3dea1b3096
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
"""Entry point for cwltool."""
import argparse
import copy
import functools
import io
import logging
import os
import signal
import subprocess # nosec
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
GraphTargetMissingException,
UnsupportedRequirement,
WorkflowException,
)
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, configure_logging, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
mergedirs,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
"""Kill all spawned processes.
Processes to be killed must be appended to `utils.processes_to_kill`
as they are spawned.
An important caveat: since there's no supported way to kill another
thread in Python, this function cannot stop other threads from
continuing to execute while it kills the processes that they've
spawned. This may occasionally lead to unexpected behaviour.
"""
# It's possible that another thread will spawn a new task while
# we're executing, so it's not safe to use a for loop here.
while processes_to_kill:
process = processes_to_kill.popleft()
if isinstance(process.args, MutableSequence):
args = process.args
else:
args = [process.args]
cidfile = [str(arg).split("=")[1] for arg in args if "--cidfile" in str(arg)]
if cidfile: # Try to be nice
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen( # nosec
["docker", "kill", inp_stream.read()], shell=False # nosec
)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
if process.stdin:
process.stdin.close()
try:
process.wait(10)
except subprocess.TimeoutExpired:
pass
process.kill() # Always kill, even if we tried with the cidfile
def _signal_handler(signum: int, _: Any) -> None:
"""Kill all spawned processes and exit.
Note that it's possible for another thread to spawn a process after
all processes have been killed, but before Python exits.
Refer to the docstring for _terminate_processes() for other caveats.
"""
_terminate_processes()
sys.exit(signum)
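# Usage sketch (illustrative): the handler above is intended to be registered for
# termination signals before running jobs, along the lines of
#   signal.signal(signal.SIGTERM, _signal_handler)
#   signal.signal(signal.SIGINT, _signal_handler)
# (registration shown here only for illustration; the actual wiring happens elsewhere
# in this module).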
def generate_example_input(
inptype: Optional[CWLOutputType],
default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
"""Convert a single input schema into an example."""
example = None
comment = ""
defaults = {
"null": "null",
"Any": "null",
"boolean": False,
"int": 0,
"long": 0,
"float": 0.1,
"double": 0.1,
"string": "a_string",
"File": ruamel.yaml.comments.CommentedMap(
[("class", "File"), ("path", "a/file/path")]
),
"Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
} # type: CWLObjectType
if isinstance(inptype, MutableSequence):
optional = False
if "null" in inptype:
inptype.remove("null")
optional = True
if len(inptype) == 1:
example, comment = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f"{comment} (optional)"
else:
comment = "optional"
else:
example = CommentedSeq()
for index, entry in enumerate(inptype):
value, e_comment = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = "optional"
elif isinstance(inptype, Mapping) and "type" in inptype:
if inptype["type"] == "array":
first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
items_len = len(cast(Sized, inptype["items"]))
if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
                # array of just an enum, so list all the options
example = first_item["symbols"]
if "name" in first_item:
comment = 'array of type "{}".'.format(first_item["name"])
else:
value, comment = generate_example_input(inptype["items"], None)
comment = "array of " + comment
if items_len == 1:
example = [value]
else:
example = value
if default is not None:
example = default
elif inptype["type"] == "enum":
symbols = cast(List[str], inptype["symbols"])
if default is not None:
example = default
elif "default" in inptype:
example = inptype["default"]
elif len(cast(Sized, inptype["symbols"])) == 1:
example = symbols[0]
else:
example = "{}_enum_value".format(inptype.get("name", "valid"))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif inptype["type"] == "record":
example = ruamel.yaml.comments.CommentedMap()
if "name" in inptype:
comment = '"{}" record type.'.format(inptype["name"])
else:
comment = "Anonymous record type."
for field in cast(List[CWLObjectType], inptype["fields"]):
value, f_comment = generate_example_input(field["type"], None)
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
comment = 'default value of type "{}".'.format(inptype["type"])
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
comment = 'type "{}".'.format(inptype["type"])
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return example, comment
def realize_input_schema(
input_types: MutableSequence[Union[str, CWLObjectType]],
schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[Union[str, CWLObjectType]]:
"""Replace references to named typed with the actual types."""
for index, entry in enumerate(input_types):
if isinstance(entry, str):
if "#" in entry:
_, input_type_name = entry.split("#")
else:
input_type_name = entry
if input_type_name in schema_defs:
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, MutableMapping):
if isinstance(entry["type"], str) and "#" in entry["type"]:
_, input_type_name = entry["type"].split("#")
if input_type_name in schema_defs:
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]],
schema_defs[input_type_name],
),
schema_defs,
),
)
if isinstance(entry["type"], MutableSequence):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], entry["type"]),
schema_defs,
),
)
if isinstance(entry["type"], Mapping):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
[cast(CWLObjectType, entry["type"])], schema_defs
),
)
if entry["type"] == "array":
items = (
entry["items"]
if not isinstance(entry["items"], str)
else [entry["items"]]
)
entry["items"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], items),
schema_defs,
),
)
if entry["type"] == "record":
entry["fields"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]], entry["fields"]
),
schema_defs,
),
)
return input_types
def generate_input_template(tool: Process) -> CWLObjectType:
"""Generate an example input object for the given CWL process."""
template = ruamel.yaml.comments.CommentedMap()
for inp in cast(
List[MutableMapping[str, str]],
realize_input_schema(tool.tool["inputs"], tool.schemaDefs),
):
name = shortname(inp["id"])
value, comment = generate_example_input(inp["type"], inp.get("default", None))
template.insert(0, name, value, comment)
return template
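# Usage sketch (illustrative): dumping the generated template as YAML, roughly what
# the --make-template option produces. `uri` and `loading_context` are hypothetical
# placeholders here.
#   tool = make_tool(uri, loading_context)
#   yaml = YAML()
#   yaml.dump(generate_input_template(tool), sys.stdout)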
def load_job_order(
args: argparse.Namespace,
stdin: IO[Any],
fetcher_constructor: Optional[FetcherCallableType],
overrides_list: List[CWLObjectType],
tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
job_order_object = None
job_order_file = None
_jobloaderctx = jobloaderctx.copy()
loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
if len(args.job_order) == 1 and args.job_order[0][0] != "-":
job_order_file = args.job_order[0]
elif len(args.job_order) == 1 and args.job_order[0] == "-":
yaml = yaml_no_ts()
job_order_object = yaml.load(stdin)
job_order_object, _ = loader.resolve_all(
job_order_object, file_uri(os.getcwd()) + "/"
)
else:
job_order_file = None
if job_order_object is not None:
input_basedir = args.basedir if args.basedir else os.getcwd()
elif job_order_file is not None:
input_basedir = (
args.basedir
if args.basedir
else os.path.abspath(os.path.dirname(job_order_file))
)
job_order_object, _ = loader.resolve_ref(
job_order_file,
checklinks=False,
content_types=CWL_CONTENT_TYPES,
)
if (
job_order_object is not None
and "http://commonwl.org/cwltool#overrides" in job_order_object
):
ov_uri = file_uri(job_order_file or input_basedir)
overrides_list.extend(
resolve_overrides(job_order_object, ov_uri, tool_file_uri)
)
del job_order_object["http://commonwl.org/cwltool#overrides"]
if job_order_object is None:
input_basedir = args.basedir if args.basedir else os.getcwd()
if job_order_object is not None and not isinstance(
job_order_object, MutableMapping
):
_logger.error(
"CWL input object at %s is not formatted correctly, it should be a "
"JSON/YAML dictionay, not %s.\n"
"Raw input object:\n%s",
job_order_file or "stdin",
type(job_order_object),
job_order_object,
)
sys.exit(1)
return (job_order_object, input_basedir, loader)
def init_job_order(
job_order_object: Optional[CWLObjectType],
args: argparse.Namespace,
process: Process,
loader: Loader,
stdout: Union[TextIO, StreamWriter],
print_input_deps: bool = False,
relative_deps: str = "primary",
make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
input_basedir: str = "",
secret_store: Optional[SecretStore] = None,
input_required: bool = True,
runtime_context: Optional[RuntimeContext] = None,
) -> CWLObjectType:
secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
if job_order_object is None:
namemap = {} # type: Dict[str, str]
records = [] # type: List[str]
toolparser = generate_parser(
argparse.ArgumentParser(prog=args.workflow),
process,
namemap,
records,
input_required,
loader.fetcher.urljoin,
file_uri(os.getcwd()) + "/",
)
if args.tool_help:
toolparser.print_help(cast(IO[str], stdout))
exit(0)
cmd_line = vars(toolparser.parse_args(args.job_order))
for record_name in records:
record = {}
record_items = {
k: v for k, v in cmd_line.items() if k.startswith(record_name)
}
for key, value in record_items.items():
record[key[len(record_name) + 1 :]] = value
del cmd_line[key]
cmd_line[str(record_name)] = record
if "job_order" in cmd_line and cmd_line["job_order"]:
try:
job_order_object = cast(
CWLObjectType,
loader.resolve_ref(cmd_line["job_order"])[0],
)
except Exception:
_logger.exception(
"Failed to resolv job_order: %s", cmd_line["job_order"]
)
exit(1)
else:
job_order_object = {"id": args.workflow}
del cmd_line["job_order"]
job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
"Parsed job order from command line: %s",
json_dumps(job_order_object, indent=4, default=str),
)
for inp in process.tool["inputs"]:
if "default" in inp and (
not job_order_object or shortname(inp["id"]) not in job_order_object
):
if not job_order_object:
job_order_object = {}
job_order_object[shortname(inp["id"])] = inp["default"]
def path_to_loc(p: CWLObjectType) -> None:
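        """Rename a legacy 'path' key to 'location'."""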
if "location" not in p and "path" in p:
p["location"] = p["path"]
del p["path"]
ns = {} # type: ContextType
ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
ld = Loader(ns)
def expand_formats(p: CWLObjectType) -> None:
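        """Expand a shorthand format identifier into a full URL."""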
if "format" in p:
p["format"] = ld.expand_url(cast(str, p["format"]), "")
visit_class(job_order_object, ("File", "Directory"), path_to_loc)
visit_class(
job_order_object,
("File",),
functools.partial(add_sizes, make_fs_access(input_basedir)),
)
visit_class(job_order_object, ("File",), expand_formats)
adjustDirObjs(job_order_object, trim_listing)
normalizeFilesDirs(job_order_object)
if print_input_deps:
if not runtime_context:
raise RuntimeError("runtime_context is required for print_input_deps.")
runtime_context.toplevel = True
builder = process._init_job(job_order_object, runtime_context)
builder.loadListing = "no_listing"
builder.bind_input(
process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
)
basedir: Optional[str] = None
uri = cast(str, job_order_object["id"])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
printdeps(
job_order_object,
loader,
stdout,
relative_deps,
uri,
basedir=basedir,
nestdirs=False,
)
exit(0)
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if "cwl:tool" in job_order_object:
del job_order_object["cwl:tool"]
if "id" in job_order_object:
del job_order_object["id"]
return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
"""Relativize the location URI of a File or Directory object."""
uri = cast(str, obj.get("location", obj.get("path")))
if ":" in uri.split("/")[0] and not uri.startswith("file://"):
pass
else:
if uri.startswith("file://"):
uri = uri_file_path(uri)
obj["location"] = os.path.relpath(uri, base)
def printdeps(
obj: CWLObjectType,
document_loader: Loader,
stdout: Union[TextIO, StreamWriter],
relative_deps: str,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> None:
"""Print a JSON representation of the dependencies of the CWL document."""
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
print(json_dumps(deps, indent=4, default=str), file=stdout)
def prov_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
) -> CWLObjectType:
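    """Find the CWL document's dependencies, keeping only CWL-format files."""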
deps = find_deps(obj, document_loader, uri, basedir=basedir)
def remove_non_cwl(deps: CWLObjectType) -> None:
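        """Drop secondary files that are not CWL documents."""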
if "secondaryFiles" in deps:
sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
for index, entry in enumerate(sec_files):
if not ("format" in entry and entry["format"] == CWL_IANA):
del sec_files[index]
else:
remove_non_cwl(entry)
remove_non_cwl(deps)
return deps
def find_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> CWLObjectType:
"""Find the dependencies of the CWL document."""
deps = {
"class": "File",
"location": uri,
"format": CWL_IANA,
} # type: CWLObjectType
def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
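        """Fetch a document, resolving the reference against the given base URI."""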
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps(
basedir if basedir else uri,
obj,
{"$import", "run"},
{"$include", "$schemas", "location"},
loadref,
nestdirs=nestdirs,
)
if sfs is not None:
deps["secondaryFiles"] = cast(
MutableSequence[CWLOutputAtomType], mergedirs(sfs)
)
return deps
def print_pack(
loadingContext: LoadingContext,
uri: str,
) -> str:
"""Return a CWL serialization of the CWL document in JSON."""
packed = pack(loadingContext, uri)
if len(cast(Sized, packed["$graph"])) > 1:
return json_dumps(packed, indent=4, default=str)
return json_dumps(
cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4, default=str
)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
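    """Return the sorted list of supported CWL versions."""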
# ALLUPDATES and UPDATES are dicts
if enable_dev:
versions = list(ALLUPDATES)
else:
versions = list(UPDATES)
versions.sort()
return versions
def setup_schema(
args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
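    """Use a custom schema callback, cwltool extensions, or the standard schemas."""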
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
ext10 = res.read().decode("utf-8")
with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
ext11 = res.read().decode("utf-8")
use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev3", "http://commonwl.org/cwltool", ext11)
else:
use_standard_schema("v1.0")
use_standard_schema("v1.1")
use_standard_schema("v1.2")
use_standard_schema("v1.2.0-dev1")
use_standard_schema("v1.2.0-dev2")
use_standard_schema("v1.2.0-dev3")
class ProvLogFormatter(logging.Formatter):
"""Enforce ISO8601 with both T and Z."""
def __init__(self) -> None:
"""Use the default formatter with our custom formatstring."""
super().__init__("[%(asctime)sZ] %(message)s")
def formatTime(
self, record: logging.LogRecord, datefmt: Optional[str] = None
) -> str:
formatted_time = time.strftime(
"%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
)
with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
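# Stream types that the provenance activity log can be written to.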
ProvOut = Union[io.TextIOWrapper, WritableBagFile]
def setup_provenance(
args: argparse.Namespace,
argsl: List[str],
runtimeContext: RuntimeContext,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
raise ArgumentException()
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
orcid=args.orcid,
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug("[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
_logger.debug("[cwltool] Arguments: %s", args)
return log_file_io, prov_log_handler
def setup_loadingContext(
loadingContext: Optional[LoadingContext],
runtimeContext: RuntimeContext,
args: argparse.Namespace,
) -> LoadingContext:
"""Prepare a LoadingContext from the given arguments."""
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
loadingContext.singularity = runtimeContext.singularity
loadingContext.podman = runtimeContext.podman
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=args.enable_dev,
doc_cache=args.doc_cache,
)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = args.disable_js_validation or (
not args.do_validate
)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool
)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
return loadingContext
def make_template(
tool: Process,
) -> None:
"""Make a template CWL input object for the give Process."""
def my_represent_none(
self: Any, data: Any
) -> Any: # pylint: disable=unused-argument
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
ruamel.yaml.representer.RoundTripRepresenter.add_representer(
type(None), my_represent_none
)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(
generate_input_template(tool),
sys.stdout,
)
def inherit_reqshints(tool: Process, parent: Process) -> None:
"""Copy down requirements and hints from ancestors of a given process."""
for parent_req in parent.requirements:
found = False
for tool_req in tool.requirements:
if parent_req["class"] == tool_req["class"]:
found = True
break
if not found:
tool.requirements.append(parent_req)
for parent_hint in parent.hints:
found = False
for tool_req in tool.requirements:
if parent_hint["class"] == tool_req["class"]:
found = True
break
if not found:
for tool_hint in tool.hints:
if parent_hint["class"] == tool_hint["class"]:
found = True
break
if not found:
tool.hints.append(parent_hint)
def choose_target(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
"""Walk the Workflow, extract the subset matches all the args.targets."""
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph(
[tool.tool["id"] + "/" + r for r in args.target], tool, loading_context
)
else:
extracted = get_subgraph(
[
loading_context.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target
],
tool,
loading_context,
)
else:
_logger.error("Can only use --target on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_step(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_step."""
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_step
else:
step_id = loading_context.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_step
)
extracted = get_step(tool, step_id, loading_context)
else:
_logger.error("Can only use --single-step on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = cast(
Union[CommentedMap, CommentedSeq, str, None], cmap(extracted)
)
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_process(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_process."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_process
else:
step_id = loadingContext.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_process
)
extracted, workflow_step = get_process(
tool,
step_id,
loadingContext,
)
else:
_logger.error("Can only use --single-process on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
new_tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
inherit_reqshints(new_tool, workflow_step)
return new_tool
def check_working_directories(
runtimeContext: RuntimeContext,
) -> Optional[int]:
"""Make any needed working directories."""
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if (
getattr(runtimeContext, dirprefix)
and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX
):
sl = (
"/"
if getattr(runtimeContext, dirprefix).endswith("/")
or dirprefix == "cachedir"
else ""
)
setattr(
runtimeContext,
dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl,
)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception("Failed to create directory.")
return 1
return None
def print_targets(
tool: Process,
stdout: Union[TextIO, StreamWriter],
loading_context: LoadingContext,
prefix: str = "",
) -> None:
"""Recursively find targets for --subgraph and friends."""
for f in ("outputs", "inputs"):
if tool.tool[f]:
_logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
print(
" "
+ "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
file=stdout,
)
if "steps" in tool.tool:
loading_context = copy.copy(loading_context)
loading_context.requirements = tool.requirements
loading_context.hints = tool.hints
_logger.info("%s steps targets:", prefix[:-1])
for t in tool.tool["steps"]:
print(f" {prefix}{shortname(t['id'])}", file=stdout)
run: Union[str, Process, Dict[str, Any]] = t["run"]
if isinstance(run, str):
process = make_tool(run, loading_context)
elif isinstance(run, dict):
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
print_targets(
process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/"
)
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
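    """Run the main cwltool command line interface and return an exit code."""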
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at the same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
                    logging.warning(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf:
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=stdout,
)
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.log_dir = args.log_dir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourselves to the RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
                # Why not use prov_log_handler.stream? That is not part of the
                # public API for logging.StreamHandler.
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
def find_default_container(
builder: HasReqsHints,
default_container: Optional[str] = None,
use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
"""Find a container."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder
)
return default_container
def windows_check() -> None:
"""See if we are running on MS Windows and warn about the lack of support."""
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
def run(*args: Any, **kwargs: Any) -> None:
"""Run cwltool."""
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes()
if __name__ == "__main__":
run(sys.argv[1:])
| 35.69103
| 88
| 0.574365
|
import argparse
import copy
import functools
import io
import logging
import os
import signal
import subprocess
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
GraphTargetMissingException,
UnsupportedRequirement,
WorkflowException,
)
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, configure_logging, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
mergedirs,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
    # Another thread may spawn a new process while we're executing, so it's
    # not safe to use a for loop here.
while processes_to_kill:
process = processes_to_kill.popleft()
if isinstance(process.args, MutableSequence):
args = process.args
else:
args = [process.args]
cidfile = [str(arg).split("=")[1] for arg in args if "--cidfile" in str(arg)]
if cidfile: # Try to be nice
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen( # nosec
["docker", "kill", inp_stream.read()], shell=False # nosec
)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
if process.stdin:
process.stdin.close()
try:
process.wait(10)
except subprocess.TimeoutExpired:
pass
process.kill() # Always kill, even if we tried with the cidfile
def _signal_handler(signum: int, _: Any) -> None:
_terminate_processes()
sys.exit(signum)
def generate_example_input(
inptype: Optional[CWLOutputType],
default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
example = None
comment = ""
defaults = {
"null": "null",
"Any": "null",
"boolean": False,
"int": 0,
"long": 0,
"float": 0.1,
"double": 0.1,
"string": "a_string",
"File": ruamel.yaml.comments.CommentedMap(
[("class", "File"), ("path", "a/file/path")]
),
"Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
} # type: CWLObjectType
if isinstance(inptype, MutableSequence):
optional = False
if "null" in inptype:
inptype.remove("null")
optional = True
if len(inptype) == 1:
example, comment = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f"{comment} (optional)"
else:
comment = "optional"
else:
example = CommentedSeq()
for index, entry in enumerate(inptype):
value, e_comment = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = "optional"
elif isinstance(inptype, Mapping) and "type" in inptype:
if inptype["type"] == "array":
first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
items_len = len(cast(Sized, inptype["items"]))
if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
                # an array of just an enum, so list all the options
example = first_item["symbols"]
if "name" in first_item:
comment = 'array of type "{}".'.format(first_item["name"])
else:
value, comment = generate_example_input(inptype["items"], None)
comment = "array of " + comment
if items_len == 1:
example = [value]
else:
example = value
if default is not None:
example = default
elif inptype["type"] == "enum":
symbols = cast(List[str], inptype["symbols"])
if default is not None:
example = default
elif "default" in inptype:
example = inptype["default"]
elif len(cast(Sized, inptype["symbols"])) == 1:
example = symbols[0]
else:
example = "{}_enum_value".format(inptype.get("name", "valid"))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif inptype["type"] == "record":
example = ruamel.yaml.comments.CommentedMap()
if "name" in inptype:
comment = '"{}" record type.'.format(inptype["name"])
else:
comment = "Anonymous record type."
for field in cast(List[CWLObjectType], inptype["fields"]):
value, f_comment = generate_example_input(field["type"], None)
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
comment = 'default value of type "{}".'.format(inptype["type"])
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
comment = 'type "{}".'.format(inptype["type"])
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return example, comment
def realize_input_schema(
input_types: MutableSequence[Union[str, CWLObjectType]],
schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[Union[str, CWLObjectType]]:
for index, entry in enumerate(input_types):
if isinstance(entry, str):
if "#" in entry:
_, input_type_name = entry.split("#")
else:
input_type_name = entry
if input_type_name in schema_defs:
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, MutableMapping):
if isinstance(entry["type"], str) and "#" in entry["type"]:
_, input_type_name = entry["type"].split("#")
if input_type_name in schema_defs:
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]],
schema_defs[input_type_name],
),
schema_defs,
),
)
if isinstance(entry["type"], MutableSequence):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], entry["type"]),
schema_defs,
),
)
if isinstance(entry["type"], Mapping):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
[cast(CWLObjectType, entry["type"])], schema_defs
),
)
if entry["type"] == "array":
items = (
entry["items"]
if not isinstance(entry["items"], str)
else [entry["items"]]
)
entry["items"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], items),
schema_defs,
),
)
if entry["type"] == "record":
entry["fields"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]], entry["fields"]
),
schema_defs,
),
)
return input_types
def generate_input_template(tool: Process) -> CWLObjectType:
template = ruamel.yaml.comments.CommentedMap()
for inp in cast(
List[MutableMapping[str, str]],
realize_input_schema(tool.tool["inputs"], tool.schemaDefs),
):
name = shortname(inp["id"])
value, comment = generate_example_input(inp["type"], inp.get("default", None))
template.insert(0, name, value, comment)
return template
def load_job_order(
args: argparse.Namespace,
stdin: IO[Any],
fetcher_constructor: Optional[FetcherCallableType],
overrides_list: List[CWLObjectType],
tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
job_order_object = None
job_order_file = None
_jobloaderctx = jobloaderctx.copy()
loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
if len(args.job_order) == 1 and args.job_order[0][0] != "-":
job_order_file = args.job_order[0]
elif len(args.job_order) == 1 and args.job_order[0] == "-":
yaml = yaml_no_ts()
job_order_object = yaml.load(stdin)
job_order_object, _ = loader.resolve_all(
job_order_object, file_uri(os.getcwd()) + "/"
)
else:
job_order_file = None
if job_order_object is not None:
input_basedir = args.basedir if args.basedir else os.getcwd()
elif job_order_file is not None:
input_basedir = (
args.basedir
if args.basedir
else os.path.abspath(os.path.dirname(job_order_file))
)
job_order_object, _ = loader.resolve_ref(
job_order_file,
checklinks=False,
content_types=CWL_CONTENT_TYPES,
)
if (
job_order_object is not None
and "http://commonwl.org/cwltool#overrides" in job_order_object
):
ov_uri = file_uri(job_order_file or input_basedir)
overrides_list.extend(
resolve_overrides(job_order_object, ov_uri, tool_file_uri)
)
del job_order_object["http://commonwl.org/cwltool#overrides"]
if job_order_object is None:
input_basedir = args.basedir if args.basedir else os.getcwd()
if job_order_object is not None and not isinstance(
job_order_object, MutableMapping
):
_logger.error(
"CWL input object at %s is not formatted correctly, it should be a "
"JSON/YAML dictionay, not %s.\n"
"Raw input object:\n%s",
job_order_file or "stdin",
type(job_order_object),
job_order_object,
)
sys.exit(1)
return (job_order_object, input_basedir, loader)
def init_job_order(
job_order_object: Optional[CWLObjectType],
args: argparse.Namespace,
process: Process,
loader: Loader,
stdout: Union[TextIO, StreamWriter],
print_input_deps: bool = False,
relative_deps: str = "primary",
make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
input_basedir: str = "",
secret_store: Optional[SecretStore] = None,
input_required: bool = True,
runtime_context: Optional[RuntimeContext] = None,
) -> CWLObjectType:
secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
if job_order_object is None:
namemap = {} # type: Dict[str, str]
records = [] # type: List[str]
toolparser = generate_parser(
argparse.ArgumentParser(prog=args.workflow),
process,
namemap,
records,
input_required,
loader.fetcher.urljoin,
file_uri(os.getcwd()) + "/",
)
if args.tool_help:
toolparser.print_help(cast(IO[str], stdout))
exit(0)
cmd_line = vars(toolparser.parse_args(args.job_order))
for record_name in records:
record = {}
record_items = {
k: v for k, v in cmd_line.items() if k.startswith(record_name)
}
for key, value in record_items.items():
record[key[len(record_name) + 1 :]] = value
del cmd_line[key]
cmd_line[str(record_name)] = record
if "job_order" in cmd_line and cmd_line["job_order"]:
try:
job_order_object = cast(
CWLObjectType,
loader.resolve_ref(cmd_line["job_order"])[0],
)
except Exception:
_logger.exception(
"Failed to resolv job_order: %s", cmd_line["job_order"]
)
exit(1)
else:
job_order_object = {"id": args.workflow}
del cmd_line["job_order"]
job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
"Parsed job order from command line: %s",
json_dumps(job_order_object, indent=4, default=str),
)
for inp in process.tool["inputs"]:
if "default" in inp and (
not job_order_object or shortname(inp["id"]) not in job_order_object
):
if not job_order_object:
job_order_object = {}
job_order_object[shortname(inp["id"])] = inp["default"]
def path_to_loc(p: CWLObjectType) -> None:
if "location" not in p and "path" in p:
p["location"] = p["path"]
del p["path"]
ns = {} # type: ContextType
ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
ld = Loader(ns)
def expand_formats(p: CWLObjectType) -> None:
if "format" in p:
p["format"] = ld.expand_url(cast(str, p["format"]), "")
visit_class(job_order_object, ("File", "Directory"), path_to_loc)
visit_class(
job_order_object,
("File",),
functools.partial(add_sizes, make_fs_access(input_basedir)),
)
visit_class(job_order_object, ("File",), expand_formats)
adjustDirObjs(job_order_object, trim_listing)
normalizeFilesDirs(job_order_object)
if print_input_deps:
if not runtime_context:
raise RuntimeError("runtime_context is required for print_input_deps.")
runtime_context.toplevel = True
builder = process._init_job(job_order_object, runtime_context)
builder.loadListing = "no_listing"
builder.bind_input(
process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
)
basedir: Optional[str] = None
uri = cast(str, job_order_object["id"])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
printdeps(
job_order_object,
loader,
stdout,
relative_deps,
uri,
basedir=basedir,
nestdirs=False,
)
exit(0)
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if "cwl:tool" in job_order_object:
del job_order_object["cwl:tool"]
if "id" in job_order_object:
del job_order_object["id"]
return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
uri = cast(str, obj.get("location", obj.get("path")))
if ":" in uri.split("/")[0] and not uri.startswith("file://"):
pass
else:
if uri.startswith("file://"):
uri = uri_file_path(uri)
obj["location"] = os.path.relpath(uri, base)
def printdeps(
obj: CWLObjectType,
document_loader: Loader,
stdout: Union[TextIO, StreamWriter],
relative_deps: str,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> None:
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
print(json_dumps(deps, indent=4, default=str), file=stdout)
def prov_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
) -> CWLObjectType:
deps = find_deps(obj, document_loader, uri, basedir=basedir)
def remove_non_cwl(deps: CWLObjectType) -> None:
if "secondaryFiles" in deps:
sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
for index, entry in enumerate(sec_files):
if not ("format" in entry and entry["format"] == CWL_IANA):
del sec_files[index]
else:
remove_non_cwl(entry)
remove_non_cwl(deps)
return deps
def find_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> CWLObjectType:
deps = {
"class": "File",
"location": uri,
"format": CWL_IANA,
} # type: CWLObjectType
def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps(
basedir if basedir else uri,
obj,
{"$import", "run"},
{"$include", "$schemas", "location"},
loadref,
nestdirs=nestdirs,
)
if sfs is not None:
deps["secondaryFiles"] = cast(
MutableSequence[CWLOutputAtomType], mergedirs(sfs)
)
return deps
def print_pack(
loadingContext: LoadingContext,
uri: str,
) -> str:
packed = pack(loadingContext, uri)
if len(cast(Sized, packed["$graph"])) > 1:
return json_dumps(packed, indent=4, default=str)
return json_dumps(
cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4, default=str
)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
# ALLUPDATES and UPDATES are dicts
if enable_dev:
versions = list(ALLUPDATES)
else:
versions = list(UPDATES)
versions.sort()
return versions
def setup_schema(
args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
ext10 = res.read().decode("utf-8")
with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
ext11 = res.read().decode("utf-8")
use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev3", "http://commonwl.org/cwltool", ext11)
else:
use_standard_schema("v1.0")
use_standard_schema("v1.1")
use_standard_schema("v1.2")
use_standard_schema("v1.2.0-dev1")
use_standard_schema("v1.2.0-dev2")
use_standard_schema("v1.2.0-dev3")
class ProvLogFormatter(logging.Formatter):
def __init__(self) -> None:
super().__init__("[%(asctime)sZ] %(message)s")
def formatTime(
self, record: logging.LogRecord, datefmt: Optional[str] = None
) -> str:
formatted_time = time.strftime(
"%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
)
with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
ProvOut = Union[io.TextIOWrapper, WritableBagFile]
def setup_provenance(
args: argparse.Namespace,
argsl: List[str],
runtimeContext: RuntimeContext,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
raise ArgumentException()
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
orcid=args.orcid,
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug("[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
_logger.debug("[cwltool] Arguments: %s", args)
return log_file_io, prov_log_handler
def setup_loadingContext(
loadingContext: Optional[LoadingContext],
runtimeContext: RuntimeContext,
args: argparse.Namespace,
) -> LoadingContext:
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
loadingContext.singularity = runtimeContext.singularity
loadingContext.podman = runtimeContext.podman
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=args.enable_dev,
doc_cache=args.doc_cache,
)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = args.disable_js_validation or (
not args.do_validate
)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool
)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
return loadingContext
def make_template(
tool: Process,
) -> None:
def my_represent_none(
self: Any, data: Any
) -> Any: # pylint: disable=unused-argument
return self.represent_scalar("tag:yaml.org,2002:null", "null")
ruamel.yaml.representer.RoundTripRepresenter.add_representer(
type(None), my_represent_none
)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(
generate_input_template(tool),
sys.stdout,
)
def inherit_reqshints(tool: Process, parent: Process) -> None:
for parent_req in parent.requirements:
found = False
for tool_req in tool.requirements:
if parent_req["class"] == tool_req["class"]:
found = True
break
if not found:
tool.requirements.append(parent_req)
for parent_hint in parent.hints:
found = False
for tool_req in tool.requirements:
if parent_hint["class"] == tool_req["class"]:
found = True
break
if not found:
for tool_hint in tool.hints:
if parent_hint["class"] == tool_hint["class"]:
found = True
break
if not found:
tool.hints.append(parent_hint)
def choose_target(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph(
[tool.tool["id"] + "/" + r for r in args.target], tool, loading_context
)
else:
extracted = get_subgraph(
[
loading_context.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target
],
tool,
loading_context,
)
else:
_logger.error("Can only use --target on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_step(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_step
else:
step_id = loading_context.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_step
)
extracted = get_step(tool, step_id, loading_context)
else:
_logger.error("Can only use --single-step on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = cast(
Union[CommentedMap, CommentedSeq, str, None], cmap(extracted)
)
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_process(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_process
else:
step_id = loadingContext.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_process
)
extracted, workflow_step = get_process(
tool,
step_id,
loadingContext,
)
else:
_logger.error("Can only use --single-process on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
new_tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
inherit_reqshints(new_tool, workflow_step)
return new_tool
def check_working_directories(
runtimeContext: RuntimeContext,
) -> Optional[int]:
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if (
getattr(runtimeContext, dirprefix)
and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX
):
sl = (
"/"
if getattr(runtimeContext, dirprefix).endswith("/")
or dirprefix == "cachedir"
else ""
)
setattr(
runtimeContext,
dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl,
)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception("Failed to create directory.")
return 1
return None
def print_targets(
tool: Process,
stdout: Union[TextIO, StreamWriter],
loading_context: LoadingContext,
prefix: str = "",
) -> None:
for f in ("outputs", "inputs"):
if tool.tool[f]:
_logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
print(
" "
+ "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
file=stdout,
)
if "steps" in tool.tool:
loading_context = copy.copy(loading_context)
loading_context.requirements = tool.requirements
loading_context.hints = tool.hints
_logger.info("%s steps targets:", prefix[:-1])
for t in tool.tool["steps"]:
print(f" {prefix}{shortname(t['id'])}", file=stdout)
run: Union[str, Process, Dict[str, Any]] = t["run"]
if isinstance(run, str):
process = make_tool(run, loading_context)
elif isinstance(run, dict):
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
print_targets(
process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/"
)
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
                    _logger.warning(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf:
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=stdout,
)
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.log_dir = args.log_dir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {}
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
)
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
)
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
def find_default_container(
builder: HasReqsHints,
default_container: Optional[str] = None,
use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder
)
return default_container
def windows_check() -> None:
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
def run(*args: Any, **kwargs: Any) -> None:
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes()
if __name__ == "__main__":
run(sys.argv[1:])
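The `main` entry point above can also be driven programmatically instead of through the console script. A minimal sketch, assuming cwltool is installed so that `cwltool.main` is importable and that a local `wf.cwl` (hypothetical path) exists:

from cwltool.main import main

# argsl is the raw argument list, exactly as it would appear on the command line.
exit_code = main(argsl=["--validate", "wf.cwl"])
print("validation exit code:", exit_code)  # 0 on success, 1 on validation errors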
| true
| true
|
790389486d1f0c19a68d44dcefd563bc953d8c5b
| 3,715
|
py
|
Python
|
docs/source/conf.py
|
m-kuhn/sqlfluff
|
8c7bbcd3346abf7f613454b5d597252292be38cb
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
m-kuhn/sqlfluff
|
8c7bbcd3346abf7f613454b5d597252292be38cb
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
m-kuhn/sqlfluff
|
8c7bbcd3346abf7f613454b5d597252292be38cb
|
[
"MIT"
] | null | null | null |
"""Configuration file for the Sphinx documentation builder.
This file only contains a selection of the most common options. For a full
list see the documentation:
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""
import configparser
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Get the global config info as currently stated
# (we use the config file to avoid actually loading any python here)
config = configparser.ConfigParser()
config.read(["../../src/sqlfluff/config.ini"])
stable_version = config.get("sqlfluff", "stable_version")
# -- Project information -----------------------------------------------------
project = "SQLFluff"
copyright = "2019, Alan Cruickshank"
author = "Alan Cruickshank"
# The full version, including alpha/beta/rc tags
release = stable_version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# Autodocumentation from docstrings
"sphinx.ext.autodoc",
# Allow Google style docstrings
"sphinx.ext.napoleon",
# Documenting click commands
"sphinx_click.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Master doc
master_doc = "index"
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
html_favicon = "favicon-fluff.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for Alabaster Theme ---------------------------------------------
html_theme_options = {
"logo": "images/sqlfluff-lrg.png",
# Icon for iOS shortcuts
"touch_icon": "images/sqlfluff-sm2-sq.png",
"github_user": "sqlfluff",
"github_repo": "sqlfluff",
# Github Fork button
"github_banner": True,
# Github link button
"github_button": True,
# Codecov button
"codecov_button": True,
}
def ultimate_replace(app, docname, source):
"""Replaces variables in docs, including code blocks.
From: https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229
"""
result = source[0]
for key in app.config.ultimate_replacements:
result = result.replace(key, app.config.ultimate_replacements[key])
source[0] = result
ultimate_replacements = {"|release|": release}
def setup(app):
"""Configures the documentation app."""
app.add_config_value("ultimate_replacements", {}, True)
app.connect("source-read", ultimate_replace)
| 32.587719
| 82
| 0.655989
|
import configparser
config = configparser.ConfigParser()
config.read(["../../src/sqlfluff/config.ini"])
stable_version = config.get("sqlfluff", "stable_version")
project = "SQLFluff"
copyright = "2019, Alan Cruickshank"
author = "Alan Cruickshank"
release = stable_version
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_click.ext",
]
templates_path = ["_templates"]
exclude_patterns = []
master_doc = "index"
add_module_names = False
html_theme = "alabaster"
html_favicon = "favicon-fluff.png"
html_static_path = ["_static"]
html_theme_options = {
"logo": "images/sqlfluff-lrg.png",
"touch_icon": "images/sqlfluff-sm2-sq.png",
"github_user": "sqlfluff",
"github_repo": "sqlfluff",
"github_banner": True,
"github_button": True,
"codecov_button": True,
}
def ultimate_replace(app, docname, source):
result = source[0]
for key in app.config.ultimate_replacements:
result = result.replace(key, app.config.ultimate_replacements[key])
source[0] = result
ultimate_replacements = {"|release|": release}
def setup(app):
app.add_config_value("ultimate_replacements", {}, True)
app.connect("source-read", ultimate_replace)
| true
| true
|
790389e06c57fe14d00a09909593ddc3969e8ca0
| 3,592
|
py
|
Python
|
swarmlib/cuckoosearch/cuckoo_problem.py
|
alxfmpl/swarmlib
|
625645d466223ebef35fa1492d47e1a252cfd863
|
[
"BSD-3-Clause"
] | null | null | null |
swarmlib/cuckoosearch/cuckoo_problem.py
|
alxfmpl/swarmlib
|
625645d466223ebef35fa1492d47e1a252cfd863
|
[
"BSD-3-Clause"
] | null | null | null |
swarmlib/cuckoosearch/cuckoo_problem.py
|
alxfmpl/swarmlib
|
625645d466223ebef35fa1492d47e1a252cfd863
|
[
"BSD-3-Clause"
] | 2
|
2020-09-30T21:29:26.000Z
|
2020-12-22T15:15:52.000Z
|
# ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
# pylint: disable=too-many-instance-attributes
from copy import deepcopy
import logging
import numpy as np
from .nest import Nest
from ..util import levy_flight as cuckoo
from .visualizer import Visualizer
LOGGER = logging.getLogger(__name__)
class CuckooProblem:
def __init__(self, **kwargs):
"""
Initialize a new cuckoo search problem.
"""
self.__upper_boundary = kwargs.get('upper_boundary', 4.)
self.__lower_boundary = kwargs.get('lower_boundary', 0.)
self.__alpha = kwargs.pop('alpha', 1)
self.__max_generations = kwargs.pop('max_generations', 10)
self.__lambda = kwargs.pop('lambda', 1.5)
self.__p_a = kwargs.pop('p_a', .1)
self.__function = kwargs['function']
self.__nests = [
Nest(lower_boundary=self.__lower_boundary, upper_boundary=self.__upper_boundary, function=self.__function)
for _ in range(kwargs['nests'])
]
# Initialize visualizer for plotting
kwargs['iteration_number'] = self.__max_generations
self.__visualizer = Visualizer(**kwargs)
def solve(self) -> Nest:
nest_indices = np.array(range(len(self.__nests)))
best_nest = deepcopy(min(self.__nests, key=lambda nest: nest.value))
positions, abandoned = zip(*[(nest.position, nest.abandoned) for nest in self.__nests])
self.__visualizer.add_data(positions=positions, best_position=best_nest.position, abandoned=abandoned)
LOGGER.info('Iteration 0 best solution="%s" at position="%s"', best_nest.value, best_nest.position)
for iteration in range(self.__max_generations):
# Perform levy flights to get cuckoo's new position
new_cuckoo_pos = [
np.clip(cuckoo.levy_flight(nest.position, self.__alpha, self.__lambda), a_min=self.__lower_boundary, a_max=self.__upper_boundary)
for nest in self.__nests
]
# Randomly select nests to be updated
np.random.shuffle(nest_indices)
# Update nests
for index, pos in zip(nest_indices, new_cuckoo_pos):
self.__nests[index].update_pos(pos)
# Abandon nests randomly considering p_a
for nest in self.__nests:
if np.random.random_sample() < self.__p_a:
nest.abandon()
# Update best nest
current_best = min(self.__nests, key=lambda nest: nest.value)
if current_best.value < best_nest.value:
best_nest = deepcopy(current_best)
LOGGER.info('Iteration %i Found new best solution="%s" at position="%s"', iteration+1, best_nest.value, best_nest.position)
# Add data for plot
positions, abandoned = zip(*[(nest.position, nest.abandoned) for nest in self.__nests])
self.__visualizer.add_data(positions=positions, best_position=current_best.position, abandoned=abandoned)
LOGGER.info('Last best solution="%s" at position="%s"', best_nest.value, best_nest.position)
return best_nest
def replay(self):
"""
Start the problems visualization.
"""
self.__visualizer.replay()
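A rough usage sketch for the class above. The exact keyword set is dictated by `Nest` and `Visualizer`, which are not shown here, so the objective signature and the accepted kwargs are assumptions:

import numpy as np

# Hypothetical 2-D sphere objective; assumes Nest calls function(position) on an array.
problem = CuckooProblem(
    function=lambda position: float(np.sum(np.asarray(position) ** 2)),
    nests=15,
    lower_boundary=0.,
    upper_boundary=4.,
    max_generations=20,
)
best = problem.solve()
print(best.value, best.position)
problem.replay()  # replays the recorded visualization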
| 40.359551
| 145
| 0.612194
|
from copy import deepcopy
import logging
import numpy as np
from .nest import Nest
from ..util import levy_flight as cuckoo
from .visualizer import Visualizer
LOGGER = logging.getLogger(__name__)
class CuckooProblem:
def __init__(self, **kwargs):
self.__upper_boundary = kwargs.get('upper_boundary', 4.)
self.__lower_boundary = kwargs.get('lower_boundary', 0.)
self.__alpha = kwargs.pop('alpha', 1)
self.__max_generations = kwargs.pop('max_generations', 10)
self.__lambda = kwargs.pop('lambda', 1.5)
self.__p_a = kwargs.pop('p_a', .1)
self.__function = kwargs['function']
self.__nests = [
Nest(lower_boundary=self.__lower_boundary, upper_boundary=self.__upper_boundary, function=self.__function)
for _ in range(kwargs['nests'])
]
kwargs['iteration_number'] = self.__max_generations
self.__visualizer = Visualizer(**kwargs)
def solve(self) -> Nest:
nest_indices = np.array(range(len(self.__nests)))
best_nest = deepcopy(min(self.__nests, key=lambda nest: nest.value))
positions, abandoned = zip(*[(nest.position, nest.abandoned) for nest in self.__nests])
self.__visualizer.add_data(positions=positions, best_position=best_nest.position, abandoned=abandoned)
LOGGER.info('Iteration 0 best solution="%s" at position="%s"', best_nest.value, best_nest.position)
for iteration in range(self.__max_generations):
new_cuckoo_pos = [
np.clip(cuckoo.levy_flight(nest.position, self.__alpha, self.__lambda), a_min=self.__lower_boundary, a_max=self.__upper_boundary)
for nest in self.__nests
]
# Randomly select nests to be updated
np.random.shuffle(nest_indices)
# Update nests
for index, pos in zip(nest_indices, new_cuckoo_pos):
self.__nests[index].update_pos(pos)
# Abandon nests randomly considering p_a
for nest in self.__nests:
if np.random.random_sample() < self.__p_a:
nest.abandon()
# Update best nest
current_best = min(self.__nests, key=lambda nest: nest.value)
if current_best.value < best_nest.value:
best_nest = deepcopy(current_best)
LOGGER.info('Iteration %i Found new best solution="%s" at position="%s"', iteration+1, best_nest.value, best_nest.position)
# Add data for plot
positions, abandoned = zip(*[(nest.position, nest.abandoned) for nest in self.__nests])
self.__visualizer.add_data(positions=positions, best_position=current_best.position, abandoned=abandoned)
LOGGER.info('Last best solution="%s" at position="%s"', best_nest.value, best_nest.position)
return best_nest
def replay(self):
self.__visualizer.replay()
| true
| true
|
79038afe4fbca1d48d22cec79d9ae113b6a2ec81
| 895
|
py
|
Python
|
Arcpy Script/SplitGDB/splitGDBTool.py
|
AkutoSai/ArcGIS
|
3bad0e06e7f99d4a91714abc575460383abebbd9
|
[
"Apache-2.0"
] | null | null | null |
Arcpy Script/SplitGDB/splitGDBTool.py
|
AkutoSai/ArcGIS
|
3bad0e06e7f99d4a91714abc575460383abebbd9
|
[
"Apache-2.0"
] | null | null | null |
Arcpy Script/SplitGDB/splitGDBTool.py
|
AkutoSai/ArcGIS
|
3bad0e06e7f99d4a91714abc575460383abebbd9
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import arcpy
from arcpy import env
import time
def splitGDBTool(inputGDB,inputFrame,splitField,outputDir):
# Get FCs to be cliped
env.workspace = inputGDB
inputFCs = arcpy.ListFeatureClasses()
countFCs =len(inputFCs)
cursor = arcpy.da.SearchCursor(inputFrame,["TID","SHAPE@"])
index = 1
for row in cursor:
arcpy.CreateFileGDB_management(outputDir,row[0],"")
print index,time.strftime("%H:%M:%S "),row[0]+".gdb"
indexfc = 1
for inputFC in inputFCs:
print "\t",index,"-",indexfc, time.strftime("%H:%M:%S "), inputFC
outputFC = outputDir + os.sep + row[0] +".gdb" + os.sep + inputFC
arcpy.Clip_analysis(inputGDB+ os.sep + inputFC, row[1], outputFC)
indexfc += 1
index += 1
if __name__=="__main__":
splitGDBTool(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
| 33.148148
| 77
| 0.622346
|
import os
import sys
import arcpy
from arcpy import env
import time
def splitGDBTool(inputGDB,inputFrame,splitField,outputDir):
env.workspace = inputGDB
inputFCs = arcpy.ListFeatureClasses()
countFCs =len(inputFCs)
cursor = arcpy.da.SearchCursor(inputFrame,["TID","SHAPE@"])
index = 1
for row in cursor:
arcpy.CreateFileGDB_management(outputDir,row[0],"")
print index,time.strftime("%H:%M:%S "),row[0]+".gdb"
indexfc = 1
for inputFC in inputFCs:
print "\t",index,"-",indexfc, time.strftime("%H:%M:%S "), inputFC
outputFC = outputDir + os.sep + row[0] +".gdb" + os.sep + inputFC
arcpy.Clip_analysis(inputGDB+ os.sep + inputFC, row[1], outputFC)
indexfc += 1
index += 1
if __name__=="__main__":
splitGDBTool(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
| false
| true
|
79038b3e118983bc62e021442b3e8f2c6f1fa0d7
| 1,003
|
py
|
Python
|
examples/outlook/send_message.py
|
stardust85/Office365-REST-Python-Client
|
cd369c607c7d137a000734e9c5e8f03ae3e3c603
|
[
"MIT"
] | null | null | null |
examples/outlook/send_message.py
|
stardust85/Office365-REST-Python-Client
|
cd369c607c7d137a000734e9c5e8f03ae3e3c603
|
[
"MIT"
] | null | null | null |
examples/outlook/send_message.py
|
stardust85/Office365-REST-Python-Client
|
cd369c607c7d137a000734e9c5e8f03ae3e3c603
|
[
"MIT"
] | null | null | null |
from office365.graph.graph_client import GraphClient
from settings import settings
def get_token(auth_ctx):
"""Acquire token via client credential flow (ADAL Python library is utilized)"""
token = auth_ctx.acquire_token_with_client_credentials(
"https://graph.microsoft.com",
settings['client_credentials']['client_id'],
settings['client_credentials']['client_secret'])
return token
client = GraphClient(settings['tenant'], get_token)
message_json = {
"Message": {
"Subject": "Meet for lunch?",
"Body": {
"ContentType": "Text",
"Content": "The new cafeteria is open."
},
"ToRecipients": [
{
"EmailAddress": {
"Address": "vgrem@mediadev8.onmicrosoft.com"
}
}
]
},
"SaveToSentItems": "false"
}
login_name = "mdoe@mediadev8.onmicrosoft.com"
client.users[login_name].send_mail(message_json)
client.execute_query()
| 27.861111
| 84
| 0.612164
|
from office365.graph.graph_client import GraphClient
from settings import settings
def get_token(auth_ctx):
token = auth_ctx.acquire_token_with_client_credentials(
"https://graph.microsoft.com",
settings['client_credentials']['client_id'],
settings['client_credentials']['client_secret'])
return token
client = GraphClient(settings['tenant'], get_token)
message_json = {
"Message": {
"Subject": "Meet for lunch?",
"Body": {
"ContentType": "Text",
"Content": "The new cafeteria is open."
},
"ToRecipients": [
{
"EmailAddress": {
"Address": "vgrem@mediadev8.onmicrosoft.com"
}
}
]
},
"SaveToSentItems": "false"
}
login_name = "mdoe@mediadev8.onmicrosoft.com"
client.users[login_name].send_mail(message_json)
client.execute_query()
| true
| true
|
79038b49ef48f09871d20739b84b9b0fc714ba5a
| 1,501
|
py
|
Python
|
Week-7/Day-42.py
|
abusamrah2005/Python
|
b601a9daf8a5245bbcc1466d629adda43ed7c6ca
|
[
"Unlicense"
] | 4
|
2019-09-21T22:47:53.000Z
|
2020-04-17T03:32:21.000Z
|
Week-7/Day-42.py
|
abusamrah2005/Python
|
b601a9daf8a5245bbcc1466d629adda43ed7c6ca
|
[
"Unlicense"
] | null | null | null |
Week-7/Day-42.py
|
abusamrah2005/Python
|
b601a9daf8a5245bbcc1466d629adda43ed7c6ca
|
[
"Unlicense"
] | 2
|
2019-09-21T22:47:59.000Z
|
2020-04-17T03:32:14.000Z
|
# # Python Week-7 Day-42
# Python Classes and Objects 2
print(" -- Let us create a method in the Person class --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name )
p1 = Person("John", "36")
p1.myfunc()
print("----")
class Car:
def __init__(self, brand, price):
self.brand = brand
self.price = price
def myfunc(self):
print("Car brand Is: " + self.brand, "\nCar Price Is: " + self.price)
p1 = Car("Kia", "10000")
p1.myfunc()
print("\n -- Modify Object Properties -- ")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
p1.age = 40
print(p1.age)
print("\n -- Delete Object Properties --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
try :
del p1.age
print(p1.age)
except AttributeError as err:
print("Properties 'age' not Exist")
print("\n -- Delete Objects --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
del p1
try :
print(p1.age)
except NameError as err:
print("p1 is not Defined")
| 20.847222
| 77
| 0.588941
|
print(" -- Let us create a method in the Person class --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name )
p1 = Person("John", "36")
p1.myfunc()
print("----")
class Car:
def __init__(self, brand, price):
self.brand = brand
self.price = price
def myfunc(self):
print("Car brand Is: " + self.brand, "\nCar Price Is: " + self.price)
p1 = Car("Kia", "10000")
p1.myfunc()
print("\n -- Modify Object Properties -- ")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
p1.age = 40
print(p1.age)
print("\n -- Delete Object Properties --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
try :
del p1.age
print(p1.age)
except AttributeError as err:
print("Properties 'age' not Exist")
print("\n -- Delete Objects --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
del p1
try :
print(p1.age)
except NameError as err:
print("p1 is not Defined")
| true
| true
|
79038b924918e263216f834e45808435613c405f
| 8,356
|
py
|
Python
|
test/functional/feature_proxy.py
|
barrystyle/Pricecoin
|
dc30cbc16cbb249a63e8ec3cbc31b04b887d4d58
|
[
"MIT"
] | 4
|
2018-04-24T20:56:48.000Z
|
2020-03-01T09:54:29.000Z
|
test/functional/feature_proxy.py
|
barrystyle/Pricecoin
|
dc30cbc16cbb249a63e8ec3cbc31b04b887d4d58
|
[
"MIT"
] | 2
|
2018-05-06T17:37:59.000Z
|
2018-07-06T11:36:18.000Z
|
test/functional/feature_proxy.py
|
barrystyle/Pricecoin
|
dc30cbc16cbb249a63e8ec3cbc31b04b887d4d58
|
[
"MIT"
] | 2
|
2019-01-20T20:56:15.000Z
|
2019-02-12T03:47:16.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test pricecoind with different proxy configuration.
Test plan:
- Start pricecoinds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on pricecoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create pricecoinds that connect to them
- Manipulate the pricecoinds using addnode (onetry) and observe the effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:9333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
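For orientation, the SOCKS5 fixtures this test spins up reduce to a few lines of configuration. A stripped-down sketch using the same helpers, assuming the bundled test_framework package is importable:

from test_framework.socks5 import Socks5Configuration, Socks5Server

conf = Socks5Configuration()
conf.addr = ('127.0.0.1', 15000)  # hypothetical free port
conf.unauth = True                # accept unauthenticated clients
conf.auth = True                  # also accept user/pass (Tor-style)
serv = Socks5Server(conf)
serv.start()                      # connection commands then appear on serv.queue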
| 41.366337
| 121
| 0.625299
|
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
node.addnode("node.noumenon:9333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| true
| true
|
79038b92a0283038fcc4f27a83a28e127274409d
| 805
|
py
|
Python
|
scripts/2-aggregate-land-cover.py
|
olga-turkovska/land-cover-patterns
|
67bbf0d01b7bb5ec5b1376a9fbc1da59addf2e31
|
[
"MIT"
] | null | null | null |
scripts/2-aggregate-land-cover.py
|
olga-turkovska/land-cover-patterns
|
67bbf0d01b7bb5ec5b1376a9fbc1da59addf2e31
|
[
"MIT"
] | null | null | null |
scripts/2-aggregate-land-cover.py
|
olga-turkovska/land-cover-patterns
|
67bbf0d01b7bb5ec5b1376a9fbc1da59addf2e31
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import rasterio
aggregate_forest = np.vectorize(lambda x: np.where(0 < x < 6, 1, x))
aggregate_agriculture = np.vectorize(lambda x: np.where(11 < x < 21, 21, x))
for dirs, subdirs, files in os.walk('../output/ceara/'):
for file in files:
wp_raster = rasterio.open('../output/ceara/' + file)
file_name = file.replace('id_', '')
wp_id = int(file_name.replace('.tif', ''))
out_raster_temp = aggregate_forest(wp_raster.read(range(1, 34)))
out_raster = aggregate_agriculture(out_raster_temp)
out_raster = out_raster.astype('uint8')
out_meta = wp_raster.meta
with rasterio.open('../output/ceara_agg_v2/' + 'agg_v2_id_' + str(wp_id) + '.tif', 'w', **out_meta) as raster:
raster.write(out_raster)
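The two vectorized lambdas reclassify pixel values element by element: codes 1-5 collapse to 1 (forest) and codes 12-20 collapse to 21 (agriculture); everything else passes through. A small self-contained check of that mapping:

import numpy as np

aggregate_forest = np.vectorize(lambda x: np.where(0 < x < 6, 1, x))
aggregate_agriculture = np.vectorize(lambda x: np.where(11 < x < 21, 21, x))

band = np.array([[3, 7], [15, 21]], dtype='uint8')
out = aggregate_agriculture(aggregate_forest(band))
print(out)        # [[ 1  7]
                  #  [21 21]]
print(out.dtype)  # platform int, which is why the script casts back with astype('uint8')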
| 33.541667
| 118
| 0.643478
|
import os
import numpy as np
import rasterio
aggregate_forest = np.vectorize(lambda x: np.where(0 < x < 6, 1, x))
aggregate_agriculture = np.vectorize(lambda x: np.where(11 < x < 21, 21, x))
for dirs, subdirs, files in os.walk('../output/ceara/'):
for file in files:
wp_raster = rasterio.open('../output/ceara/' + file)
file_name = file.replace('id_', '')
wp_id = int(file_name.replace('.tif', ''))
out_raster_temp = aggregate_forest(wp_raster.read(range(1, 34)))
out_raster = aggregate_agriculture(out_raster_temp)
out_raster = out_raster.astype('uint8')
out_meta = wp_raster.meta
with rasterio.open('../output/ceara_agg_v2/' + 'agg_v2_id_' + str(wp_id) + '.tif', 'w', **out_meta) as raster:
raster.write(out_raster)
| true
| true
|
79038bb6da2b2408011106999d8fd2068c1db016
| 1,760
|
py
|
Python
|
sknetwork/utils/seeds.py
|
altana-tech/scikit-network
|
dedc9d3e694c7106e4709aae22dffb5142c15859
|
[
"BSD-3-Clause"
] | 1
|
2020-09-14T11:06:13.000Z
|
2020-09-14T11:06:13.000Z
|
sknetwork/utils/seeds.py
|
altana-tech/scikit-network
|
dedc9d3e694c7106e4709aae22dffb5142c15859
|
[
"BSD-3-Clause"
] | 2
|
2020-10-17T08:21:38.000Z
|
2020-10-21T09:13:30.000Z
|
sknetwork/utils/seeds.py
|
altana-tech/scikit-network
|
dedc9d3e694c7106e4709aae22dffb5142c15859
|
[
"BSD-3-Clause"
] | 1
|
2020-06-19T09:39:11.000Z
|
2020-06-19T09:39:11.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Apr, 2019
@author: Nathan de Lara <ndelara@enst.fr>
"""
from typing import Optional, Union
import numpy as np
from sknetwork.utils.check import check_seeds
def stack_seeds(n_row: int, n_col: int, seeds_row: Optional[Union[np.ndarray, dict]],
seeds_col: Optional[Union[np.ndarray, dict]] = None, default_value: float = -1) -> np.ndarray:
"""Process seeds for rows and columns and stack the results into a single vector."""
if seeds_row is None and seeds_col is None:
seeds_row = np.ones(n_row)
seeds_col = default_value * np.ones(n_col)
elif seeds_row is None:
seeds_row = default_value * np.ones(n_row)
elif seeds_col is None:
seeds_col = default_value * np.ones(n_col)
seeds_row = check_seeds(seeds_row, n_row)
seeds_col = check_seeds(seeds_col, n_col)
return np.hstack((seeds_row, seeds_col))
def seeds2probs(n: int, seeds: Union[dict, np.ndarray] = None) -> np.ndarray:
"""Transform seeds into probability vector.
Parameters
----------
n : int
Total number of samples.
seeds :
If ``None``, the uniform distribution is used.
Otherwise, a non-negative, non-zero vector or a dictionary must be provided.
Returns
-------
probs: np.ndarray
A probability vector.
"""
if seeds is None:
return np.ones(n) / n
else:
seeds = check_seeds(seeds, n)
probs = np.zeros_like(seeds, dtype=float)
ix = (seeds > 0)
probs[ix] = seeds[ix]
w: float = probs.sum()
if w > 0:
return probs / w
else:
            raise ValueError('At least one seed must have a positive probability.')
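A quick sanity check of `seeds2probs`, assuming `check_seeds` accepts a NumPy array of length `n` and returns it unchanged:

import numpy as np

print(seeds2probs(4))                          # uniform: [0.25 0.25 0.25 0.25]
print(seeds2probs(4, np.array([0, 1, 0, 3])))  # [0.   0.25 0.   0.75]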
| 30.877193
| 110
| 0.628409
|
from typing import Optional, Union
import numpy as np
from sknetwork.utils.check import check_seeds
def stack_seeds(n_row: int, n_col: int, seeds_row: Optional[Union[np.ndarray, dict]],
seeds_col: Optional[Union[np.ndarray, dict]] = None, default_value: float = -1) -> np.ndarray:
if seeds_row is None and seeds_col is None:
seeds_row = np.ones(n_row)
seeds_col = default_value * np.ones(n_col)
elif seeds_row is None:
seeds_row = default_value * np.ones(n_row)
elif seeds_col is None:
seeds_col = default_value * np.ones(n_col)
seeds_row = check_seeds(seeds_row, n_row)
seeds_col = check_seeds(seeds_col, n_col)
return np.hstack((seeds_row, seeds_col))
def seeds2probs(n: int, seeds: Union[dict, np.ndarray] = None) -> np.ndarray:
if seeds is None:
return np.ones(n) / n
else:
seeds = check_seeds(seeds, n)
probs = np.zeros_like(seeds, dtype=float)
ix = (seeds > 0)
probs[ix] = seeds[ix]
w: float = probs.sum()
if w > 0:
return probs / w
else:
            raise ValueError('At least one seed must have a positive probability.')
| true
| true
|
79038c55ad7da113a70b0e7af3aba518741e5dde
| 1,803
|
py
|
Python
|
python-django/djmultidb/app1/management/commands/set_thing.py
|
dictoss/proto
|
972d8cb3d1b94d771be4c678d11927a6b478317f
|
[
"BSD-2-Clause"
] | null | null | null |
python-django/djmultidb/app1/management/commands/set_thing.py
|
dictoss/proto
|
972d8cb3d1b94d771be4c678d11927a6b478317f
|
[
"BSD-2-Clause"
] | 8
|
2020-02-28T20:25:16.000Z
|
2021-02-27T14:12:55.000Z
|
python-django/djmultidb/app1/management/commands/set_thing.py
|
dictoss/proto
|
972d8cb3d1b94d771be4c678d11927a6b478317f
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.conf import settings
from app1.models import Thing
class Command(BaseCommand):
args = '<id name>'
help = 'create or update thing model.'
use_settings = 'settings'
def handle(self, *args, **options):
"""
        Raise CommandError to finish with exit code 1;
        otherwise the command exits with code 0.
"""
_retcode = 1
_dbname = 'default'
try:
print('settings.ENV_MODE = %s' % (settings.ENV_MODE))
print('settings.DATABASES = %s' % (settings.DATABASES))
_id = int(args[0])
_name = args[1]
print('id: %s, name:%s' % (_id, _name))
qs = Thing.objects.filter(id=_id)
_nowdt = timezone.now()
if 0 < len(qs):
print('do update.')
_r = qs[0]
# _r.id
_r.name = _name
# _r.create_at
_r.update_at = _nowdt
_r.save(using=_dbname)
else:
print('do insert.')
if _id < 1:
_id = None
_t = Thing(
id=_id,
name=_name,
create_at=_nowdt,
update_at=_nowdt)
_t.save(using=_dbname)
except:
print('EXCEPT: %s(%s)' % (sys.exc_info()[0], sys.exc_info()[1]))
print('finished(ng)')
raise CommandError('ng')
# raise CommandError('ok')
print('finished(ok)')
sys.exit(0)
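This is an old-style (pre-Django 1.8) command that reads positional `args` directly, so it can be exercised from code with `call_command`. A hedged sketch:

from django.core.management import call_command

# Positional arguments map to args[0] (id) and args[1] (name) in handle().
# Note that handle() ends with sys.exit(0), so a SystemExit propagates on success.
call_command('set_thing', '7', 'widget-name')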
| 27.318182
| 76
| 0.460344
|
import os
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.conf import settings
from app1.models import Thing
class Command(BaseCommand):
args = '<id name>'
help = 'create or update thing model.'
use_settings = 'settings'
def handle(self, *args, **options):
_retcode = 1
_dbname = 'default'
try:
print('settings.ENV_MODE = %s' % (settings.ENV_MODE))
print('settings.DATABASES = %s' % (settings.DATABASES))
_id = int(args[0])
_name = args[1]
print('id: %s, name:%s' % (_id, _name))
qs = Thing.objects.filter(id=_id)
_nowdt = timezone.now()
if 0 < len(qs):
print('do update.')
_r = qs[0]
_r.name = _name
_r.update_at = _nowdt
_r.save(using=_dbname)
else:
print('do insert.')
if _id < 1:
_id = None
_t = Thing(
id=_id,
name=_name,
create_at=_nowdt,
update_at=_nowdt)
_t.save(using=_dbname)
except:
print('EXCEPT: %s(%s)' % (sys.exc_info()[0], sys.exc_info()[1]))
print('finished(ng)')
raise CommandError('ng')
print('finished(ok)')
sys.exit(0)
| true
| true
|
79038ddc5f95f32085d5a5e9cabd051d704108f4
| 508
|
py
|
Python
|
src/m101p/week02/lesson_files/hemmerling_week2_01.py
|
hemmerling/nosql-mongodb2013
|
bd2bb4f76234e0732b738f14cb474f7554c864c1
|
[
"Apache-2.0"
] | null | null | null |
src/m101p/week02/lesson_files/hemmerling_week2_01.py
|
hemmerling/nosql-mongodb2013
|
bd2bb4f76234e0732b738f14cb474f7554c864c1
|
[
"Apache-2.0"
] | null | null | null |
src/m101p/week02/lesson_files/hemmerling_week2_01.py
|
hemmerling/nosql-mongodb2013
|
bd2bb4f76234e0732b738f14cb474f7554c864c1
|
[
"Apache-2.0"
] | null | null | null |
import pymongo
import sys
# establish a connection to the database
# note this uses the now deprecated Connection class, as we did in the lecture.
# MongoClient is the preferred way of connecting.
connection = pymongo.Connection("mongodb://localhost", safe=True)
# get a handle to the school database
db=connection.school
scores = db.scores
query = {}
try:
doc = scores.find_one(query)
except:
print "Unexpected error:", sys.exc_info()[0]
print doc
| 23.090909
| 80
| 0.679134
|
import pymongo
import sys
connection = pymongo.Connection("mongodb://localhost", safe=True)
db=connection.school
scores = db.scores
query = {}
try:
doc = scores.find_one(query)
except:
print "Unexpected error:", sys.exc_info()[0]
print doc
| false
| true
|
79039026c03ab9d987d6ab9c76340fb9530f7d99
| 1,040
|
py
|
Python
|
test/unit/reductions/exponentiated_gradient/simple_learners.py
|
Dref360/fairlearn
|
7042181add288c65174ac065f1474928e11f3f4c
|
[
"MIT"
] | 1
|
2020-09-02T05:59:56.000Z
|
2020-09-02T05:59:56.000Z
|
test/unit/reductions/exponentiated_gradient/simple_learners.py
|
chrinide/fairlearn
|
8f087fbb0b27740d10b31d95706bb175a4b4581c
|
[
"MIT"
] | 6
|
2021-03-11T00:38:07.000Z
|
2022-02-27T07:50:00.000Z
|
test/unit/reductions/exponentiated_gradient/simple_learners.py
|
chrinide/fairlearn
|
8f087fbb0b27740d10b31d95706bb175a4b4581c
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
class LeastSquaresBinaryClassifierLearner:
def __init__(self):
self.weights = None
def fit(self, X, Y, sample_weight):
sqrtW = np.sqrt(sample_weight)
matX = np.array(X) * sqrtW[:, np.newaxis]
vecY = Y * sqrtW
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=-1)
self.weights = pd.Series(self.lsqinfo[0], index=list(X))
def predict(self, X):
pred = X.dot(np.asarray(self.weights))
return 1 * (pred > 0.5)
class LeastSquaresRegressor:
def __init__(self):
self.weights = None
def fit(self, X, Y, sample_weight):
sqrtW = np.sqrt(sample_weight)
matX = np.array(X) * sqrtW[:, np.newaxis]
vecY = Y * sqrtW
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=-1)
self.weights = pd.Series(self.lsqinfo[0], index=list(X))
def predict(self, X):
return X.dot(self.weights)
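A tiny end-to-end check of the binary learner above on separable toy data:

import numpy as np
import pandas as pd

X = pd.DataFrame({"x0": [0, 1, 2, 3], "bias": [1, 1, 1, 1]})
y = np.array([0, 0, 1, 1])

clf = LeastSquaresBinaryClassifierLearner()
clf.fit(X, y, sample_weight=np.ones(len(y)))
print(clf.predict(X).tolist())  # [0, 0, 1, 1]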
| 28.108108
| 64
| 0.623077
|
import numpy as np
import pandas as pd
class LeastSquaresBinaryClassifierLearner:
def __init__(self):
self.weights = None
def fit(self, X, Y, sample_weight):
sqrtW = np.sqrt(sample_weight)
matX = np.array(X) * sqrtW[:, np.newaxis]
vecY = Y * sqrtW
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=-1)
self.weights = pd.Series(self.lsqinfo[0], index=list(X))
def predict(self, X):
pred = X.dot(np.asarray(self.weights))
return 1 * (pred > 0.5)
class LeastSquaresRegressor:
def __init__(self):
self.weights = None
def fit(self, X, Y, sample_weight):
sqrtW = np.sqrt(sample_weight)
matX = np.array(X) * sqrtW[:, np.newaxis]
vecY = Y * sqrtW
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=-1)
self.weights = pd.Series(self.lsqinfo[0], index=list(X))
def predict(self, X):
return X.dot(self.weights)
| true
| true
|
7903904fe37652d8f18fd832aa590c0aec7e87a4
| 2,111
|
py
|
Python
|
toby.py
|
axxiao/toby
|
de64f4b2f5e39531d08143e99cf2785992010a13
|
[
"MIT"
] | null | null | null |
toby.py
|
axxiao/toby
|
de64f4b2f5e39531d08143e99cf2785992010a13
|
[
"MIT"
] | null | null | null |
toby.py
|
axxiao/toby
|
de64f4b2f5e39531d08143e99cf2785992010a13
|
[
"MIT"
] | null | null | null |
# The Core of Toby
from flask import Flask, request, jsonify, g
import os
import logging
from ax.log import trace_error
from ax.connection import DatabaseConnection
from ax.datetime import now
from ax.tools import load_function, get_uuid, decrypt
from ax.exception import InvalidToken
logger = logging.getLogger('werkzeug')
debug_flg = True if os.getenv('TOBY_DEBUG', 'True') == 'True' else False
token = os.environ['TOBY_TOKEN']
app = Flask('Toby')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.logger.setLevel(logging.DEBUG if debug_flg else logging.INFO)
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'db'):
g.db = DatabaseConnection(os.getenv('TOBY_DB_USER', 'toby'), os.environ['TOBY_DB_PASSWORD'])
return g.db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'db'):
g.db.disconnect()
if error:
logger.error('Database connection closed because of :' + str(error))
@app.route("/")
def ping():
return "<h1 style='color:blue'>Hello There! This is Toby</h1>"
@app.route("/process")
def process():
request_id = None
try:
in_param = request.get_json(force=True, silent=False, cache=False)
if decrypt(in_param['request_token']) != token:
# verify token
raise InvalidToken(in_param)
if 'request_id' not in in_param:
request_id = get_uuid()
in_param['request_id'] = request_id
else:
request_id = in_param['request_id']
if 'request_timestamp' not in in_param:
in_param['request_timestamp'] = now()
in_param['logger'] = logger
in_param['get_db_connection'] = get_db
func = load_function(in_param)
resp = func()
except:
e = trace_error(logger)
resp = {'request_id': request_id, 'request_status': 'error', 'request_error': str(e[-1])}
return jsonify(resp)
if __name__ == "__main__":
app.run()
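Once the service is running (e.g. `python toby.py` with the TOBY_* environment variables set), the ping route can be exercised directly; `/process` additionally expects a JSON body whose `request_token` decrypts to the configured token. A minimal sketch against Flask's default local port:

import requests

# Assumes the app is listening on Flask's default 127.0.0.1:5000.
print(requests.get("http://127.0.0.1:5000/").text)
# <h1 style='color:blue'>Hello There! This is Toby</h1>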
| 30.157143
| 100
| 0.658456
|
from flask import Flask, request, jsonify, g
import os
import logging
from ax.log import trace_error
from ax.connection import DatabaseConnection
from ax.datetime import now
from ax.tools import load_function, get_uuid, decrypt
from ax.exception import InvalidToken
logger = logging.getLogger('werkzeug')
debug_flg = True if os.getenv('TOBY_DEBUG', 'True') == 'True' else False
token = os.environ['TOBY_TOKEN']
app = Flask('Toby')
app.logger.setLevel(logging.DEBUG if debug_flg else logging.INFO)
def get_db():
if not hasattr(g, 'db'):
g.db = DatabaseConnection(os.getenv('TOBY_DB_USER', 'toby'), os.environ['TOBY_DB_PASSWORD'])
return g.db
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'db'):
g.db.disconnect()
if error:
logger.error('Database connection closed because of :' + str(error))
@app.route("/")
def ping():
return "<h1 style='color:blue'>Hello There! This is Toby</h1>"
@app.route("/process")
def process():
request_id = None
try:
in_param = request.get_json(force=True, silent=False, cache=False)
if decrypt(in_param['request_token']) != token:
raise InvalidToken(in_param)
if 'request_id' not in in_param:
request_id = get_uuid()
in_param['request_id'] = request_id
else:
request_id = in_param['request_id']
if 'request_timestamp' not in in_param:
in_param['request_timestamp'] = now()
in_param['logger'] = logger
in_param['get_db_connection'] = get_db
func = load_function(in_param)
resp = func()
except:
e = trace_error(logger)
resp = {'request_id': request_id, 'request_status': 'error', 'request_error': str(e[-1])}
return jsonify(resp)
if __name__ == "__main__":
app.run()
| true
| true
|
79039090b5ba670aae52d062c1e99a1d92faca54
| 209
|
py
|
Python
|
tests/test_activeLearning.py
|
sankhaMukherjee/activeLearning
|
a739280e2c9e026358ede62720c7d5e4d20b9e12
|
[
"MIT"
] | null | null | null |
tests/test_activeLearning.py
|
sankhaMukherjee/activeLearning
|
a739280e2c9e026358ede62720c7d5e4d20b9e12
|
[
"MIT"
] | null | null | null |
tests/test_activeLearning.py
|
sankhaMukherjee/activeLearning
|
a739280e2c9e026358ede62720c7d5e4d20b9e12
|
[
"MIT"
] | null | null | null |
import pytest
import activeLearning as tP
def test_sayHello():
assert tP.sayHello() == 'Hello World'
assert tP.sayHello('Sankha') == 'Hello Sankha'
assert tP.sayHello(-1) == 'Hello -1'
return
| 23.222222
| 50
| 0.669856
|
import pytest
import activeLearning as tP
def test_sayHello():
assert tP.sayHello() == 'Hello World'
assert tP.sayHello('Sankha') == 'Hello Sankha'
assert tP.sayHello(-1) == 'Hello -1'
return
| true
| true
|
7903926acbaa8d31eb11ec47f8286b0261947b45
| 4,519
|
py
|
Python
|
networking_bgp_ovn/drivers/openstack/utils/frr.py
|
luis5tb/networking-bgp-ovn
|
3c3d71bd045a971390fda89e5f0e724b490ee80f
|
[
"Apache-2.0"
] | 1
|
2022-01-28T14:38:53.000Z
|
2022-01-28T14:38:53.000Z
|
networking_bgp_ovn/drivers/openstack/utils/frr.py
|
luis5tb/networking-bgp-ovn
|
3c3d71bd045a971390fda89e5f0e724b490ee80f
|
[
"Apache-2.0"
] | null | null | null |
networking_bgp_ovn/drivers/openstack/utils/frr.py
|
luis5tb/networking-bgp-ovn
|
3c3d71bd045a971390fda89e5f0e724b490ee80f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from jinja2 import Template
from oslo_concurrency import processutils
from oslo_log import log as logging
from networking_bgp_ovn import constants
LOG = logging.getLogger(__name__)
ADD_VRF_TEMPLATE = '''
vrf {{ vrf_name }}
vni {{ vni }}
exit-vrf
router bgp {{ bgp_as }} vrf {{ vrf_name }}
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
exit-address-family
address-family l2vpn evpn
advertise ipv4 unicast
advertise ipv6 unicast
exit-address-family
'''
DEL_VRF_TEMPLATE = '''
no vrf {{ vrf_name }}
no router bgp {{ bgp_as }} vrf {{ vrf_name }}
'''
LEAK_VRF_TEMPLATE = '''
router bgp {{ bgp_as }}
address-family ipv4 unicast
import vrf {{ vrf_name }}
exit-address-family
address-family ipv6 unicast
import vrf {{ vrf_name }}
exit-address-family
router bgp {{ bgp_as }} vrf {{ vrf_name }}
bgp router-id {{ bgp_router_id }}
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
exit-address-family
'''
def _run_vtysh_config(frr_config_file):
vtysh_command = "copy {} running-config".format(frr_config_file)
full_args = ['/usr/bin/vtysh', '--vty_socket', constants.FRR_SOCKET_PATH,
'-c', vtysh_command]
try:
return processutils.execute(*full_args, run_as_root=True)
except Exception as e:
print("Unable to execute vtysh with {}. Exception: {}".format(
full_args, e))
raise
def _run_vtysh_command(command):
full_args = ['/usr/bin/vtysh', '--vty_socket', constants.FRR_SOCKET_PATH,
'-c', command]
try:
return processutils.execute(*full_args, run_as_root=True)[0]
except Exception as e:
print("Unable to execute vtysh with {}. Exception: {}".format(
full_args, e))
raise
def _get_router_id(bgp_as):
output = _run_vtysh_command(command='show ip bgp summary json')
return json.loads(output).get('ipv4Unicast', {}).get('routerId')
def vrf_leak(vrf, bgp_as, bgp_router_id=None):
LOG.info("Add VRF leak for VRF {} on router bgp {}".format(vrf, bgp_as))
if not bgp_router_id:
bgp_router_id = _get_router_id(bgp_as)
if not bgp_router_id:
LOG.error("Unknown router-id, needed for route leaking")
return
vrf_template = Template(LEAK_VRF_TEMPLATE)
vrf_config = vrf_template.render(vrf_name=vrf, bgp_as=bgp_as,
bgp_router_id=bgp_router_id)
frr_config_file = "frr-config-vrf-leak-{}".format(vrf)
with open(frr_config_file, 'w') as vrf_config_file:
vrf_config_file.write(vrf_config)
_run_vtysh_config(frr_config_file)
def vrf_reconfigure(evpn_info, action):
LOG.info("FRR reconfiguration (action = {}) for evpn: {}".format(
action, evpn_info))
frr_config_file = None
if action == "add-vrf":
vrf_template = Template(ADD_VRF_TEMPLATE)
vrf_config = vrf_template.render(
vrf_name="{}{}".format(constants.OVN_EVPN_VRF_PREFIX,
evpn_info['vni']),
bgp_as=evpn_info['bgp_as'],
vni=evpn_info['vni'])
frr_config_file = "frr-config-add-vrf-{}".format(evpn_info['vni'])
elif action == "del-vrf":
vrf_template = Template(DEL_VRF_TEMPLATE)
vrf_config = vrf_template.render(
vrf_name="{}{}".format(constants.OVN_EVPN_VRF_PREFIX,
evpn_info['vni']),
bgp_as=evpn_info['bgp_as'])
frr_config_file = "frr-config-del-vrf-{}".format(evpn_info['vni'])
else:
LOG.error("Unknown FRR reconfiguration action: %s", action)
return
with open(frr_config_file, 'w') as vrf_config_file:
vrf_config_file.write(vrf_config)
_run_vtysh_config(frr_config_file)
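# --- Hedged usage sketch appended by the editor; not part of the original module. ---
# How a caller might drive the helpers above. Only the dict keys 'vni' and
# 'bgp_as' are taken from the code; the numeric values, the router-id, and the
# resulting VRF name (which the code builds from constants.OVN_EVPN_VRF_PREFIX)
# are assumptions for illustration.
evpn_info = {"vni": 1001, "bgp_as": 64999}
vrf_reconfigure(evpn_info, "add-vrf")    # renders ADD_VRF_TEMPLATE and loads it via vtysh
vrf_leak("vrf-1001", 64999, bgp_router_id="192.0.2.1")  # explicit router-id skips the lookup
vrf_reconfigure(evpn_info, "del-vrf")    # renders DEL_VRF_TEMPLATE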
| 31.165517
| 77
| 0.669617
|
import json
from jinja2 import Template
from oslo_concurrency import processutils
from oslo_log import log as logging
from networking_bgp_ovn import constants
LOG = logging.getLogger(__name__)
ADD_VRF_TEMPLATE = '''
vrf {{ vrf_name }}
vni {{ vni }}
exit-vrf
router bgp {{ bgp_as }} vrf {{ vrf_name }}
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
exit-address-family
address-family l2vpn evpn
advertise ipv4 unicast
advertise ipv6 unicast
exit-address-family
'''
DEL_VRF_TEMPLATE = '''
no vrf {{ vrf_name }}
no router bgp {{ bgp_as }} vrf {{ vrf_name }}
'''
LEAK_VRF_TEMPLATE = '''
router bgp {{ bgp_as }}
address-family ipv4 unicast
import vrf {{ vrf_name }}
exit-address-family
address-family ipv6 unicast
import vrf {{ vrf_name }}
exit-address-family
router bgp {{ bgp_as }} vrf {{ vrf_name }}
bgp router-id {{ bgp_router_id }}
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
exit-address-family
'''
def _run_vtysh_config(frr_config_file):
vtysh_command = "copy {} running-config".format(frr_config_file)
full_args = ['/usr/bin/vtysh', '--vty_socket', constants.FRR_SOCKET_PATH,
'-c', vtysh_command]
try:
return processutils.execute(*full_args, run_as_root=True)
except Exception as e:
print("Unable to execute vtysh with {}. Exception: {}".format(
full_args, e))
raise
def _run_vtysh_command(command):
full_args = ['/usr/bin/vtysh', '--vty_socket', constants.FRR_SOCKET_PATH,
'-c', command]
try:
return processutils.execute(*full_args, run_as_root=True)[0]
except Exception as e:
print("Unable to execute vtysh with {}. Exception: {}".format(
full_args, e))
raise
def _get_router_id(bgp_as):
output = _run_vtysh_command(command='show ip bgp summary json')
return json.loads(output).get('ipv4Unicast', {}).get('routerId')
def vrf_leak(vrf, bgp_as, bgp_router_id=None):
LOG.info("Add VRF leak for VRF {} on router bgp {}".format(vrf, bgp_as))
if not bgp_router_id:
bgp_router_id = _get_router_id(bgp_as)
if not bgp_router_id:
LOG.error("Unknown router-id, needed for route leaking")
return
vrf_template = Template(LEAK_VRF_TEMPLATE)
vrf_config = vrf_template.render(vrf_name=vrf, bgp_as=bgp_as,
bgp_router_id=bgp_router_id)
frr_config_file = "frr-config-vrf-leak-{}".format(vrf)
with open(frr_config_file, 'w') as vrf_config_file:
vrf_config_file.write(vrf_config)
_run_vtysh_config(frr_config_file)
def vrf_reconfigure(evpn_info, action):
LOG.info("FRR reconfiguration (action = {}) for evpn: {}".format(
action, evpn_info))
frr_config_file = None
if action == "add-vrf":
vrf_template = Template(ADD_VRF_TEMPLATE)
vrf_config = vrf_template.render(
vrf_name="{}{}".format(constants.OVN_EVPN_VRF_PREFIX,
evpn_info['vni']),
bgp_as=evpn_info['bgp_as'],
vni=evpn_info['vni'])
frr_config_file = "frr-config-add-vrf-{}".format(evpn_info['vni'])
elif action == "del-vrf":
vrf_template = Template(DEL_VRF_TEMPLATE)
vrf_config = vrf_template.render(
vrf_name="{}{}".format(constants.OVN_EVPN_VRF_PREFIX,
evpn_info['vni']),
bgp_as=evpn_info['bgp_as'])
frr_config_file = "frr-config-del-vrf-{}".format(evpn_info['vni'])
else:
LOG.error("Unknown FRR reconfiguration action: %s", action)
return
with open(frr_config_file, 'w') as vrf_config_file:
vrf_config_file.write(vrf_config)
_run_vtysh_config(frr_config_file)
| true
| true
|
7903928af3e822cd867926c5aa1b2c70382d029b
| 481
|
py
|
Python
|
scripts/fiftyone_sample.py
|
bikramA/sample-code
|
47efe43583046a1aa31660872d30bea5669e827a
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/fiftyone_sample.py
|
bikramA/sample-code
|
47efe43583046a1aa31660872d30bea5669e827a
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/fiftyone_sample.py
|
bikramA/sample-code
|
47efe43583046a1aa31660872d30bea5669e827a
|
[
"BSD-2-Clause"
] | null | null | null |
import fiftyone as fo
import fiftyone.zoo as foz
# Load Dataset
dataset = foz.load_zoo_dataset("coco-2017", split="validation")
# Randomly select 20 samples on which to generate predictions
view = dataset.take(20)
# Load zoo model
model = foz.load_zoo_model("keypoint-rcnn-resnet50-fpn-coco-torch")
# Run Inference
view.apply_model(model, label_field="predictions")
# Launch the FiftyOne App to visualize your dataset
session = fo.launch_app(dataset)
session.view = view
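# --- Hedged follow-up appended by the editor; not part of the original script. ---
# When run as a plain script the process would exit right after launch_app(),
# closing the App; blocking on the session keeps it open. session.wait() is
# assumed to be available in the installed fiftyone version.
session.wait()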
| 21.863636
| 67
| 0.77131
|
import fiftyone as fo
import fiftyone.zoo as foz
dataset = foz.load_zoo_dataset("coco-2017", split="validation")
view = dataset.take(20)
model = foz.load_zoo_model("keypoint-rcnn-resnet50-fpn-coco-torch")
view.apply_model(model, label_field="predictions")
session = fo.launch_app(dataset)
session.view = view
| true
| true
|
790392b5f069c2c394c4071f1fe6c1063d4ce649
| 4,837
|
py
|
Python
|
portfolio_functions.py
|
MaxGosselin/portfolio_optimizer
|
a137d5b029aff0b584adb9df0ba8bf1831731882
|
[
"MIT",
"Unlicense"
] | 3
|
2019-03-28T15:38:52.000Z
|
2020-12-16T21:11:30.000Z
|
portfolio_functions.py
|
MaxGosselin/portfolio_optimizer
|
a137d5b029aff0b584adb9df0ba8bf1831731882
|
[
"MIT",
"Unlicense"
] | null | null | null |
portfolio_functions.py
|
MaxGosselin/portfolio_optimizer
|
a137d5b029aff0b584adb9df0ba8bf1831731882
|
[
"MIT",
"Unlicense"
] | null | null | null |
'''
A collection of functions to perform portfolio analysis.
Max Gosselin, 2019
'''
import numpy as np
import pandas as pd
from scipy import optimize
def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
''' Compute basic portfolio metrics: return, stdv, sharpe ratio '''
portfolio_return = np.sum(weights * avg_xs_returns)
portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
portfolio_sharpe = portfolio_return / portfolio_stdv
tickers = covariance_matrix.columns
metrics = {
'return': portfolio_return,
'stdv': portfolio_stdv,
'sharpe': portfolio_sharpe,
'weights': weights
}
metrics.update(dict([(ticker, weight) for ticker, weight in zip(tickers, weights)]).items())
return metrics
def simulate_portfolios(iters, xs_stats, covariance_matrix):
''' Randomly generate portfolios that sit inside the efficient frontier,
for illustrative purposes. '''
# Set up an empty array to store our generated portfolios
simulations = []
while iters > 1:
weights = np.random.random(len(xs_stats.columns))
weights /= np.sum(weights)
simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
iters -= 1
return simulations
def solve_minvar(xs_avg, covariance_matrix):
''' Solve for the weights of the minimum variance portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
'''
def __minvar(weights, xs_avg, covariance_matrix):
''' Anonymous function to compute stdv '''
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def solve_maxsharpe(xs_avg, covariance_matrix):
''' Solve for the weights of the maximum Sharpe ratio portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
'''
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
''' Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative. '''
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return -pm['return'] / pm['stdv']
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1/p_size) * np.ones(p_size)), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def solve_for_target_return(xs_avg, covariance_matrix, target):
''' Solve for the weights of the minimum variance portfolio which has
a specific targeted return.
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
portfolio return = target return,
Returns the weights and the jacobian used to generate the solution.
'''
def __minvar(weights, xs_avg, covariance_matrix):
''' Anonymous function to compute stdv '''
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
def __match_target(weights):
''' Anonymous function to check equality with the target return '''
return np.sum(weights * xs_avg)
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
{'type': 'eq', 'fun': lambda x: __match_target(x) - target},
]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__minvar, ((1/p_size) * np.ones(p_size)), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def generate_efficient_frontier(targets, xs_avg, covariance_matrix):
portfolios = []
for target in targets:
p_weights = solve_for_target_return(xs_avg, covariance_matrix, target)
portfolios.append(portfolio_metrics(p_weights['x'], xs_avg, covariance_matrix))
return portfolios
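# --- Hedged usage sketch appended by the editor; not part of the original module. ---
# Exercises the functions above on made-up data: the tickers and the normally
# distributed excess returns are stand-ins, not real market data. Six assets
# are used so the [0, 0.2] weight bounds can still sum to 1.
np.random.seed(0)
tickers = ["AAA", "BBB", "CCC", "DDD", "EEE", "FFF"]
xs_returns = pd.DataFrame(np.random.normal(0.01, 0.05, size=(250, len(tickers))),
columns=tickers)
xs_avg = xs_returns.mean()
covariance_matrix = xs_returns.cov()

minvar = solve_minvar(xs_avg, covariance_matrix)          # scipy OptimizeResult
maxsharpe = solve_maxsharpe(xs_avg, covariance_matrix)
print(portfolio_metrics(minvar["x"], xs_avg, covariance_matrix)["stdv"])
print(portfolio_metrics(maxsharpe["x"], xs_avg, covariance_matrix)["sharpe"])

# Trace the frontier between the min-variance return and the highest return
# reachable under the 0.2-per-asset cap (0.2 * sum of the five best averages).
low = portfolio_metrics(minvar["x"], xs_avg, covariance_matrix)["return"]
high = 0.2 * xs_avg.nlargest(5).sum()
frontier = generate_efficient_frontier(np.linspace(low, high, 20),
xs_avg, covariance_matrix)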
| 32.682432
| 112
| 0.647509
|
import numpy as np
import pandas as pd
from scipy import optimize
def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
portfolio_return = np.sum(weights * avg_xs_returns)
portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
portfolio_sharpe = portfolio_return / portfolio_stdv
tickers = covariance_matrix.columns
metrics = {
'return': portfolio_return,
'stdv': portfolio_stdv,
'sharpe': portfolio_sharpe,
'weights': weights
}
metrics.update(dict([(ticker, weight) for ticker, weight in zip(tickers, weights)]).items())
return metrics
def simulate_portfolios(iters, xs_stats, covariance_matrix):
simulations = []
while iters > 1:
weights = np.random.random(len(xs_stats.columns))
weights /= np.sum(weights)
simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
iters -= 1
return simulations
def solve_minvar(xs_avg, covariance_matrix):
def __minvar(weights, xs_avg, covariance_matrix):
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def solve_maxsharpe(xs_avg, covariance_matrix):
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return -pm['return'] / pm['stdv']
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1/p_size) * np.ones(p_size)), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def solve_for_target_return(xs_avg, covariance_matrix, target):
def __minvar(weights, xs_avg, covariance_matrix):
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
def __match_target(weights):
return np.sum(weights * xs_avg)
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
{'type': 'eq', 'fun': lambda x: __match_target(x) - target},
]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__minvar, ((1/p_size) * np.ones(p_size)), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def generate_efficient_frontier(targets, xs_avg, covariance_matrix):
portfolios = []
for target in targets:
p_weights = solve_for_target_return(xs_avg, covariance_matrix, target)
portfolios.append(portfolio_metrics(p_weights['x'], xs_avg, covariance_matrix))
return portfolios
| true
| true
|
7903935e85620581cc46d245843af981f771fb1d
| 102,390
|
py
|
Python
|
pudb/debugger.py
|
ranelpadon/pudb
|
634393f0cb482139af0419c637f2e84b8bb90d16
|
[
"MIT"
] | null | null | null |
pudb/debugger.py
|
ranelpadon/pudb
|
634393f0cb482139af0419c637f2e84b8bb90d16
|
[
"MIT"
] | null | null | null |
pudb/debugger.py
|
ranelpadon/pudb
|
634393f0cb482139af0419c637f2e84b8bb90d16
|
[
"MIT"
] | 1
|
2021-05-13T13:15:47.000Z
|
2021-05-13T13:15:47.000Z
|
__copyright__ = """
Copyright (C) 2009-2017 Andreas Kloeckner
Copyright (C) 2014-2017 Aaron Meurer
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urwid
import bdb
import gc
import os
import sys
from itertools import count
from functools import partial
from types import TracebackType
from pudb.lowlevel import decode_lines, ui_log
from pudb.settings import load_config, save_config
CONFIG = load_config()
save_config(CONFIG)
HELP_HEADER = r"""
Key Assignments: Use Arrow Down/Up or Page Down/Up to scroll.
"""
HELP_MAIN = r"""
Keys:
Ctrl-p - edit preferences
n - step over ("next")
s - step into
c - continue
r/f - finish current function
t - run to cursor
e - show traceback [post-mortem or in exception state]
b - set/clear breakpoint
Ctrl-e - open file at current line to edit with $EDITOR
H - move to current line (bottom of stack)
u - move up one stack frame
d - move down one stack frame
o - show console/output screen
m - open module
j/k - down/up
l/h - right/left
Ctrl-f/b - page down/up
Ctrl-d/u - page down/up
G/g - end/home
L - show (file/line) location / go to line
/ - search
,/. - search next/previous
V - focus variables
S - focus stack
B - focus breakpoint list
C - focus code
F1/? - show this help screen
q - quit
Ctrl-r - reload breakpoints from saved-breakpoints file
Ctrl-c - when in continue mode, break back to PuDB
Ctrl-l - redraw screen
Shell-related:
! - open the external shell (configured in the settings)
Ctrl-x - toggle the internal shell focus
+/- - grow/shrink inline shell (active in command line history)
_/= - minimize/maximize inline shell (active in command line history)
Ctrl-v - insert newline
Ctrl-n/p - browse command line history
Tab - yes, there is (simple) tab completion
"""
HELP_SIDE = r"""
Sidebar-related (active in sidebar):
+/- - grow/shrink sidebar
_/= - minimize/maximize sidebar
[/] - grow/shrink relative size of active sidebar box
Keys in variables list:
\/enter/space - expand/collapse
h - collapse
l - expand
d/t/r/s/i/c - show default/type/repr/str/id/custom for this variable
H - toggle highlighting
@ - toggle repetition at top
* - cycle attribute visibility: public/_private/__dunder__
m - toggle method visibility
w - toggle line wrapping
n/insert - add new watch expression
e - edit options (also to delete)
Keys in stack list:
enter - jump to frame
Ctrl-e - open file at line to edit with $EDITOR
Keys in breakpoints list:
enter - jump to breakpoint
b - toggle breakpoint
d - delete breakpoint
e - edit breakpoint
Other keys:
j/k - down/up
l/h - right/left
Ctrl-f/b - page down/up
Ctrl-d/u - page down/up
G/g - end/home
V - focus variables
S - focus stack
B - focus breakpoint list
C - focus code
F1/? - show this help screen
q - quit
Ctrl-l - redraw screen
"""
HELP_LICENSE = r"""
License:
--------
PuDB is licensed to you under the MIT/X Consortium license:
Copyright (c) 2009-16 Andreas Kloeckner and contributors
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# {{{ debugger interface
class Debugger(bdb.Bdb):
def __init__(self, stdin=None, stdout=None, term_size=None, steal_output=False,
**kwargs):
# Pass remaining kwargs to python debugger framework
bdb.Bdb.__init__(self, **kwargs)
self.ui = DebuggerUI(self, stdin=stdin, stdout=stdout, term_size=term_size)
self.steal_output = steal_output
self.setup_state()
if steal_output:
raise NotImplementedError("output stealing")
from io import StringIO
self.stolen_output = sys.stderr = sys.stdout = StringIO()
sys.stdin = StringIO("") # avoid spurious hangs
from pudb.settings import load_breakpoints
for bpoint_descr in load_breakpoints():
self.set_break(*bpoint_descr)
# These (dispatch_line and set_continue) are copied from bdb with the
# patch from https://bugs.python.org/issue16482 applied. See
# https://github.com/inducer/pudb/pull/90.
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting:
raise bdb.BdbQuit
# Do not re-install the local trace when we are finished debugging,
# see issues 16482 and 7238.
if not sys.gettrace():
return None
return self.trace_dispatch
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame:
del frame.f_trace
if frame is self.botframe:
break
frame = frame.f_back
def set_trace(self, frame=None, as_breakpoint=None, paused=True):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
Unlike Bdb.set_trace(), this does not call self.reset(), which causes
the debugger to enter bdb source code. This also implements treating
set_trace() calls as breakpoints in the PuDB UI.
If as_breakpoint=True (the default), this call will be treated like a
breakpoint in the UI (you can press 'b' on it to disable breaking
here).
If paused=False, the debugger will not break here.
"""
if as_breakpoint is None:
if not paused:
as_breakpoint = False
else:
as_breakpoint = True
if frame is None:
frame = thisframe = sys._getframe().f_back
else:
thisframe = frame
# See pudb issue #52. If this works well enough we should upstream to
# stdlib bdb.py.
#self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
thisframe_info = (
self.canonic(thisframe.f_code.co_filename), thisframe.f_lineno)
if thisframe_info not in self.set_traces or self.set_traces[thisframe_info]:
if as_breakpoint:
self.set_traces[thisframe_info] = True
if self.ui.source_code_provider is not None:
self.ui.set_source_code_provider(
self.ui.source_code_provider, force_update=True)
if paused:
self.set_step()
else:
self.set_continue()
sys.settrace(self.trace_dispatch)
else:
return
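# Editor's note (not upstream pudb code): the method above is what backs the
# public convenience wrapper, so usage is roughly
#   import pudb; pudb.set_trace(paused=False)
# paused=False records the call site in self.set_traces without stopping,
# while the default paused=True also calls set_step() so the UI opens at this
# frame. The wrapper form and keyword names are inferred from the signature
# above, not quoted from pudb's documentation.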
def save_breakpoints(self):
from pudb.settings import save_breakpoints
save_breakpoints([
bp
for fn, bp_lst in self.get_all_breaks().items()
for lineno in bp_lst
for bp in self.get_breaks(fn, lineno)
if not bp.temporary])
def enter_post_mortem(self, exc_tuple):
self.post_mortem = True
def setup_state(self):
self.bottom_frame = None
self.mainpyfile = ""
self._wait_for_mainpyfile = False
self.current_bp = None
self.post_mortem = False
# Mapping of (filename, lineno) to bool. If True, will stop on the
# set_trace() call at that location.
self.set_traces = {}
def restart(self):
from linecache import checkcache
checkcache()
self.ui.set_source_code_provider(NullSourceCodeProvider())
self.setup_state()
def do_clear(self, arg):
self.clear_bpbynumber(int(arg))
def set_frame_index(self, index):
self.curindex = index
if index < 0 or index >= len(self.stack):
return
self.curframe, lineno = self.stack[index]
filename = self.curframe.f_code.co_filename
import linecache
if not linecache.getlines(filename):
code = self.curframe.f_globals.get("_MODULE_SOURCE_CODE")
if code is not None:
self.ui.set_current_line(lineno,
DirectSourceCodeProvider(
self.curframe.f_code.co_name, code))
else:
self.ui.set_current_line(lineno,
NullSourceCodeProvider())
else:
self.ui.set_current_line(lineno,
FileSourceCodeProvider(self, filename))
self.ui.update_var_view()
self.ui.update_stack()
self.ui.stack_list._w.set_focus(self.ui.translate_ui_stack_index(index))
@staticmethod
def open_file_to_edit(filename, line_number):
if not os.path.isfile(filename):
raise FileNotFoundError(f"'{filename}' not found or is not a file.")
if not line_number:
line_number = 1
editor = os.environ.get("EDITOR", "nano")
import subprocess
subprocess.call([editor, f"+{line_number}", filename], shell=False)
return filename
def move_up_frame(self):
if self.curindex > 0:
self.set_frame_index(self.curindex-1)
def move_down_frame(self):
if self.curindex < len(self.stack)-1:
self.set_frame_index(self.curindex+1)
def get_shortened_stack(self, frame, tb):
stack, index = self.get_stack(frame, tb)
for i, (s_frame, lineno) in enumerate(stack):
if s_frame is self.bottom_frame and index >= i:
stack = stack[i:]
index -= i
return stack, index
def interaction(self, frame, exc_tuple=None, show_exc_dialog=True):
if exc_tuple is None:
tb = None
elif isinstance(exc_tuple, TracebackType):
# For API compatibility with other debuggers, the second variable
# can be a traceback object. In that case, we need to retrieve the
# corresponding exception tuple.
tb = exc_tuple
exc, = (exc for exc in gc.get_referrers(tb)
if getattr(exc, "__traceback__", None) is tb)
exc_tuple = type(exc), exc, tb
else:
tb = exc_tuple[2]
if frame is None and tb is not None:
frame = tb.tb_frame
found_bottom_frame = False
walk_frame = frame
while True:
if walk_frame is self.bottom_frame:
found_bottom_frame = True
break
if walk_frame is None:
break
walk_frame = walk_frame.f_back
if not found_bottom_frame and not self.post_mortem:
return
self.stack, index = self.get_shortened_stack(frame, tb)
if self.post_mortem:
index = len(self.stack)-1
self.set_frame_index(index)
self.ui.call_with_ui(self.ui.interaction, exc_tuple,
show_exc_dialog=show_exc_dialog)
def get_stack_situation_id(self):
return str(id(self.stack[self.curindex][0].f_code))
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.interaction(frame)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if "__exc_tuple__" in frame.f_locals:
del frame.f_locals["__exc_tuple__"]
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
self.bottom_frame = frame
if self.get_break(self.canonic(frame.f_code.co_filename), frame.f_lineno):
self.current_bp = (
self.canonic(frame.f_code.co_filename), frame.f_lineno)
else:
self.current_bp = None
try:
self.ui.update_breakpoints()
self.interaction(frame)
except Exception:
self.ui.show_internal_exc_dlg(sys.exc_info())
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
if frame.f_code.co_name != "<module>":
frame.f_locals["__return__"] = return_value
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
self.bottom_frame = frame
if "__exc_tuple__" not in frame.f_locals:
self.interaction(frame)
def user_exception(self, frame, exc_tuple):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals["__exc_tuple__"] = exc_tuple
if not self._wait_for_mainpyfile:
self.interaction(frame, exc_tuple)
def _runscript(self, filename):
# Provide separation from current __main__, which is likely
# pudb.__main__ run. Preserving its namespace is not important, and
# having the script share it ensures that, e.g., pickle can find
# types defined there:
# https://github.com/inducer/pudb/issues/331
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({
"__name__": "__main__",
"__file__": filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happen
# BEFORE the debugger even reaches the user's code (and the exact sequence
# of events depends on the Python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
statement = 'exec(compile(open("{}").read(), "{}", "exec"))'.format(
filename, filename)
# Set up an interrupt handler
from pudb import set_interrupt_handler
set_interrupt_handler()
# Implicitly runs in the namespace of __main__.
self.run(statement)
def _runmodule(self, module_name):
# This is basically stolen from the pdb._runmodule from CPython 3.8
# https://github.com/python/cpython/blob/a1d3be4623c8ec7069bd34ccdce336be9cdeb644/Lib/pdb.py#L1530
import runpy
mod_name, mod_spec, code = runpy._get_module_details(module_name)
self.mainpyfile = self.canonic(code.co_filename)
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({
"__name__": "__main__",
"__file__": self.mainpyfile,
"__spec__": mod_spec,
"__builtins__": __builtins__,
"__package__": mod_spec.parent,
"__loader__": mod_spec.loader,
})
self._wait_for_mainpyfile = True
self.run(code)
# }}}
# UI stuff --------------------------------------------------------------------
from pudb.ui_tools import make_hotkey_markup, labelled_value, \
SelectableText, SignalWrap, StackFrame, BreakpointFrame
from pudb.var_view import FrameVarInfoKeeper
# {{{ display setup
try:
import curses
except ImportError:
curses = None
from urwid.raw_display import Screen as RawScreen
try:
from urwid.curses_display import Screen as CursesScreen
except ImportError:
CursesScreen = None
class ThreadsafeScreenMixin:
"""A Screen subclass that doesn't crash when running from a non-main thread."""
def signal_init(self):
"""Initialize signal handler, ignoring errors silently."""
try:
super().signal_init()
except ValueError:
pass
def signal_restore(self):
"""Restore default signal handler, ignoring errors silently."""
try:
super().signal_restore()
except ValueError:
pass
class ThreadsafeRawScreen(ThreadsafeScreenMixin, RawScreen):
pass
class ThreadsafeFixedSizeRawScreen(ThreadsafeScreenMixin, RawScreen):
def __init__(self, **kwargs):
self._term_size = kwargs.pop("term_size", None)
super().__init__(**kwargs)
def get_cols_rows(self):
if self._term_size is not None:
return self._term_size
else:
return 80, 24
if curses is not None:
class ThreadsafeCursesScreen(ThreadsafeScreenMixin, RawScreen):
pass
# }}}
# {{{ source code providers
class SourceCodeProvider:
def __ne__(self, other):
return not (self == other)
class NullSourceCodeProvider(SourceCodeProvider):
def __eq__(self, other):
return type(self) == type(other)
def identifier(self):
return "<no source code>"
def get_source_identifier(self):
return None
def clear_cache(self):
pass
def get_lines(self, debugger_ui):
from pudb.source_view import SourceLine
return [
SourceLine(debugger_ui, "<no source code available>"),
SourceLine(debugger_ui, ""),
SourceLine(debugger_ui, "If this is generated code and you would "
"like the source code to show up here,"),
SourceLine(debugger_ui, "add it to linecache.cache, like"),
SourceLine(debugger_ui, ""),
SourceLine(debugger_ui, " import linecache"),
SourceLine(debugger_ui, " linecache.cache[filename] = "
"(size, mtime, lines, fullname)"),
SourceLine(debugger_ui, ""),
SourceLine(debugger_ui, "You can also set the attribute "
"_MODULE_SOURCE_CODE in the module in which this function"),
SourceLine(debugger_ui, "was compiled to a string containing "
"the code."),
]
class FileSourceCodeProvider(SourceCodeProvider):
def __init__(self, debugger, file_name):
self.file_name = debugger.canonic(file_name)
def __eq__(self, other):
return type(self) == type(other) and self.file_name == other.file_name
def identifier(self):
return self.file_name
def get_source_identifier(self):
return self.file_name
def clear_cache(self):
from linecache import clearcache
clearcache()
def get_lines(self, debugger_ui):
from pudb.source_view import SourceLine, format_source
if self.file_name == "<string>":
return [SourceLine(debugger_ui, self.file_name)]
breakpoints = debugger_ui.debugger.get_file_breaks(self.file_name)[:]
breakpoints = [lineno for lineno in breakpoints if
any(bp.enabled
for bp in debugger_ui.debugger.get_breaks(self.file_name, lineno))]
breakpoints += [i for f, i in debugger_ui.debugger.set_traces if f
== self.file_name and debugger_ui.debugger.set_traces[f, i]]
try:
from linecache import getlines
lines = getlines(self.file_name)
return format_source(
debugger_ui, list(decode_lines(lines)), set(breakpoints))
except Exception:
from pudb.lowlevel import format_exception
debugger_ui.message("Could not load source file '{}':\n\n{}".format(
self.file_name, "".join(format_exception(sys.exc_info()))),
title="Source Code Load Error")
return [SourceLine(debugger_ui,
"Error while loading '%s'." % self.file_name)]
class DirectSourceCodeProvider(SourceCodeProvider):
def __init__(self, func_name, code):
self.function_name = func_name
self.code = code
def __eq__(self, other):
return (
type(self) == type(other)
and self.function_name == other.function_name
and self.code is other.code)
def identifier(self):
return "<source code of function %s>" % self.function_name
def get_source_identifier(self):
return None
def clear_cache(self):
pass
def get_lines(self, debugger_ui):
from pudb.source_view import format_source
lines = self.code.splitlines(True)
return format_source(debugger_ui, list(decode_lines(lines)), set())
# }}}
class DebuggerUI(FrameVarInfoKeeper):
# {{{ constructor
def __init__(self, dbg, stdin, stdout, term_size):
FrameVarInfoKeeper.__init__(self)
self.debugger = dbg
from urwid import AttrMap
from pudb.ui_tools import SearchController
self.search_controller = SearchController(self)
self.last_module_filter = ""
# {{{ build ui
# {{{ key bindings
def move_up(w, size, key):
w.keypress(size, "up")
def move_down(w, size, key):
w.keypress(size, "down")
def move_left(w, size, key):
w.keypress(size, "left")
def move_right(w, size, key):
w.keypress(size, "right")
def page_up(w, size, key):
w.keypress(size, "page up")
def page_down(w, size, key):
w.keypress(size, "page down")
def move_home(w, size, key):
w.keypress(size, "home")
def move_end(w, size, key):
w.keypress(size, "end")
def add_vi_nav_keys(widget):
widget.listen("k", move_up)
widget.listen("j", move_down)
widget.listen("h", move_left)
widget.listen("l", move_right)
widget.listen("ctrl b", page_up)
widget.listen("ctrl f", page_down)
widget.listen("ctrl u", page_up)
widget.listen("ctrl d", page_down)
widget.listen("g", move_home)
widget.listen("G", move_end)
def add_help_keys(widget, helpfunc):
widget.listen("f1", helpfunc)
widget.listen("?", helpfunc)
# }}}
# {{{ left/source column
self.source = urwid.SimpleListWalker([])
self.source_list = urwid.ListBox(self.source)
self.source_sigwrap = SignalWrap(self.source_list)
self.source_attr = urwid.AttrMap(self.source_sigwrap, "source")
self.source_hscroll_start = 0
self.cmdline_history = []
self.cmdline_history_position = -1
self.cmdline_contents = urwid.SimpleFocusListWalker([])
self.cmdline_list = urwid.ListBox(self.cmdline_contents)
self.cmdline_edit = urwid.Edit([
("command line prompt", ">>> ")
])
cmdline_edit_attr = urwid.AttrMap(self.cmdline_edit, "command line edit")
self.cmdline_edit_sigwrap = SignalWrap(
cmdline_edit_attr, is_preemptive=True)
def clear_cmdline_history(btn):
del self.cmdline_contents[:]
self.cmdline_edit_bar = urwid.Columns([
self.cmdline_edit_sigwrap,
("fixed", 10, AttrMap(
urwid.Button("Clear", clear_cmdline_history),
"command line clear button", "command line focused button"))
])
self.cmdline_pile = urwid.Pile([
("flow", urwid.Text("Command line: [Ctrl-X]")),
("weight", 1, urwid.AttrMap(self.cmdline_list, "command line output")),
("flow", self.cmdline_edit_bar),
])
self.cmdline_sigwrap = SignalWrap(
urwid.AttrMap(self.cmdline_pile, None, "focused sidebar")
)
self.cmdline_on = not CONFIG["hide_cmdline_win"]
self.cmdline_weight = 1
self.lhs_col = urwid.Pile([
("weight", 5, self.source_attr),
("weight", self.cmdline_weight if self.cmdline_on else 0,
self.cmdline_sigwrap),
])
# }}}
# {{{ right column
self.locals = urwid.SimpleListWalker([])
self.var_list = SignalWrap(
urwid.ListBox(self.locals))
self.stack_walker = urwid.SimpleListWalker([])
self.stack_list = SignalWrap(
urwid.ListBox(self.stack_walker))
self.bp_walker = urwid.SimpleListWalker([])
self.bp_list = SignalWrap(
urwid.ListBox(self.bp_walker))
self.rhs_col = urwid.Pile([
("weight", float(CONFIG["variables_weight"]), AttrMap(urwid.Pile([
("flow", urwid.Text(make_hotkey_markup("_Variables:"))),
AttrMap(self.var_list, "variables"),
]), None, "focused sidebar"),),
("weight", float(CONFIG["stack_weight"]), AttrMap(urwid.Pile([
("flow", urwid.Text(make_hotkey_markup("_Stack:"))),
AttrMap(self.stack_list, "stack"),
]), None, "focused sidebar"),),
("weight", float(CONFIG["breakpoints_weight"]), AttrMap(urwid.Pile([
("flow", urwid.Text(make_hotkey_markup("_Breakpoints:"))),
AttrMap(self.bp_list, "breakpoint"),
]), None, "focused sidebar"),),
])
self.rhs_col_sigwrap = SignalWrap(self.rhs_col)
def helpside(w, size, key):
help(HELP_HEADER + HELP_SIDE + HELP_MAIN + HELP_LICENSE)
add_vi_nav_keys(self.rhs_col_sigwrap)
add_help_keys(self.rhs_col_sigwrap, helpside)
# }}}
self.columns = urwid.Columns(
[
("weight", 1, self.lhs_col),
("weight", float(CONFIG["sidebar_width"]),
self.rhs_col_sigwrap),
],
dividechars=1)
self.caption = urwid.Text("")
header = urwid.AttrMap(self.caption, "header")
self.top = SignalWrap(urwid.Frame(
urwid.AttrMap(self.columns, "background"),
header))
# }}}
def change_rhs_box(name, index, direction, w, size, key):
from pudb.settings import save_config
weight = self.rhs_col.item_types[index][1]
if direction < 0:
if weight > 1/5:
weight /= 1.25
else:
if weight < 5:
weight *= 1.25
CONFIG[name+"_weight"] = weight
save_config(CONFIG)
self.rhs_col.item_types[index] = "weight", weight
self.rhs_col._invalidate()
# {{{ variables listeners
def get_inspect_info(id_path, read_only=False):
return (self.get_frame_var_info(read_only)
.get_inspect_info(id_path, read_only))
def collapse_current(var, pos, iinfo):
if iinfo.show_detail:
# collapse current variable
iinfo.show_detail = False
else:
# collapse parent/container variable
if var.parent is not None:
p_iinfo = get_inspect_info(var.parent.id_path)
p_iinfo.show_detail = False
return self.locals.index(var.parent)
return None
def change_var_state(w, size, key):
var, pos = self.var_list._w.get_focus()
if var is None:
return
iinfo = get_inspect_info(var.id_path)
focus_index = None
if key == "enter" or key == "\\" or key == " ":
iinfo.show_detail = not iinfo.show_detail
elif key == "h":
focus_index = collapse_current(var, pos, iinfo)
elif key == "l":
iinfo.show_detail = True
elif key == "d":
iinfo.display_type = "default"
elif key == "t":
iinfo.display_type = "type"
elif key == "r":
iinfo.display_type = "repr"
elif key == "s":
iinfo.display_type = "str"
elif key == "i":
iinfo.display_type = "id"
elif key == "c":
iinfo.display_type = CONFIG["custom_stringifier"]
elif key == "H":
iinfo.highlighted = not iinfo.highlighted
elif key == "@":
iinfo.repeated_at_top = not iinfo.repeated_at_top
elif key == "*":
levels = ["public", "private", "all", "public"]
iinfo.access_level = levels[levels.index(iinfo.access_level)+1]
elif key == "w":
iinfo.wrap = not iinfo.wrap
elif key == "m":
iinfo.show_methods = not iinfo.show_methods
self.update_var_view(focus_index=focus_index)
def edit_inspector_detail(w, size, key):
var, pos = self.var_list._w.get_focus()
if var is None:
return
fvi = self.get_frame_var_info(read_only=False)
iinfo = fvi.get_inspect_info(var.id_path, read_only=False)
buttons = [
("OK", True),
("Cancel", False),
]
if var.watch_expr is not None:
watch_edit = urwid.Edit([
("label", "Watch expression: ")
], var.watch_expr.expression)
id_segment = [urwid.AttrMap(watch_edit, "value"), urwid.Text("")]
buttons.extend([None, ("Delete", "del")])
title = "Watch Expression Options"
else:
id_segment = [
labelled_value("Identifier Path: ", var.id_path),
urwid.Text(""),
]
title = "Variable Inspection Options"
rb_grp_show = []
rb_show_default = urwid.RadioButton(rb_grp_show, "Default",
iinfo.display_type == "default")
rb_show_type = urwid.RadioButton(rb_grp_show, "Show type()",
iinfo.display_type == "type")
rb_show_repr = urwid.RadioButton(rb_grp_show, "Show repr()",
iinfo.display_type == "repr")
rb_show_str = urwid.RadioButton(rb_grp_show, "Show str()",
iinfo.display_type == "str")
rb_show_id = urwid.RadioButton(rb_grp_show, "Show id()",
iinfo.display_type == "id")
rb_show_custom = urwid.RadioButton(
rb_grp_show, "Show custom (set in prefs)",
iinfo.display_type == CONFIG["custom_stringifier"])
rb_grp_access = []
rb_access_public = urwid.RadioButton(rb_grp_access, "Public members",
iinfo.access_level == "public")
rb_access_private = urwid.RadioButton(
rb_grp_access, "Public and private members",
iinfo.access_level == "private")
rb_access_all = urwid.RadioButton(
rb_grp_access, "All members (including __dunder__)",
iinfo.access_level == "all")
wrap_checkbox = urwid.CheckBox("Line Wrap", iinfo.wrap)
expanded_checkbox = urwid.CheckBox("Expanded", iinfo.show_detail)
highlighted_checkbox = urwid.CheckBox("Highlighted", iinfo.highlighted)
repeated_at_top_checkbox = urwid.CheckBox(
"Repeated at top", iinfo.repeated_at_top)
show_methods_checkbox = urwid.CheckBox(
"Show methods", iinfo.show_methods)
lb = urwid.ListBox(urwid.SimpleListWalker(
id_segment
+ rb_grp_show + [urwid.Text("")]
+ rb_grp_access + [urwid.Text("")]
+ [
wrap_checkbox,
expanded_checkbox,
highlighted_checkbox,
repeated_at_top_checkbox,
show_methods_checkbox,
]))
result = self.dialog(lb, buttons, title=title)
if result is True:
iinfo.show_detail = expanded_checkbox.get_state()
iinfo.wrap = wrap_checkbox.get_state()
iinfo.highlighted = highlighted_checkbox.get_state()
iinfo.repeated_at_top = repeated_at_top_checkbox.get_state()
iinfo.show_methods = show_methods_checkbox.get_state()
if rb_show_default.get_state():
iinfo.display_type = "default"
elif rb_show_type.get_state():
iinfo.display_type = "type"
elif rb_show_repr.get_state():
iinfo.display_type = "repr"
elif rb_show_str.get_state():
iinfo.display_type = "str"
elif rb_show_id.get_state():
iinfo.display_type = "id"
elif rb_show_custom.get_state():
iinfo.display_type = CONFIG["custom_stringifier"]
if rb_access_public.get_state():
iinfo.access_level = "public"
elif rb_access_private.get_state():
iinfo.access_level = "private"
elif rb_access_all.get_state():
iinfo.access_level = "all"
if var.watch_expr is not None:
var.watch_expr.expression = watch_edit.get_edit_text()
elif result == "del":
for i, watch_expr in enumerate(fvi.watches):
if watch_expr is var.watch_expr:
del fvi.watches[i]
self.update_var_view()
def insert_watch(w, size, key):
watch_edit = urwid.Edit([
("label", "Watch expression: ")
])
if self.dialog(
urwid.ListBox(urwid.SimpleListWalker([
urwid.AttrMap(watch_edit, "value")
])),
[
("OK", True),
("Cancel", False),
], title="Add Watch Expression"):
from pudb.var_view import WatchExpression
we = WatchExpression(watch_edit.get_edit_text())
fvi = self.get_frame_var_info(read_only=False)
fvi.watches.append(we)
self.update_var_view()
self.var_list.listen("\\", change_var_state)
self.var_list.listen(" ", change_var_state)
self.var_list.listen("h", change_var_state)
self.var_list.listen("l", change_var_state)
self.var_list.listen("d", change_var_state)
self.var_list.listen("t", change_var_state)
self.var_list.listen("r", change_var_state)
self.var_list.listen("s", change_var_state)
self.var_list.listen("i", change_var_state)
self.var_list.listen("c", change_var_state)
self.var_list.listen("H", change_var_state)
self.var_list.listen("@", change_var_state)
self.var_list.listen("*", change_var_state)
self.var_list.listen("w", change_var_state)
self.var_list.listen("m", change_var_state)
self.var_list.listen("enter", change_var_state)
self.var_list.listen("e", edit_inspector_detail)
self.var_list.listen("n", insert_watch)
self.var_list.listen("insert", insert_watch)
self.var_list.listen("[", partial(change_rhs_box, "variables", 0, -1))
self.var_list.listen("]", partial(change_rhs_box, "variables", 0, 1))
# }}}
# {{{ stack listeners
def examine_frame(w, size, key):
_, pos = self.stack_list._w.get_focus()
self.debugger.set_frame_index(self.translate_ui_stack_index(pos))
self.stack_list.listen("enter", examine_frame)
def open_file_editor(file_name, line_number):
file_changed = False
try:
original_modification_time = os.path.getmtime(file_name)
self.screen.stop()
filename_edited = self.debugger.open_file_to_edit(file_name,
line_number)
self.screen.start()
new_modification_time = os.path.getmtime(file_name)
file_changed = new_modification_time - original_modification_time > 0
except Exception:
from traceback import format_exception
self.message("Exception happened when trying to edit the file:"
"\n\n%s" % ("".join(format_exception(*sys.exc_info()))),
title="File Edit Error")
return
if file_changed:
self.message("File is changed, but the execution is continued with"
" the 'old' codebase.\n"
f"Changed file: {filename_edited}\n\n"
"Please quit and restart to see changes",
title="File is changed")
def open_editor_on_stack_frame(w, size, key):
_, pos = self.stack_list._w.get_focus()
index = self.translate_ui_stack_index(pos)
curframe, line_number = self.debugger.stack[index]
file_name = curframe.f_code.co_filename
open_file_editor(file_name, line_number)
self.stack_list.listen("ctrl e", open_editor_on_stack_frame)
def move_stack_top(w, size, key):
self.debugger.set_frame_index(len(self.debugger.stack)-1)
def move_stack_up(w, size, key):
self.debugger.move_up_frame()
def move_stack_down(w, size, key):
self.debugger.move_down_frame()
self.stack_list.listen("H", move_stack_top)
self.stack_list.listen("u", move_stack_up)
self.stack_list.listen("d", move_stack_down)
self.stack_list.listen("[", partial(change_rhs_box, "stack", 1, -1))
self.stack_list.listen("]", partial(change_rhs_box, "stack", 1, 1))
# }}}
# {{{ breakpoint listeners
def save_breakpoints(w, size, key):
self.debugger.save_breakpoints()
def delete_breakpoint(w, size, key):
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier is None:
self.message(
"Cannot currently delete a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
bp_list = self._get_bp_list()
if bp_list:
_, pos = self.bp_list._w.get_focus()
bp = bp_list[pos]
if bp_source_identifier == bp.file and bp.line-1 < len(self.source):
self.source[bp.line-1].set_breakpoint(False)
err = self.debugger.clear_break(bp.file, bp.line)
if err:
self.message("Error clearing breakpoint:\n" + err)
else:
self.update_breakpoints()
def enable_disable_breakpoint(w, size, key):
bp_entry, pos = self.bp_list._w.get_focus()
if bp_entry is None:
return
bp = self._get_bp_list()[pos]
bp.enabled = not bp.enabled
sline = self.source[bp.line-1]
sline.set_breakpoint(bp.enabled)
self.update_breakpoints()
def examine_breakpoint(w, size, key):
bp_entry, pos = self.bp_list._w.get_focus()
if bp_entry is None:
return
bp = self._get_bp_list()[pos]
if bp.cond is None:
cond = ""
else:
cond = str(bp.cond)
enabled_checkbox = urwid.CheckBox(
"Enabled", bp.enabled)
cond_edit = urwid.Edit([
("label", "Condition: ")
], cond)
ign_count_edit = urwid.IntEdit([
("label", "Ignore the next N times: ")
], bp.ignore)
lb = urwid.ListBox(urwid.SimpleListWalker([
labelled_value("File: ", bp.file),
labelled_value("Line: ", bp.line),
labelled_value("Hits: ", bp.hits),
urwid.Text(""),
enabled_checkbox,
urwid.AttrMap(cond_edit, "value", "value"),
urwid.AttrMap(ign_count_edit, "value", "value"),
]))
result = self.dialog(lb, [
("OK", True),
("Cancel", False),
None,
("Delete", "del"),
("Location", "loc"),
], title="Edit Breakpoint")
if result is True:
bp.enabled = enabled_checkbox.get_state()
bp.ignore = int(ign_count_edit.value())
cond = cond_edit.get_edit_text()
if cond:
bp.cond = cond
else:
bp.cond = None
elif result == "loc":
self.show_line(bp.line,
FileSourceCodeProvider(self.debugger, bp.file))
self.columns.set_focus(0)
elif result == "del":
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier is None:
self.message(
"Cannot currently delete a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
if bp_source_identifier == bp.file:
self.source[bp.line-1].set_breakpoint(False)
err = self.debugger.clear_break(bp.file, bp.line)
if err:
self.message("Error clearing breakpoint:\n" + err)
else:
self.update_breakpoints()
def show_breakpoint(w, size, key):
bp_entry, pos = self.bp_list._w.get_focus()
if bp_entry is not None:
bp = self._get_bp_list()[pos]
self.show_line(bp.line,
FileSourceCodeProvider(self.debugger, bp.file))
self.bp_list.listen("enter", show_breakpoint)
self.bp_list.listen("d", delete_breakpoint)
self.bp_list.listen("s", save_breakpoints)
self.bp_list.listen("e", examine_breakpoint)
self.bp_list.listen("b", enable_disable_breakpoint)
self.bp_list.listen("H", move_stack_top)
self.bp_list.listen("[", partial(change_rhs_box, "breakpoints", 2, -1))
self.bp_list.listen("]", partial(change_rhs_box, "breakpoints", 2, 1))
# }}}
# {{{ source listeners
def end():
self.debugger.save_breakpoints()
self.quit_event_loop = True
def next_line(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_next(self.debugger.curframe)
end()
def step(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_step()
end()
def finish(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_return(self.debugger.curframe)
end()
def cont(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_continue()
end()
def run_to_cursor(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
sline, pos = self.source.get_focus()
lineno = pos+1
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier is None:
self.message(
"Cannot currently set a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
from pudb.lowlevel import get_breakpoint_invalid_reason
invalid_reason = get_breakpoint_invalid_reason(
bp_source_identifier, lineno)
if invalid_reason is not None:
self.message(
"Cannot run to the line you indicated, "
"for the following reason:\n\n"
+ invalid_reason)
else:
err = self.debugger.set_break(
bp_source_identifier, pos+1, temporary=True)
if err:
self.message("Error dealing with breakpoint:\n" + err)
self.debugger.set_continue()
end()
def go_to_line(w, size, key):
_, line = self.source.get_focus()
lineno_edit = urwid.IntEdit([
("label", "Go to Line :")
], None)
if self.dialog(
urwid.ListBox(urwid.SimpleListWalker([
labelled_value("File :",
self.source_code_provider.identifier()),
labelled_value("Current Line :", line+1),
urwid.AttrMap(lineno_edit, "value")
])),
[
("OK", True),
("Cancel", False),
], title="Go to Line Number"):
lineno = min(max(0, int(lineno_edit.value())-1), len(self.source)-1)
self.source.set_focus(lineno)
def scroll_left(w, size, key):
self.source_hscroll_start = max(
0,
self.source_hscroll_start - 4)
for sl in self.source:
sl._invalidate()
def scroll_right(w, size, key):
self.source_hscroll_start += 4
for sl in self.source:
sl._invalidate()
def search(w, size, key):
self.search_controller.open_search_ui()
def search_next(w, size, key):
self.search_controller.perform_search(dir=1, update_search_start=True)
def search_previous(w, size, key):
self.search_controller.perform_search(dir=-1, update_search_start=True)
def toggle_breakpoint(w, size, key):
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier:
sline, pos = self.source.get_focus()
lineno = pos+1
existing_breaks = self.debugger.get_breaks(
bp_source_identifier, lineno)
if existing_breaks:
err = None
for bp in existing_breaks:
if not bp.enabled:
bp.enable()
sline.set_breakpoint(True)
# Unsure about this. Are multiple breakpoints even
# possible?
break
else:
err = self.debugger.clear_break(bp_source_identifier, lineno)
sline.set_breakpoint(False)
else:
file_lineno = (bp_source_identifier, lineno)
if file_lineno in self.debugger.set_traces:
self.debugger.set_traces[file_lineno] = \
not self.debugger.set_traces[file_lineno]
sline.set_breakpoint(self.debugger.set_traces[file_lineno])
return
from pudb.lowlevel import get_breakpoint_invalid_reason
invalid_reason = get_breakpoint_invalid_reason(
bp_source_identifier, pos+1)
if invalid_reason is not None:
do_set = not self.dialog(
urwid.ListBox(
urwid.SimpleListWalker([
urwid.Text(
"The breakpoint you just set may be "
"invalid, for the following reason:\n\n"
+ invalid_reason),
])), [
("Cancel", True),
("Set Anyway", False),
],
title="Possibly Invalid Breakpoint",
focus_buttons=True)
else:
do_set = True
if do_set:
err = self.debugger.set_break(bp_source_identifier, pos+1)
sline.set_breakpoint(True)
else:
err = None
if err:
self.message("Error dealing with breakpoint:\n" + err)
self.update_breakpoints()
else:
self.message(
"Cannot currently set a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
def pick_module(w, size, key):
from os.path import splitext
import sys
def mod_exists(mod):
if not hasattr(mod, "__file__"):
return False
if mod.__file__ is None:
return False
filename = mod.__file__
base, ext = splitext(filename)
ext = ext.lower()
from os.path import exists
if ext == ".pyc":
return exists(base+".py")
else:
return ext == ".py"
new_mod_text = SelectableText("-- update me --")
new_mod_entry = urwid.AttrMap(new_mod_text,
None, "focused selectable")
def build_filtered_mod_list(filt_string=""):
modules = sorted(name
# mod_exists may change the size of sys.modules,
# causing this to crash. Copy to a list.
for name, mod in list(sys.modules.items())
if mod_exists(mod))
result = [urwid.AttrMap(SelectableText(mod),
None, "focused selectable")
for mod in modules if filt_string in mod]
new_mod_text.set_text("<<< IMPORT MODULE '%s' >>>" % filt_string)
result.append(new_mod_entry)
return result
def show_mod(mod):
filename = self.debugger.canonic(mod.__file__)
base, ext = splitext(filename)
if ext == ".pyc":
ext = ".py"
filename = base+".py"
self.set_source_code_provider(
FileSourceCodeProvider(self.debugger, filename))
self.source_list.set_focus(0)
class FilterEdit(urwid.Edit):
def keypress(self, size, key):
result = urwid.Edit.keypress(self, size, key)
if result is None:
mod_list[:] = build_filtered_mod_list(
self.get_edit_text())
return result
filt_edit = FilterEdit([("label", "Filter: ")],
self.last_module_filter)
mod_list = urwid.SimpleListWalker(
build_filtered_mod_list(filt_edit.get_edit_text()))
lb = urwid.ListBox(mod_list)
w = urwid.Pile([
("flow", urwid.AttrMap(filt_edit, "value")),
("fixed", 1, urwid.SolidFill()),
urwid.AttrMap(lb, "selectable")])
while True:
result = self.dialog(w, [
("OK", True),
("Cancel", False),
("Reload", "reload"),
], title="Pick Module")
self.last_module_filter = filt_edit.get_edit_text()
if result is True:
widget, pos = lb.get_focus()
if widget is new_mod_entry:
new_mod_name = filt_edit.get_edit_text()
try:
__import__(str(new_mod_name))
except Exception:
from traceback import format_exception
self.message(
"Could not import module '{}':\n\n{}".format(
new_mod_name, "".join(
format_exception(*sys.exc_info()))),
title="Import Error")
else:
show_mod(__import__(str(new_mod_name)))
break
else:
show_mod(sys.modules[widget.base_widget.get_text()[0]])
break
elif result is False:
break
elif result == "reload":
widget, pos = lb.get_focus()
if widget is not new_mod_entry:
mod_name = widget.base_widget.get_text()[0]
mod = sys.modules[mod_name]
import importlib
importlib.reload(mod)
self.message("'%s' was successfully reloaded." % mod_name)
if self.source_code_provider is not None:
self.source_code_provider.clear_cache()
self.set_source_code_provider(self.source_code_provider,
force_update=True)
_, pos = self.stack_list._w.get_focus()
self.debugger.set_frame_index(
self.translate_ui_stack_index(pos))
def helpmain(w, size, key):
help(HELP_HEADER + HELP_MAIN + HELP_SIDE + HELP_LICENSE)
self.source_sigwrap.listen("n", next_line)
self.source_sigwrap.listen("s", step)
self.source_sigwrap.listen("f", finish)
self.source_sigwrap.listen("r", finish)
self.source_sigwrap.listen("c", cont)
self.source_sigwrap.listen("t", run_to_cursor)
self.source_sigwrap.listen("L", go_to_line)
self.source_sigwrap.listen("/", search)
self.source_sigwrap.listen(",", search_previous)
self.source_sigwrap.listen(".", search_next)
self.source_sigwrap.listen("b", toggle_breakpoint)
self.source_sigwrap.listen("m", pick_module)
self.source_sigwrap.listen("H", move_stack_top)
self.source_sigwrap.listen("u", move_stack_up)
self.source_sigwrap.listen("d", move_stack_down)
# left/right scrolling have to be handled specially, normal vi keys
# don't cut it
self.source_sigwrap.listen("h", scroll_left)
self.source_sigwrap.listen("l", scroll_right)
add_vi_nav_keys(self.source_sigwrap)
add_help_keys(self.source_sigwrap, helpmain)
# }}}
# {{{ command line listeners
def cmdline_get_namespace():
curframe = self.debugger.curframe
from pudb.shell import SetPropagatingDict
return SetPropagatingDict(
[curframe.f_locals, curframe.f_globals],
curframe.f_locals)
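# The command-line namespace layers the current frame's locals over its
# globals for lookups, while SetPropagatingDict routes assignments back
# into f_locals so names bound at the ">>>" prompt stay visible to later
# commands.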
def cmdline_tab_complete(w, size, key):
try:
from jedi import Interpreter
except ImportError:
self.add_cmdline_content(
"Tab completion requires jedi to be installed. ",
"command line error")
return
import jedi
from distutils.version import LooseVersion
if LooseVersion(jedi.__version__) < LooseVersion("0.16.0"):
self.add_cmdline_content(
"jedi 0.16.0 is required for Tab completion",
"command line error")
text = self.cmdline_edit.edit_text
pos = self.cmdline_edit.edit_pos
chopped_text = text[:pos]
suffix = text[pos:]
try:
completions = Interpreter(
chopped_text,
[cmdline_get_namespace()]).complete()
except Exception as e:
# Jedi sometimes produces errors. Ignore them.
self.add_cmdline_content(
"Could not tab complete (Jedi error: '%s')" % e,
"command line error")
return
full_completions = [i.name_with_symbols for i in completions]
chopped_completions = [i.complete for i in completions]
def common_prefix(a, b):
for i, (a_i, b_i) in enumerate(zip(a, b)):
if a_i != b_i:
return a[:i]
return a[:min(len(a), len(b))]
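# Worked example: folding common_prefix over completions such as
# ["split", "splitlines", "splitext"] leaves "split", which is what gets
# inserted; when the common part is empty and several candidates remain,
# the full candidate list is printed instead (handled further below).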
common_compl_prefix = None
for completion in chopped_completions:
if common_compl_prefix is None:
common_compl_prefix = completion
else:
common_compl_prefix = common_prefix(
common_compl_prefix, completion)
completed_chopped_text = common_compl_prefix
if completed_chopped_text is None:
return
if (
len(completed_chopped_text) == 0
and len(completions) > 1):
self.add_cmdline_content(
" ".join(full_completions),
"command line output")
return
self.cmdline_edit.edit_text = \
chopped_text+completed_chopped_text+suffix
self.cmdline_edit.edit_pos = (
len(chopped_text)
+ len(completed_chopped_text))
def cmdline_append_newline(w, size, key):
self.cmdline_edit.insert_text("\n")
def cmdline_exec(w, size, key):
cmd = self.cmdline_edit.get_edit_text()
if not cmd:
# blank command -> refuse service
return
self.add_cmdline_content(">>> " + cmd, "command line input")
if not self.cmdline_history or cmd != self.cmdline_history[-1]:
self.cmdline_history.append(cmd)
self.cmdline_history_position = -1
prev_sys_stdin = sys.stdin
prev_sys_stdout = sys.stdout
prev_sys_stderr = sys.stderr
from io import StringIO
sys.stdin = None
sys.stderr = sys.stdout = StringIO()
try:
eval(compile(cmd, "<pudb command line>", "single"),
cmdline_get_namespace())
except Exception:
tp, val, tb = sys.exc_info()
import traceback
tblist = traceback.extract_tb(tb)
del tblist[:1]
tb_lines = traceback.format_list(tblist)
if tb_lines:
tb_lines.insert(0, "Traceback (most recent call last):\n")
tb_lines[len(tb_lines):] = traceback.format_exception_only(tp, val)
self.add_cmdline_content("".join(tb_lines), "command line error")
else:
self.cmdline_edit.set_edit_text("")
finally:
if sys.stdout.getvalue():
self.add_cmdline_content(sys.stdout.getvalue(),
"command line output")
sys.stdin = prev_sys_stdin
sys.stdout = prev_sys_stdout
sys.stderr = prev_sys_stderr
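# The command is compiled in "single" mode so bare expressions echo their
# repr just as in the interactive interpreter, and anything the command
# writes to stdout/stderr is captured by the StringIO above and appended
# to the command-line window once execution finishes.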
def cmdline_history_browse(direction):
if self.cmdline_history_position == -1:
self.cmdline_history_position = len(self.cmdline_history)
self.cmdline_history_position += direction
if 0 <= self.cmdline_history_position < len(self.cmdline_history):
self.cmdline_edit.edit_text = \
self.cmdline_history[self.cmdline_history_position]
else:
self.cmdline_history_position = -1
self.cmdline_edit.edit_text = ""
self.cmdline_edit.edit_pos = len(self.cmdline_edit.edit_text)
def cmdline_history_prev(w, size, key):
cmdline_history_browse(-1)
def cmdline_history_next(w, size, key):
cmdline_history_browse(1)
def cmdline_start_of_line(w, size, key):
self.cmdline_edit.edit_pos = 0
def cmdline_end_of_line(w, size, key):
self.cmdline_edit.edit_pos = len(self.cmdline_edit.edit_text)
def cmdline_del_word(w, size, key):
pos = self.cmdline_edit.edit_pos
before, after = (
self.cmdline_edit.edit_text[:pos],
self.cmdline_edit.edit_text[pos:])
before = before[::-1]
before = before.lstrip()
i = 0
while i < len(before):
if not before[i].isspace():
i += 1
else:
break
self.cmdline_edit.edit_text = before[i:][::-1] + after
self.cmdline_edit.edit_pos = len(before[i:])
def cmdline_del_to_start_of_line(w, size, key):
pos = self.cmdline_edit.edit_pos
self.cmdline_edit.edit_text = self.cmdline_edit.edit_text[pos:]
self.cmdline_edit.edit_pos = 0
def toggle_cmdline_focus(w, size, key):
self.columns.set_focus(self.lhs_col)
if self.lhs_col.get_focus() is self.cmdline_sigwrap:
if CONFIG["hide_cmdline_win"]:
self.set_cmdline_state(False)
self.lhs_col.set_focus(self.search_controller.search_AttrMap
if self.search_controller.search_box else
self.source_attr)
else:
if CONFIG["hide_cmdline_win"]:
self.set_cmdline_state(True)
self.cmdline_pile.set_focus(self.cmdline_edit_bar)
self.lhs_col.set_focus(self.cmdline_sigwrap)
self.cmdline_edit_sigwrap.listen("tab", cmdline_tab_complete)
self.cmdline_edit_sigwrap.listen("ctrl v", cmdline_append_newline)
self.cmdline_edit_sigwrap.listen("enter", cmdline_exec)
self.cmdline_edit_sigwrap.listen("ctrl n", cmdline_history_next)
self.cmdline_edit_sigwrap.listen("ctrl p", cmdline_history_prev)
self.cmdline_edit_sigwrap.listen("esc", toggle_cmdline_focus)
self.cmdline_edit_sigwrap.listen("ctrl d", toggle_cmdline_focus)
self.cmdline_edit_sigwrap.listen("ctrl a", cmdline_start_of_line)
self.cmdline_edit_sigwrap.listen("ctrl e", cmdline_end_of_line)
self.cmdline_edit_sigwrap.listen("ctrl w", cmdline_del_word)
self.cmdline_edit_sigwrap.listen("ctrl u", cmdline_del_to_start_of_line)
self.top.listen("ctrl x", toggle_cmdline_focus)
# {{{ command line sizing
def set_cmdline_default_size(weight):
self.cmdline_weight = weight
self.set_cmdline_size()
def max_cmdline(w, size, key):
set_cmdline_default_size(5)
def min_cmdline(w, size, key):
set_cmdline_default_size(1/2)
def grow_cmdline(w, size, key):
weight = self.cmdline_weight
if weight < 5:
weight *= 1.25
set_cmdline_default_size(weight)
def shrink_cmdline(w, size, key):
weight = self.cmdline_weight
if weight > 1/2:
weight /= 1.25
set_cmdline_default_size(weight)
self.cmdline_sigwrap.listen("=", max_cmdline)
self.cmdline_sigwrap.listen("+", grow_cmdline)
self.cmdline_sigwrap.listen("_", min_cmdline)
self.cmdline_sigwrap.listen("-", shrink_cmdline)
# }}}
# }}}
# {{{ sidebar sizing
def max_sidebar(w, size, key):
from pudb.settings import save_config
weight = 5
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
def min_sidebar(w, size, key):
from pudb.settings import save_config
weight = 1/5
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
def grow_sidebar(w, size, key):
from pudb.settings import save_config
weight = self.columns.column_types[1][1]
if weight < 5:
weight *= 1.25
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
def shrink_sidebar(w, size, key):
from pudb.settings import save_config
weight = self.columns.column_types[1][1]
if weight > 1/5:
weight /= 1.25
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
self.rhs_col_sigwrap.listen("=", max_sidebar)
self.rhs_col_sigwrap.listen("+", grow_sidebar)
self.rhs_col_sigwrap.listen("_", min_sidebar)
self.rhs_col_sigwrap.listen("-", shrink_sidebar)
# }}}
# {{{ top-level listeners
def show_output(w, size, key):
self.screen.stop()
input("Hit Enter to return:")
self.screen.start()
def reload_breakpoints_and_redisplay():
reload_breakpoints()
curr_line = self.current_line
self.set_source_code_provider(self.source_code_provider,
force_update=True)
if curr_line is not None:
self.current_line = self.source[int(curr_line.line_nr)-1]
self.current_line.set_current(True)
def reload_breakpoints():
self.debugger.clear_all_breaks()
from pudb.settings import load_breakpoints
for bpoint_descr in load_breakpoints():
dbg.set_break(*bpoint_descr)
self.update_breakpoints()
def show_traceback(w, size, key):
if self.current_exc_tuple is not None:
from traceback import format_exception
result = self.dialog(
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(
"".join(format_exception(*self.current_exc_tuple)))])),
[
("Close", "close"),
("Location", "location")
],
title="Exception Viewer",
focus_buttons=True,
bind_enter_esc=False)
if result == "location":
self.debugger.set_frame_index(len(self.debugger.stack)-1)
else:
self.message("No exception available.")
def run_external_cmdline(w, size, key):
self.screen.stop()
curframe = self.debugger.curframe
import pudb.shell as shell
if CONFIG["shell"] == "ipython" and shell.have_ipython():
runner = shell.run_ipython_shell
elif CONFIG["shell"] == "ipython_kernel" and shell.have_ipython():
runner = shell.run_ipython_kernel
elif CONFIG["shell"] == "bpython" and shell.HAVE_BPYTHON:
runner = shell.run_bpython_shell
elif CONFIG["shell"] == "ptpython" and shell.HAVE_PTPYTHON:
runner = shell.run_ptpython_shell
elif CONFIG["shell"] == "ptipython" and shell.HAVE_PTIPYTHON:
runner = shell.run_ptipython_shell
elif CONFIG["shell"] == "classic":
runner = shell.run_classic_shell
else:
try:
if not shell.custom_shell_dict: # Only execfile once
from os.path import expanduser
cshell_fname = expanduser(CONFIG["shell"])
with open(cshell_fname) as inf:
exec(compile(inf.read(), cshell_fname, "exec"),
shell.custom_shell_dict,
shell.custom_shell_dict)
except Exception:
print("Error when importing custom shell:")
from traceback import print_exc
print_exc()
print("Falling back to classic shell")
runner = shell.run_classic_shell
else:
if "pudb_shell" not in shell.custom_shell_dict:
print("%s does not contain a function named pudb_shell at "
"the module level." % CONFIG["shell"])
print("Falling back to classic shell")
runner = shell.run_classic_shell
else:
runner = shell.custom_shell_dict["pudb_shell"]
runner(curframe.f_globals, curframe.f_locals)
self.screen.start()
self.update_var_view()
def run_cmdline(w, size, key):
if CONFIG["shell"] == "internal":
return toggle_cmdline_focus(w, size, key)
else:
return run_external_cmdline(w, size, key)
def focus_code(w, size, key):
self.columns.set_focus(self.lhs_col)
self.lhs_col.set_focus(self.source_attr)
class RHColumnFocuser:
def __init__(self, idx):
self.idx = idx
def __call__(subself, w, size, key): # noqa # pylint: disable=no-self-argument
self.columns.set_focus(self.rhs_col_sigwrap)
self.rhs_col.set_focus(self.rhs_col.widget_list[subself.idx])
def quit(w, size, key):
self.debugger.set_quit()
end()
def do_edit_config(w, size, key):
self.run_edit_config()
def redraw_screen(w, size, key):
self.screen.clear()
def help(pages):
self.message(pages, title="PuDB - The Python Urwid Debugger")
def edit_current_frame(w, size, key):
_, pos = self.source.get_focus()
source_identifier = \
self.source_code_provider.get_source_identifier()
if source_identifier is None:
self.message(
"Cannot edit the current file--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
open_file_editor(source_identifier, pos+1)
self.top.listen("o", show_output)
self.top.listen("ctrl r",
lambda w, size, key: reload_breakpoints_and_redisplay())
self.top.listen("!", run_cmdline)
self.top.listen("e", show_traceback)
self.top.listen("C", focus_code)
self.top.listen("V", RHColumnFocuser(0))
self.top.listen("S", RHColumnFocuser(1))
self.top.listen("B", RHColumnFocuser(2))
self.top.listen("q", quit)
self.top.listen("ctrl p", do_edit_config)
self.top.listen("ctrl l", redraw_screen)
self.top.listen("ctrl e", edit_current_frame)
# }}}
# {{{ setup
want_curses_display = (
CONFIG["display"] == "curses"
or (
CONFIG["display"] == "auto"
and not (
os.environ.get("TERM", "").startswith("xterm")
or os.environ.get("TERM", "").startswith("rxvt")
)))
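# The curses display is only used when explicitly requested, or in "auto"
# mode on terminals that do not identify as xterm/rxvt variants; passing
# an explicit stdin/stdout (e.g. for remote debugging) forces the
# raw-display path below instead.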
if (want_curses_display
and not (stdin is not None or stdout is not None)
and CursesScreen is not None):
self.screen = ThreadsafeCursesScreen()
else:
screen_kwargs = {}
if stdin is not None:
screen_kwargs["input"] = stdin
if stdout is not None:
screen_kwargs["output"] = stdout
if term_size is not None:
screen_kwargs["term_size"] = term_size
if screen_kwargs:
self.screen = ThreadsafeFixedSizeRawScreen(**screen_kwargs)
else:
self.screen = ThreadsafeRawScreen()
del want_curses_display
if curses:
try:
curses.setupterm()
except Exception:
# Something went wrong--oh well. Nobody will die if their
# 256 color support breaks. Just carry on without it.
# https://github.com/inducer/pudb/issues/78
pass
else:
color_support = curses.tigetnum("colors")
if color_support == 256 and isinstance(self.screen, RawScreen):
self.screen.set_terminal_properties(256)
self.setup_palette(self.screen)
self.show_count = 0
self.source_code_provider = None
self.current_line = None
self.quit_event_loop = False
# }}}
# }}}
# {{{ UI helpers
def add_cmdline_content(self, s, attr):
s = s.rstrip("\n")
from pudb.ui_tools import SelectableText
self.cmdline_contents.append(
urwid.AttrMap(SelectableText(s), attr, "focused "+attr))
# scroll to end of last entry
self.cmdline_list.set_focus_valign("bottom")
self.cmdline_list.set_focus(len(self.cmdline_contents) - 1,
coming_from="above")
# Force the commandline to be visible
self.set_cmdline_state(True)
def reset_cmdline_size(self):
self.lhs_col.item_types[-1] = "weight", \
self.cmdline_weight if self.cmdline_on else 0
def set_cmdline_size(self, weight=None):
if weight is None:
weight = self.cmdline_weight
self.lhs_col.item_types[-1] = "weight", weight
self.lhs_col._invalidate()
def set_cmdline_state(self, state_on):
if state_on != self.cmdline_on:
self.cmdline_on = state_on
self.set_cmdline_size(None if state_on else 0)
def translate_ui_stack_index(self, index):
# note: self-inverse
if CONFIG["current_stack_frame"] == "top":
return len(self.debugger.stack)-1-index
elif CONFIG["current_stack_frame"] == "bottom":
return index
else:
raise ValueError("invalid value for 'current_stack_frame' pref")
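# Example: with a 4-deep stack and the "top" ordering, UI row 0 shows the
# innermost frame (stack index 3) and row 3 the outermost (index 0); the
# mapping is its own inverse, so the same function converts both ways.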
def message(self, msg, title="Message", **kwargs):
self.call_with_ui(self.dialog,
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(msg)])),
[("OK", True)], title=title, **kwargs)
def run_edit_config(self):
from pudb.settings import edit_config, save_config
edit_config(self, CONFIG)
save_config(CONFIG)
def dialog(self, content, buttons_and_results,
title=None, bind_enter_esc=True, focus_buttons=False,
extra_bindings=[]):
class ResultSetter:
def __init__(subself, res): # noqa: N805, E501 # pylint: disable=no-self-argument
subself.res = res
def __call__(subself, btn): # noqa: N805, E501 # pylint: disable=no-self-argument
self.quit_event_loop = [subself.res]
Attr = urwid.AttrMap # noqa
if bind_enter_esc:
content = SignalWrap(content)
def enter(w, size, key):
self.quit_event_loop = [True]
def esc(w, size, key):
self.quit_event_loop = [False]
content.listen("enter", enter)
content.listen("esc", esc)
button_widgets = []
for btn_descr in buttons_and_results:
if btn_descr is None:
button_widgets.append(urwid.Text(""))
else:
btn_text, btn_result = btn_descr
button_widgets.append(
Attr(urwid.Button(btn_text, ResultSetter(btn_result)),
"button", "focused button"))
w = urwid.Columns([
content,
("fixed", 15, urwid.ListBox(urwid.SimpleListWalker(button_widgets))),
], dividechars=1)
if focus_buttons:
w.set_focus_column(1)
if title is not None:
w = urwid.Pile([
("flow", urwid.AttrMap(
urwid.Text(title, align="center"),
"dialog title")),
("fixed", 1, urwid.SolidFill()),
w])
class ResultSettingEventHandler:
def __init__(subself, res): # noqa: N805, E501 # pylint: disable=no-self-argument
subself.res = res
def __call__(subself, w, size, key): # noqa: N805, E501 # pylint: disable=no-self-argument
self.quit_event_loop = [subself.res]
w = SignalWrap(w)
for key, binding in extra_bindings:
if isinstance(binding, str):
w.listen(key, ResultSettingEventHandler(binding))
else:
w.listen(key, binding)
w = urwid.LineBox(w)
w = urwid.Overlay(w, self.top,
align="center",
valign="middle",
width=("relative", 75),
height=("relative", 75),
)
w = Attr(w, "background")
return self.event_loop(w)[0]
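# Illustrative sketch (not taken from elsewhere in this file): a simple
# confirmation dialog built with this helper could look like
#
#   result = self.dialog(
#       urwid.ListBox(urwid.SimpleListWalker([urwid.Text("Really quit?")])),
#       [("Yes", True), ("No", False)],
#       title="Confirm", focus_buttons=True)
#
# where `result` is the value attached to the chosen button, or True/False
# from enter/esc while bind_enter_esc is left at its default.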
@staticmethod
def setup_palette(screen):
may_use_fancy_formats = not hasattr(urwid.escape, "_fg_attr_xterm")
from pudb.theme import get_palette
screen.register_palette(
get_palette(may_use_fancy_formats, CONFIG["theme"]))
def show_exception_dialog(self, exc_tuple):
from traceback import format_exception
desc = (
"The program has terminated abnormally because of an exception.\n\n"
"A full traceback is below. You may recall this traceback at any "
"time using the 'e' key. The debugger has entered post-mortem mode "
"and will prevent further state changes."
)
tb_txt = "".join(format_exception(*exc_tuple))
self._show_exception_dialog(
description=desc,
error_info=tb_txt,
title="Program Terminated for Uncaught Exception",
exit_loop_on_ok=True,
)
def show_internal_exc_dlg(self, exc_tuple):
try:
self._show_internal_exc_dlg(exc_tuple)
except Exception:
ui_log.exception("Error while showing error dialog")
def _show_internal_exc_dlg(self, exc_tuple):
from traceback import format_exception
from pudb import VERSION
desc = (
"Pudb has encountered and safely caught an internal exception.\n\n"
"The full traceback and some other information can be found "
"below. Please report this information, along with details on "
"what you were doing at the time the exception occurred, at: "
"https://github.com/inducer/pudb/issues"
)
error_info = (
"python version: {python}\n"
"pudb version: {pudb}\n"
"urwid version: {urwid}\n"
"{tb}\n"
).format(
python=sys.version.replace("\n", " "),
pudb=VERSION,
urwid=".".join(map(str, urwid.version.VERSION)),
tb="".join(format_exception(*exc_tuple))
)
self._show_exception_dialog(
description=desc,
error_info=error_info,
title="Pudb Internal Exception Encountered",
)
def _show_exception_dialog(self, description, error_info, title,
exit_loop_on_ok=False):
res = self.dialog(
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(
"\n\n".join([description, error_info])
)])),
title=title,
buttons_and_results=[
("OK", exit_loop_on_ok),
("Save traceback", "save"),
],
)
if res == "save":
self._save_traceback(error_info)
def _save_traceback(self, error_info):
try:
from os.path import exists
filename = next(
fname for n in count()
for fname in ["traceback-%d.txt" % n if n else "traceback.txt"]
if not exists(fname)
)
with open(filename, "w") as outf:
outf.write(error_info)
self.message("Traceback saved as %s." % filename, title="Success")
except Exception:
from traceback import format_exception
io_tb_txt = "".join(format_exception(*sys.exc_info()))
self.message(
"An error occurred while trying to write "
"the traceback:\n\n" + io_tb_txt,
title="I/O error")
# }}}
# {{{ UI enter/exit
def show(self):
if self.show_count == 0:
self.screen.start()
self.show_count += 1
def hide(self):
self.show_count -= 1
if self.show_count == 0:
self.screen.stop()
def call_with_ui(self, f, *args, **kwargs):
self.show()
try:
return f(*args, **kwargs)
finally:
self.hide()
# }}}
# {{{ interaction
def event_loop(self, toplevel=None):
prev_quit_loop = self.quit_event_loop
try:
import pygments # noqa
except ImportError:
if not hasattr(self, "pygments_message_shown"):
self.pygments_message_shown = True
self.message("Package 'pygments' not found. "
"Syntax highlighting disabled.")
WELCOME_LEVEL = "e039" # noqa
if CONFIG["seen_welcome"] < WELCOME_LEVEL:
CONFIG["seen_welcome"] = WELCOME_LEVEL
from pudb import VERSION
self.message("Welcome to PudB %s!\n\n"
"PuDB is a full-screen, console-based visual debugger for "
"Python. Its goal is to provide all the niceties of modern "
"GUI-based debuggers in a more lightweight and "
"keyboard-friendly package. "
"PuDB allows you to debug code right where you write and test "
"it--in a terminal. If you've worked with the excellent "
"(but nowadays ancient) DOS-based Turbo Pascal or C tools, "
"PuDB's UI might look familiar.\n\n"
"If you're new here, welcome! The help screen "
"(invoked by hitting '?' after this message) should get you "
"on your way.\n"
"\nChanges in version 2021.1:\n\n"
"- Add shortcut to edit files in source and stack view "
"(Gábor Vecsei)\n"
"- Major improvements to the variable view "
"(Michael van der Kamp)\n"
"- Better internal error reporting (Michael van der Kamp)\n"
"\nChanges in version 2020.1:\n\n"
"- Add vi keys for the sidebar (Asbjørn Apeland)\n"
"- Add -m command line switch (Elias Dorneles)\n"
"- Debug forked processes (Jonathan Striebel)\n"
"- Robustness and logging for internal errors "
"(Michael Vanderkamp)\n"
"- 'Reverse' remote debugging (jen6)\n"
"\nChanges in version 2019.2:\n\n"
"- Auto-hide the command line (Mark Blakeney)\n"
"- Improve help and add jump to breakpoint (Mark Blakeney)\n"
"- Drop Py2.6 support\n"
"- Show callable attributes in var view\n"
"- Allow scrolling sidebar with j/k\n"
"- Fix setting breakpoints in Py3.8 (Aaron Meurer)\n"
"\nChanges in version 2019.1:\n\n"
"- Allow 'space' as a key to expand variables (Enrico Troeger)\n"
"- Have a persistent setting on variable visibility \n"
" (Enrico Troeger)\n"
"- Enable/partially automate opening the debugger in another \n"
" terminal (Anton Barkovsky)\n"
"- Make sidebar scrollable with j/k (Clayton Craft)\n"
"- Bug fixes.\n"
"\nChanges in version 2018.1:\n\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.4:\n\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.3:\n\n"
"- Add handling of safely_stringify_for_pudb to allow custom \n"
" per-type stringification.\n"
"- Add support for custom shells.\n"
"- Better support for 2-wide characters in the var view.\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.2:\n\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.1:\n\n"
"- IMPORTANT: 2017.1 and possibly earlier versions had a \n"
" bug with exponential growth of shell history for the \n"
" 'classic' shell, which (among other problems) could lead\n"
" to slow startup of the classic shell. Check the file\n\n"
" ~/.config/pudb/shell-history\n\n"
" for size (and useful content) and delete/trim as needed.\n"
"\nChanges in version 2017.1:\n\n"
"- Many, many bug fixes (thank you to all who contributed!)\n"
"\nChanges in version 2016.2:\n\n"
"- UI improvements for disabled breakpoints.\n"
"- Bug fixes.\n"
"\nChanges in version 2016.1:\n\n"
"- Fix module browser on Py3.\n"
"\nChanges in version 2015.4:\n\n"
"- Support for (somewhat rudimentary) remote debugging\n"
" through a telnet connection.\n"
"- Fix debugging of generated code in Python 3.\n"
"\nChanges in version 2015.3:\n\n"
"- Disable set_trace lines from the UI (Aaron Meurer)\n"
"- Better control over attribute visibility (Ned Batchelder)\n"
"\nChanges in version 2015.2:\n\n"
"- ptpython support (P. Varet)\n"
"- Improved rxvt support (Louper Rouch)\n"
"- More keyboard shortcuts in the command line"
"(Alex Sheluchin)\n"
"\nChanges in version 2015.1:\n\n"
"- Add solarized theme (Rinat Shigapov)\n"
"- More keyboard shortcuts in the command line"
"(Alexander Corwin)\n"
"\nChanges in version 2014.1:\n\n"
"- Make prompt-on-quit optional (Mike Burr)\n"
"- Make tab completion in the built-in shell saner\n"
"- Fix handling of unicode source\n"
" (reported by Morten Nielsen and Buck Golemon)\n"
"\nChanges in version 2013.5.1:\n\n"
"- Fix loading of saved breakpoint conditions "
"(Antoine Dechaume)\n"
"- Fixes for built-in command line\n"
"- Theme updates\n"
"\nChanges in version 2013.5:\n\n"
"- Add command line window\n"
"- Uses curses display driver when appropriate\n"
"\nChanges in version 2013.4:\n\n"
"- Support for debugging generated code\n"
"\nChanges in version 2013.3.5:\n\n"
"- IPython fixes (Aaron Meurer)\n"
"- Py2/3 configuration fixes (Somchai Smythe)\n"
"- PyPy fixes (Julian Berman)\n"
"\nChanges in version 2013.3.4:\n\n"
"- Don't die if curses doesn't like what stdin/out are\n"
" connected to.\n"
"\nChanges in version 2013.3.3:\n\n"
"- As soon as pudb is loaded, you can break to the debugger by\n"
" evaluating the expression 'pu.db', where 'pu' is a new \n"
" 'builtin' that pudb has rudely shoved into the interpreter.\n"
"\nChanges in version 2013.3.2:\n\n"
"- Don't attempt to do signal handling if a signal handler\n"
" is already set (Fix by Buck Golemon).\n"
"\nChanges in version 2013.3.1:\n\n"
"- Don't ship {ez,distribute}_setup at all.\n"
" It breaks more than it helps.\n"
"\nChanges in version 2013.3:\n\n"
"- Switch to setuptools as a setup helper.\n"
"\nChanges in version 2013.2:\n\n"
"- Even more bug fixes.\n"
"\nChanges in version 2013.1:\n\n"
"- Ctrl-C will now break to the debugger in a way that does\n"
" not terminate the program\n"
"- Lots of bugs fixed\n"
"\nChanges in version 2012.3:\n\n"
"- Python 3 support (contributed by Brad Froehle)\n"
"- Better search box behavior (suggested by Ram Rachum)\n"
"- Made it possible to go back and examine state from "
"'finished' window. (suggested by Aaron Meurer)\n"
"\nChanges in version 2012.2.1:\n\n"
"- Don't touch config files during install.\n"
"\nChanges in version 2012.2:\n\n"
"- Add support for BPython as a shell.\n"
"- You can now run 'python -m pudb script.py' on Py 2.6+.\n"
" '-m pudb.run' still works--but it's four "
"keystrokes longer! :)\n"
"\nChanges in version 2012.1:\n\n"
"- Work around an API change in IPython 0.12.\n"
"\nChanges in version 2011.3.1:\n\n"
"- Work-around for bug in urwid >= 1.0.\n"
"\nChanges in version 2011.3:\n\n"
"- Finer-grained string highlighting "
"(contributed by Aaron Meurer)\n"
"- Prefs tweaks, instant-apply, top-down stack "
"(contributed by Aaron Meurer)\n"
"- Size changes in sidebar boxes (contributed by Aaron Meurer)\n"
"- New theme 'midnight' (contributed by Aaron Meurer)\n"
"- Support for IPython 0.11 (contributed by Chris Farrow)\n"
"- Suport for custom stringifiers "
"(contributed by Aaron Meurer)\n"
"- Line wrapping in variables view "
"(contributed by Aaron Meurer)\n"
"\nChanges in version 2011.2:\n\n"
"- Fix for post-mortem debugging (contributed by 'Sundance')\n"
"\nChanges in version 2011.1:\n\n"
"- Breakpoints saved between sessions\n"
"- A new 'dark vim' theme\n"
"(both contributed by Naveen Michaud-Agrawal)\n"
"\nChanges in version 0.93:\n\n"
"- Stored preferences (no more pesky IPython prompt!)\n"
"- Themes\n"
"- Line numbers (optional)\n"
% VERSION)
from pudb.settings import save_config
save_config(CONFIG)
self.run_edit_config()
try:
if toplevel is None:
toplevel = self.top
self.size = self.screen.get_cols_rows()
self.quit_event_loop = False
while not self.quit_event_loop:
canvas = toplevel.render(self.size, focus=True)
self.screen.draw_screen(self.size, canvas)
keys = self.screen.get_input()
for k in keys:
if k == "window resize":
self.size = self.screen.get_cols_rows()
else:
try:
toplevel.keypress(self.size, k)
except Exception:
self.show_internal_exc_dlg(sys.exc_info())
return self.quit_event_loop
finally:
self.quit_event_loop = prev_quit_loop
# }}}
# {{{ debugger-facing interface
def interaction(self, exc_tuple, show_exc_dialog=True):
self.current_exc_tuple = exc_tuple
from pudb import VERSION
caption = [(None,
"PuDB %s - ?:help n:next s:step into b:breakpoint "
"!:python command line"
% VERSION)]
if self.debugger.post_mortem:
if show_exc_dialog and exc_tuple is not None:
self.show_exception_dialog(exc_tuple)
caption.extend([
(None, " "),
("warning", "[POST-MORTEM MODE]")
])
elif exc_tuple is not None:
caption.extend([
(None, " "),
("warning", "[PROCESSING EXCEPTION - hit 'e' to examine]")
])
self.caption.set_text(caption)
self.event_loop()
def set_source_code_provider(self, source_code_provider, force_update=False):
if self.source_code_provider != source_code_provider or force_update:
self.source[:] = source_code_provider.get_lines(self)
self.source_code_provider = source_code_provider
self.current_line = None
def show_line(self, line, source_code_provider=None):
"""Updates the UI so that a certain line is currently in view."""
changed_file = False
if source_code_provider is not None:
changed_file = self.source_code_provider != source_code_provider
self.set_source_code_provider(source_code_provider)
line -= 1
if line >= 0 and line < len(self.source):
self.source_list.set_focus(line)
if changed_file:
self.source_list.set_focus_valign("middle")
def set_current_line(self, line, source_code_provider):
"""Updates the UI to show the line currently being executed."""
if self.current_line is not None:
self.current_line.set_current(False)
self.show_line(line, source_code_provider)
line -= 1
if line >= 0 and line < len(self.source):
self.current_line = self.source[line]
self.current_line.set_current(True)
def update_var_view(self, locals=None, globals=None, focus_index=None):
if locals is None:
locals = self.debugger.curframe.f_locals
if globals is None:
globals = self.debugger.curframe.f_globals
from pudb.var_view import make_var_view
self.locals[:] = make_var_view(
self.get_frame_var_info(read_only=True),
locals, globals)
if focus_index is not None:
# Have to set the focus _after_ updating the locals list, as there
# appears to be a brief moment while resetting the list when the
# list is empty but urwid will attempt to set the focus anyway,
# which causes problems.
try:
self.var_list._w.set_focus(focus_index)
except IndexError:
# sigh oh well we tried
pass
def _get_bp_list(self):
return [bp
for fn, bp_lst in self.debugger.get_all_breaks().items()
for lineno in bp_lst
for bp in self.debugger.get_breaks(fn, lineno)
if not bp.temporary]
def _format_fname(self, fname):
from os.path import dirname, basename
name = basename(fname)
if name == "__init__.py":
name = "..."+dirname(fname)[-10:]+"/"+name
return name
def update_breakpoints(self):
self.bp_walker[:] = [
BreakpointFrame(self.debugger.current_bp == (bp.file, bp.line),
self._format_fname(bp.file), bp)
for bp in self._get_bp_list()]
def update_stack(self):
def make_frame_ui(frame_lineno):
frame, lineno = frame_lineno
code = frame.f_code
class_name = None
if code.co_argcount and code.co_varnames[0] == "self":
try:
class_name = frame.f_locals["self"].__class__.__name__
except Exception:
from pudb.lowlevel import ui_log
message = "Failed to determine class name"
ui_log.exception(message)
class_name = "!! %s !!" % message
return StackFrame(frame is self.debugger.curframe,
code.co_name, class_name,
self._format_fname(code.co_filename), lineno)
frame_uis = [make_frame_ui(fl) for fl in self.debugger.stack]
if CONFIG["current_stack_frame"] == "top":
frame_uis = frame_uis[::-1]
elif CONFIG["current_stack_frame"] == "bottom":
pass
else:
raise ValueError("invalid value for 'current_stack_frame' pref")
self.stack_walker[:] = frame_uis
def update_cmdline_win(self):
self.set_cmdline_state(not CONFIG["hide_cmdline_win"])
# }}}
# vim: foldmethod=marker:expandtab:softtabstop=4
| 36.963899
| 106
| 0.547339
|
__copyright__ = """
Copyright (C) 2009-2017 Andreas Kloeckner
Copyright (C) 2014-2017 Aaron Meurer
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urwid
import bdb
import gc
import os
import sys
from itertools import count
from functools import partial
from types import TracebackType
from pudb.lowlevel import decode_lines, ui_log
from pudb.settings import load_config, save_config
CONFIG = load_config()
save_config(CONFIG)
HELP_HEADER = r"""
Key Assignments: Use Arrow Down/Up or Page Down/Up to scroll.
"""
HELP_MAIN = r"""
Keys:
Ctrl-p - edit preferences
n - step over ("next")
s - step into
c - continue
r/f - finish current function
t - run to cursor
e - show traceback [post-mortem or in exception state]
b - set/clear breakpoint
Ctrl-e - open file at current line to edit with $EDITOR
H - move to current line (bottom of stack)
u - move up one stack frame
d - move down one stack frame
o - show console/output screen
m - open module
j/k - down/up
l/h - right/left
Ctrl-f/b - page down/up
Ctrl-d/u - page down/up
G/g - end/home
L - show (file/line) location / go to line
/ - search
,/. - search next/previous
V - focus variables
S - focus stack
B - focus breakpoint list
C - focus code
F1/? - show this help screen
q - quit
Ctrl-r - reload breakpoints from saved-breakpoints file
Ctrl-c - when in continue mode, break back to PuDB
Ctrl-l - redraw screen
Shell-related:
! - open the external shell (configured in the settings)
Ctrl-x - toggle the internal shell focus
+/- - grow/shrink inline shell (active in command line history)
_/= - minimize/maximize inline shell (active in command line history)
Ctrl-v - insert newline
Ctrl-n/p - browse command line history
Tab - yes, there is (simple) tab completion
"""
HELP_SIDE = r"""
Sidebar-related (active in sidebar):
+/- - grow/shrink sidebar
_/= - minimize/maximize sidebar
[/] - grow/shrink relative size of active sidebar box
Keys in variables list:
\/enter/space - expand/collapse
h - collapse
l - expand
d/t/r/s/i/c - show default/type/repr/str/id/custom for this variable
H - toggle highlighting
@ - toggle repetition at top
* - cycle attribute visibility: public/_private/__dunder__
m - toggle method visibility
w - toggle line wrapping
n/insert - add new watch expression
e - edit options (also to delete)
Keys in stack list:
enter - jump to frame
Ctrl-e - open file at line to edit with $EDITOR
Keys in breakpoints list:
enter - jump to breakpoint
b - toggle breakpoint
d - delete breakpoint
e - edit breakpoint
Other keys:
j/k - down/up
l/h - right/left
Ctrl-f/b - page down/up
Ctrl-d/u - page down/up
G/g - end/home
V - focus variables
S - focus stack
B - focus breakpoint list
C - focus code
F1/? - show this help screen
q - quit
Ctrl-l - redraw screen
"""
HELP_LICENSE = r"""
License:
--------
PuDB is licensed to you under the MIT/X Consortium license:
Copyright (c) 2009-16 Andreas Kloeckner and contributors
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
class Debugger(bdb.Bdb):
def __init__(self, stdin=None, stdout=None, term_size=None, steal_output=False,
**kwargs):
bdb.Bdb.__init__(self, **kwargs)
self.ui = DebuggerUI(self, stdin=stdin, stdout=stdout, term_size=term_size)
self.steal_output = steal_output
self.setup_state()
if steal_output:
raise NotImplementedError("output stealing")
from io import StringIO
self.stolen_output = sys.stderr = sys.stdout = StringIO()
sys.stdin = StringIO("")
from pudb.settings import load_breakpoints
for bpoint_descr in load_breakpoints():
self.set_break(*bpoint_descr)
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting:
raise bdb.BdbQuit
if not sys.gettrace():
return None
return self.trace_dispatch
def set_continue(self):
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame:
del frame.f_trace
if frame is self.botframe:
break
frame = frame.f_back
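# With no breakpoints left there is nothing to stop at, so tracing is
# switched off entirely and any frame-local trace functions between the
# calling frame and botframe are removed; this lets "continue" run at
# full speed instead of paying the per-line tracing cost.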
def set_trace(self, frame=None, as_breakpoint=None, paused=True):
if as_breakpoint is None:
if not paused:
as_breakpoint = False
else:
as_breakpoint = True
if frame is None:
frame = thisframe = sys._getframe().f_back
else:
thisframe = frame
# See pudb issue #52. If this works well enough we should upstream to
# stdlib bdb.py.
#self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
thisframe_info = (
self.canonic(thisframe.f_code.co_filename), thisframe.f_lineno)
if thisframe_info not in self.set_traces or self.set_traces[thisframe_info]:
if as_breakpoint:
self.set_traces[thisframe_info] = True
if self.ui.source_code_provider is not None:
self.ui.set_source_code_provider(
self.ui.source_code_provider, force_update=True)
if paused:
self.set_step()
else:
self.set_continue()
sys.settrace(self.trace_dispatch)
else:
return
def save_breakpoints(self):
from pudb.settings import save_breakpoints
save_breakpoints([
bp
for fn, bp_lst in self.get_all_breaks().items()
for lineno in bp_lst
for bp in self.get_breaks(fn, lineno)
if not bp.temporary])
def enter_post_mortem(self, exc_tuple):
self.post_mortem = True
def setup_state(self):
self.bottom_frame = None
self.mainpyfile = ""
self._wait_for_mainpyfile = False
self.current_bp = None
self.post_mortem = False
# Mapping of (filename, lineno) to bool. If True, will stop on the
# set_trace() call at that location.
self.set_traces = {}
def restart(self):
from linecache import checkcache
checkcache()
self.ui.set_source_code_provider(NullSourceCodeProvider())
self.setup_state()
def do_clear(self, arg):
self.clear_bpbynumber(int(arg))
def set_frame_index(self, index):
self.curindex = index
if index < 0 or index >= len(self.stack):
return
self.curframe, lineno = self.stack[index]
filename = self.curframe.f_code.co_filename
import linecache
if not linecache.getlines(filename):
code = self.curframe.f_globals.get("_MODULE_SOURCE_CODE")
if code is not None:
self.ui.set_current_line(lineno,
DirectSourceCodeProvider(
self.curframe.f_code.co_name, code))
else:
self.ui.set_current_line(lineno,
NullSourceCodeProvider())
else:
self.ui.set_current_line(lineno,
FileSourceCodeProvider(self, filename))
self.ui.update_var_view()
self.ui.update_stack()
self.ui.stack_list._w.set_focus(self.ui.translate_ui_stack_index(index))
@staticmethod
def open_file_to_edit(filename, line_number):
if not os.path.isfile(filename):
raise FileNotFoundError(f"'{filename}' not found or is not a file.")
if not line_number:
line_number = 1
editor = os.environ.get("EDITOR", "nano")
import subprocess
subprocess.call([editor, f"+{line_number}", filename], shell=False)
return filename
def move_up_frame(self):
if self.curindex > 0:
self.set_frame_index(self.curindex-1)
def move_down_frame(self):
if self.curindex < len(self.stack)-1:
self.set_frame_index(self.curindex+1)
def get_shortened_stack(self, frame, tb):
stack, index = self.get_stack(frame, tb)
for i, (s_frame, lineno) in enumerate(stack):
if s_frame is self.bottom_frame and index >= i:
stack = stack[i:]
index -= i
return stack, index
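# get_shortened_stack trims the outer frames that precede
# self.bottom_frame (pudb's own startup machinery) and shifts the focus
# index accordingly, so the stack view begins at the user's code.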
def interaction(self, frame, exc_tuple=None, show_exc_dialog=True):
if exc_tuple is None:
tb = None
elif isinstance(exc_tuple, TracebackType):
# For API compatibility with other debuggers, the second variable
# can be a traceback object. In that case, we need to retrieve the
# corresponding exception tuple.
tb = exc_tuple
exc, = (exc for exc in gc.get_referrers(tb)
if getattr(exc, "__traceback__", None) is tb)
exc_tuple = type(exc), exc, tb
else:
tb = exc_tuple[2]
if frame is None and tb is not None:
frame = tb.tb_frame
found_bottom_frame = False
walk_frame = frame
while True:
if walk_frame is self.bottom_frame:
found_bottom_frame = True
break
if walk_frame is None:
break
walk_frame = walk_frame.f_back
if not found_bottom_frame and not self.post_mortem:
return
self.stack, index = self.get_shortened_stack(frame, tb)
if self.post_mortem:
index = len(self.stack)-1
self.set_frame_index(index)
self.ui.call_with_ui(self.ui.interaction, exc_tuple,
show_exc_dialog=show_exc_dialog)
def get_stack_situation_id(self):
return str(id(self.stack[self.curindex][0].f_code))
def user_call(self, frame, argument_list):
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.interaction(frame)
def user_line(self, frame):
if "__exc_tuple__" in frame.f_locals:
del frame.f_locals["__exc_tuple__"]
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
self.bottom_frame = frame
if self.get_break(self.canonic(frame.f_code.co_filename), frame.f_lineno):
self.current_bp = (
self.canonic(frame.f_code.co_filename), frame.f_lineno)
else:
self.current_bp = None
try:
self.ui.update_breakpoints()
self.interaction(frame)
except Exception:
self.ui.show_internal_exc_dlg(sys.exc_info())
def user_return(self, frame, return_value):
if frame.f_code.co_name != "<module>":
frame.f_locals["__return__"] = return_value
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
self.bottom_frame = frame
if "__exc_tuple__" not in frame.f_locals:
self.interaction(frame)
def user_exception(self, frame, exc_tuple):
frame.f_locals["__exc_tuple__"] = exc_tuple
if not self._wait_for_mainpyfile:
self.interaction(frame, exc_tuple)
def _runscript(self, filename):
# Provide separation from current __main__, which is likely
# pudb.__main__ run. Preserving its namespace is not important, and
# having the script share it ensures that, e.g., pickle can find
# types defined there:
# https://github.com/inducer/pudb/issues/331
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({
"__name__": "__main__",
"__file__": filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happen
# BEFORE the debugger even reaches the user's code (and the exact
# sequence of events depends on the Python version), so we wait for
# the main file before stopping anywhere.
self._wait_for_mainpyfile = True
self.mainpyfile = self.canonic(filename)
statement = 'exec(compile(open("{}").read(), "{}", "exec"))'.format(
filename, filename)
from pudb import set_interrupt_handler
set_interrupt_handler()
self.run(statement)
def _runmodule(self, module_name):
import runpy
mod_name, mod_spec, code = runpy._get_module_details(module_name)
self.mainpyfile = self.canonic(code.co_filename)
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({
"__name__": "__main__",
"__file__": self.mainpyfile,
"__spec__": mod_spec,
"__builtins__": __builtins__,
"__package__": mod_spec.parent,
"__loader__": mod_spec.loader,
})
self._wait_for_mainpyfile = True
self.run(code)
from pudb.ui_tools import make_hotkey_markup, labelled_value, \
SelectableText, SignalWrap, StackFrame, BreakpointFrame
from pudb.var_view import FrameVarInfoKeeper
try:
import curses
except ImportError:
curses = None
from urwid.raw_display import Screen as RawScreen
try:
from urwid.curses_display import Screen as CursesScreen
except ImportError:
CursesScreen = None
class ThreadsafeScreenMixin:
def signal_init(self):
try:
super().signal_init()
except ValueError:
pass
def signal_restore(self):
try:
super().signal_restore()
except ValueError:
pass
class ThreadsafeRawScreen(ThreadsafeScreenMixin, RawScreen):
pass
class ThreadsafeFixedSizeRawScreen(ThreadsafeScreenMixin, RawScreen):
def __init__(self, **kwargs):
self._term_size = kwargs.pop("term_size", None)
super().__init__(**kwargs)
def get_cols_rows(self):
if self._term_size is not None:
return self._term_size
else:
return 80, 24
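# The fixed-size variant is used when pudb is driven through an explicit
# stdin/stdout pair (e.g. a socket for remote debugging) where the real
# terminal size cannot be queried; 80x24 is assumed if no size was
# supplied.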
if curses is not None:
class ThreadsafeCursesScreen(ThreadsafeScreenMixin, RawScreen):
pass
class SourceCodeProvider:
def __ne__(self, other):
return not (self == other)
class NullSourceCodeProvider(SourceCodeProvider):
def __eq__(self, other):
return type(self) == type(other)
def identifier(self):
return "<no source code>"
def get_source_identifier(self):
return None
def clear_cache(self):
pass
def get_lines(self, debugger_ui):
from pudb.source_view import SourceLine
return [
SourceLine(debugger_ui, "<no source code available>"),
SourceLine(debugger_ui, ""),
SourceLine(debugger_ui, "If this is generated code and you would "
"like the source code to show up here,"),
SourceLine(debugger_ui, "add it to linecache.cache, like"),
SourceLine(debugger_ui, ""),
SourceLine(debugger_ui, " import linecache"),
SourceLine(debugger_ui, " linecache.cache[filename] = "
"(size, mtime, lines, fullname)"),
SourceLine(debugger_ui, ""),
SourceLine(debugger_ui, "You can also set the attribute "
"_MODULE_SOURCE_CODE in the module in which this function"),
SourceLine(debugger_ui, "was compiled to a string containing "
"the code."),
]
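# A minimal sketch of the linecache registration suggested above (the
# file name and source text here are made up for illustration):
#
#   import linecache
#   source = "def generated():\n    return 42\n"
#   fname = "<generated module>"
#   linecache.cache[fname] = (len(source), None, source.splitlines(True), fname)
#
# With such an entry in place, linecache.getlines(fname) returns the code
# and the file-based source provider below can display it.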
class FileSourceCodeProvider(SourceCodeProvider):
def __init__(self, debugger, file_name):
self.file_name = debugger.canonic(file_name)
def __eq__(self, other):
return type(self) == type(other) and self.file_name == other.file_name
def identifier(self):
return self.file_name
def get_source_identifier(self):
return self.file_name
def clear_cache(self):
from linecache import clearcache
clearcache()
def get_lines(self, debugger_ui):
from pudb.source_view import SourceLine, format_source
if self.file_name == "<string>":
return [SourceLine(debugger_ui, self.file_name)]
breakpoints = debugger_ui.debugger.get_file_breaks(self.file_name)[:]
breakpoints = [lineno for lineno in breakpoints if
any(bp.enabled
for bp in debugger_ui.debugger.get_breaks(self.file_name, lineno))]
breakpoints += [i for f, i in debugger_ui.debugger.set_traces if f
== self.file_name and debugger_ui.debugger.set_traces[f, i]]
try:
from linecache import getlines
lines = getlines(self.file_name)
return format_source(
debugger_ui, list(decode_lines(lines)), set(breakpoints))
except Exception:
from pudb.lowlevel import format_exception
debugger_ui.message("Could not load source file '{}':\n\n{}".format(
self.file_name, "".join(format_exception(sys.exc_info()))),
title="Source Code Load Error")
return [SourceLine(debugger_ui,
"Error while loading '%s'." % self.file_name)]
class DirectSourceCodeProvider(SourceCodeProvider):
def __init__(self, func_name, code):
self.function_name = func_name
self.code = code
def __eq__(self, other):
return (
type(self) == type(other)
and self.function_name == other.function_name
and self.code is other.code)
def identifier(self):
return "<source code of function %s>" % self.function_name
def get_source_identifier(self):
return None
def clear_cache(self):
pass
def get_lines(self, debugger_ui):
from pudb.source_view import format_source
lines = self.code.splitlines(True)
return format_source(debugger_ui, list(decode_lines(lines)), set())
class DebuggerUI(FrameVarInfoKeeper):
def __init__(self, dbg, stdin, stdout, term_size):
FrameVarInfoKeeper.__init__(self)
self.debugger = dbg
from urwid import AttrMap
from pudb.ui_tools import SearchController
self.search_controller = SearchController(self)
self.last_module_filter = ""
def move_up(w, size, key):
w.keypress(size, "up")
def move_down(w, size, key):
w.keypress(size, "down")
def move_left(w, size, key):
w.keypress(size, "left")
def move_right(w, size, key):
w.keypress(size, "right")
def page_up(w, size, key):
w.keypress(size, "page up")
def page_down(w, size, key):
w.keypress(size, "page down")
def move_home(w, size, key):
w.keypress(size, "home")
def move_end(w, size, key):
w.keypress(size, "end")
def add_vi_nav_keys(widget):
widget.listen("k", move_up)
widget.listen("j", move_down)
widget.listen("h", move_left)
widget.listen("l", move_right)
widget.listen("ctrl b", page_up)
widget.listen("ctrl f", page_down)
widget.listen("ctrl u", page_up)
widget.listen("ctrl d", page_down)
widget.listen("g", move_home)
widget.listen("G", move_end)
def add_help_keys(widget, helpfunc):
widget.listen("f1", helpfunc)
widget.listen("?", helpfunc)
self.source = urwid.SimpleListWalker([])
self.source_list = urwid.ListBox(self.source)
self.source_sigwrap = SignalWrap(self.source_list)
self.source_attr = urwid.AttrMap(self.source_sigwrap, "source")
self.source_hscroll_start = 0
self.cmdline_history = []
self.cmdline_history_position = -1
self.cmdline_contents = urwid.SimpleFocusListWalker([])
self.cmdline_list = urwid.ListBox(self.cmdline_contents)
self.cmdline_edit = urwid.Edit([
("command line prompt", ">>> ")
])
cmdline_edit_attr = urwid.AttrMap(self.cmdline_edit, "command line edit")
self.cmdline_edit_sigwrap = SignalWrap(
cmdline_edit_attr, is_preemptive=True)
def clear_cmdline_history(btn):
del self.cmdline_contents[:]
self.cmdline_edit_bar = urwid.Columns([
self.cmdline_edit_sigwrap,
("fixed", 10, AttrMap(
urwid.Button("Clear", clear_cmdline_history),
"command line clear button", "command line focused button"))
])
self.cmdline_pile = urwid.Pile([
("flow", urwid.Text("Command line: [Ctrl-X]")),
("weight", 1, urwid.AttrMap(self.cmdline_list, "command line output")),
("flow", self.cmdline_edit_bar),
])
self.cmdline_sigwrap = SignalWrap(
urwid.AttrMap(self.cmdline_pile, None, "focused sidebar")
)
self.cmdline_on = not CONFIG["hide_cmdline_win"]
self.cmdline_weight = 1
self.lhs_col = urwid.Pile([
("weight", 5, self.source_attr),
("weight", self.cmdline_weight if self.cmdline_on else 0,
self.cmdline_sigwrap),
])
self.locals = urwid.SimpleListWalker([])
self.var_list = SignalWrap(
urwid.ListBox(self.locals))
self.stack_walker = urwid.SimpleListWalker([])
self.stack_list = SignalWrap(
urwid.ListBox(self.stack_walker))
self.bp_walker = urwid.SimpleListWalker([])
self.bp_list = SignalWrap(
urwid.ListBox(self.bp_walker))
self.rhs_col = urwid.Pile([
("weight", float(CONFIG["variables_weight"]), AttrMap(urwid.Pile([
("flow", urwid.Text(make_hotkey_markup("_Variables:"))),
AttrMap(self.var_list, "variables"),
]), None, "focused sidebar"),),
("weight", float(CONFIG["stack_weight"]), AttrMap(urwid.Pile([
("flow", urwid.Text(make_hotkey_markup("_Stack:"))),
AttrMap(self.stack_list, "stack"),
]), None, "focused sidebar"),),
("weight", float(CONFIG["breakpoints_weight"]), AttrMap(urwid.Pile([
("flow", urwid.Text(make_hotkey_markup("_Breakpoints:"))),
AttrMap(self.bp_list, "breakpoint"),
]), None, "focused sidebar"),),
])
self.rhs_col_sigwrap = SignalWrap(self.rhs_col)
def helpside(w, size, key):
help(HELP_HEADER + HELP_SIDE + HELP_MAIN + HELP_LICENSE)
add_vi_nav_keys(self.rhs_col_sigwrap)
add_help_keys(self.rhs_col_sigwrap, helpside)
self.columns = urwid.Columns(
[
("weight", 1, self.lhs_col),
("weight", float(CONFIG["sidebar_width"]),
self.rhs_col_sigwrap),
],
dividechars=1)
self.caption = urwid.Text("")
header = urwid.AttrMap(self.caption, "header")
self.top = SignalWrap(urwid.Frame(
urwid.AttrMap(self.columns, "background"),
header))
def change_rhs_box(name, index, direction, w, size, key):
from pudb.settings import save_config
weight = self.rhs_col.item_types[index][1]
if direction < 0:
if weight > 1/5:
weight /= 1.25
else:
if weight < 5:
weight *= 1.25
CONFIG[name+"_weight"] = weight
save_config(CONFIG)
self.rhs_col.item_types[index] = "weight", weight
self.rhs_col._invalidate()
def get_inspect_info(id_path, read_only=False):
return (self.get_frame_var_info(read_only)
.get_inspect_info(id_path, read_only))
def collapse_current(var, pos, iinfo):
if iinfo.show_detail:
iinfo.show_detail = False
else:
if var.parent is not None:
p_iinfo = get_inspect_info(var.parent.id_path)
p_iinfo.show_detail = False
return self.locals.index(var.parent)
return None
def change_var_state(w, size, key):
var, pos = self.var_list._w.get_focus()
if var is None:
return
iinfo = get_inspect_info(var.id_path)
focus_index = None
if key == "enter" or key == "\\" or key == " ":
iinfo.show_detail = not iinfo.show_detail
elif key == "h":
focus_index = collapse_current(var, pos, iinfo)
elif key == "l":
iinfo.show_detail = True
elif key == "d":
iinfo.display_type = "default"
elif key == "t":
iinfo.display_type = "type"
elif key == "r":
iinfo.display_type = "repr"
elif key == "s":
iinfo.display_type = "str"
elif key == "i":
iinfo.display_type = "id"
elif key == "c":
iinfo.display_type = CONFIG["custom_stringifier"]
elif key == "H":
iinfo.highlighted = not iinfo.highlighted
elif key == "@":
iinfo.repeated_at_top = not iinfo.repeated_at_top
elif key == "*":
levels = ["public", "private", "all", "public"]
iinfo.access_level = levels[levels.index(iinfo.access_level)+1]
elif key == "w":
iinfo.wrap = not iinfo.wrap
elif key == "m":
iinfo.show_methods = not iinfo.show_methods
self.update_var_view(focus_index=focus_index)
def edit_inspector_detail(w, size, key):
var, pos = self.var_list._w.get_focus()
if var is None:
return
fvi = self.get_frame_var_info(read_only=False)
iinfo = fvi.get_inspect_info(var.id_path, read_only=False)
buttons = [
("OK", True),
("Cancel", False),
]
if var.watch_expr is not None:
watch_edit = urwid.Edit([
("label", "Watch expression: ")
], var.watch_expr.expression)
id_segment = [urwid.AttrMap(watch_edit, "value"), urwid.Text("")]
buttons.extend([None, ("Delete", "del")])
title = "Watch Expression Options"
else:
id_segment = [
labelled_value("Identifier Path: ", var.id_path),
urwid.Text(""),
]
title = "Variable Inspection Options"
rb_grp_show = []
rb_show_default = urwid.RadioButton(rb_grp_show, "Default",
iinfo.display_type == "default")
rb_show_type = urwid.RadioButton(rb_grp_show, "Show type()",
iinfo.display_type == "type")
rb_show_repr = urwid.RadioButton(rb_grp_show, "Show repr()",
iinfo.display_type == "repr")
rb_show_str = urwid.RadioButton(rb_grp_show, "Show str()",
iinfo.display_type == "str")
rb_show_id = urwid.RadioButton(rb_grp_show, "Show id()",
iinfo.display_type == "id")
rb_show_custom = urwid.RadioButton(
rb_grp_show, "Show custom (set in prefs)",
iinfo.display_type == CONFIG["custom_stringifier"])
rb_grp_access = []
rb_access_public = urwid.RadioButton(rb_grp_access, "Public members",
iinfo.access_level == "public")
rb_access_private = urwid.RadioButton(
rb_grp_access, "Public and private members",
iinfo.access_level == "private")
rb_access_all = urwid.RadioButton(
rb_grp_access, "All members (including __dunder__)",
iinfo.access_level == "all")
wrap_checkbox = urwid.CheckBox("Line Wrap", iinfo.wrap)
expanded_checkbox = urwid.CheckBox("Expanded", iinfo.show_detail)
highlighted_checkbox = urwid.CheckBox("Highlighted", iinfo.highlighted)
repeated_at_top_checkbox = urwid.CheckBox(
"Repeated at top", iinfo.repeated_at_top)
show_methods_checkbox = urwid.CheckBox(
"Show methods", iinfo.show_methods)
lb = urwid.ListBox(urwid.SimpleListWalker(
id_segment
+ rb_grp_show + [urwid.Text("")]
+ rb_grp_access + [urwid.Text("")]
+ [
wrap_checkbox,
expanded_checkbox,
highlighted_checkbox,
repeated_at_top_checkbox,
show_methods_checkbox,
]))
result = self.dialog(lb, buttons, title=title)
if result is True:
iinfo.show_detail = expanded_checkbox.get_state()
iinfo.wrap = wrap_checkbox.get_state()
iinfo.highlighted = highlighted_checkbox.get_state()
iinfo.repeated_at_top = repeated_at_top_checkbox.get_state()
iinfo.show_methods = show_methods_checkbox.get_state()
if rb_show_default.get_state():
iinfo.display_type = "default"
elif rb_show_type.get_state():
iinfo.display_type = "type"
elif rb_show_repr.get_state():
iinfo.display_type = "repr"
elif rb_show_str.get_state():
iinfo.display_type = "str"
elif rb_show_id.get_state():
iinfo.display_type = "id"
elif rb_show_custom.get_state():
iinfo.display_type = CONFIG["custom_stringifier"]
if rb_access_public.get_state():
iinfo.access_level = "public"
elif rb_access_private.get_state():
iinfo.access_level = "private"
elif rb_access_all.get_state():
iinfo.access_level = "all"
if var.watch_expr is not None:
var.watch_expr.expression = watch_edit.get_edit_text()
elif result == "del":
for i, watch_expr in enumerate(fvi.watches):
if watch_expr is var.watch_expr:
del fvi.watches[i]
self.update_var_view()
def insert_watch(w, size, key):
watch_edit = urwid.Edit([
("label", "Watch expression: ")
])
if self.dialog(
urwid.ListBox(urwid.SimpleListWalker([
urwid.AttrMap(watch_edit, "value")
])),
[
("OK", True),
("Cancel", False),
], title="Add Watch Expression"):
from pudb.var_view import WatchExpression
we = WatchExpression(watch_edit.get_edit_text())
fvi = self.get_frame_var_info(read_only=False)
fvi.watches.append(we)
self.update_var_view()
self.var_list.listen("\\", change_var_state)
self.var_list.listen(" ", change_var_state)
self.var_list.listen("h", change_var_state)
self.var_list.listen("l", change_var_state)
self.var_list.listen("d", change_var_state)
self.var_list.listen("t", change_var_state)
self.var_list.listen("r", change_var_state)
self.var_list.listen("s", change_var_state)
self.var_list.listen("i", change_var_state)
self.var_list.listen("c", change_var_state)
self.var_list.listen("H", change_var_state)
self.var_list.listen("@", change_var_state)
self.var_list.listen("*", change_var_state)
self.var_list.listen("w", change_var_state)
self.var_list.listen("m", change_var_state)
self.var_list.listen("enter", change_var_state)
self.var_list.listen("e", edit_inspector_detail)
self.var_list.listen("n", insert_watch)
self.var_list.listen("insert", insert_watch)
self.var_list.listen("[", partial(change_rhs_box, "variables", 0, -1))
self.var_list.listen("]", partial(change_rhs_box, "variables", 0, 1))
def examine_frame(w, size, key):
_, pos = self.stack_list._w.get_focus()
self.debugger.set_frame_index(self.translate_ui_stack_index(pos))
self.stack_list.listen("enter", examine_frame)
def open_file_editor(file_name, line_number):
file_changed = False
try:
original_modification_time = os.path.getmtime(file_name)
self.screen.stop()
filename_edited = self.debugger.open_file_to_edit(file_name,
line_number)
self.screen.start()
new_modification_time = os.path.getmtime(file_name)
file_changed = new_modification_time - original_modification_time > 0
except Exception:
from traceback import format_exception
self.message("Exception happened when trying to edit the file:"
"\n\n%s" % ("".join(format_exception(*sys.exc_info()))),
title="File Edit Error")
return
if file_changed:
self.message("File is changed, but the execution is continued with"
" the 'old' codebase.\n"
f"Changed file: {filename_edited}\n\n"
"Please quit and restart to see changes",
title="File is changed")
def open_editor_on_stack_frame(w, size, key):
_, pos = self.stack_list._w.get_focus()
index = self.translate_ui_stack_index(pos)
curframe, line_number = self.debugger.stack[index]
file_name = curframe.f_code.co_filename
open_file_editor(file_name, line_number)
self.stack_list.listen("ctrl e", open_editor_on_stack_frame)
def move_stack_top(w, size, key):
self.debugger.set_frame_index(len(self.debugger.stack)-1)
def move_stack_up(w, size, key):
self.debugger.move_up_frame()
def move_stack_down(w, size, key):
self.debugger.move_down_frame()
self.stack_list.listen("H", move_stack_top)
self.stack_list.listen("u", move_stack_up)
self.stack_list.listen("d", move_stack_down)
self.stack_list.listen("[", partial(change_rhs_box, "stack", 1, -1))
self.stack_list.listen("]", partial(change_rhs_box, "stack", 1, 1))
def save_breakpoints(w, size, key):
self.debugger.save_breakpoints()
def delete_breakpoint(w, size, key):
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier is None:
self.message(
"Cannot currently delete a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
bp_list = self._get_bp_list()
if bp_list:
_, pos = self.bp_list._w.get_focus()
bp = bp_list[pos]
if bp_source_identifier == bp.file and bp.line-1 < len(self.source):
self.source[bp.line-1].set_breakpoint(False)
err = self.debugger.clear_break(bp.file, bp.line)
if err:
self.message("Error clearing breakpoint:\n" + err)
else:
self.update_breakpoints()
def enable_disable_breakpoint(w, size, key):
bp_entry, pos = self.bp_list._w.get_focus()
if bp_entry is None:
return
bp = self._get_bp_list()[pos]
bp.enabled = not bp.enabled
sline = self.source[bp.line-1]
sline.set_breakpoint(bp.enabled)
self.update_breakpoints()
def examine_breakpoint(w, size, key):
bp_entry, pos = self.bp_list._w.get_focus()
if bp_entry is None:
return
bp = self._get_bp_list()[pos]
if bp.cond is None:
cond = ""
else:
cond = str(bp.cond)
enabled_checkbox = urwid.CheckBox(
"Enabled", bp.enabled)
cond_edit = urwid.Edit([
("label", "Condition: ")
], cond)
ign_count_edit = urwid.IntEdit([
("label", "Ignore the next N times: ")
], bp.ignore)
lb = urwid.ListBox(urwid.SimpleListWalker([
labelled_value("File: ", bp.file),
labelled_value("Line: ", bp.line),
labelled_value("Hits: ", bp.hits),
urwid.Text(""),
enabled_checkbox,
urwid.AttrMap(cond_edit, "value", "value"),
urwid.AttrMap(ign_count_edit, "value", "value"),
]))
result = self.dialog(lb, [
("OK", True),
("Cancel", False),
None,
("Delete", "del"),
("Location", "loc"),
], title="Edit Breakpoint")
if result is True:
bp.enabled = enabled_checkbox.get_state()
bp.ignore = int(ign_count_edit.value())
cond = cond_edit.get_edit_text()
if cond:
bp.cond = cond
else:
bp.cond = None
elif result == "loc":
self.show_line(bp.line,
FileSourceCodeProvider(self.debugger, bp.file))
self.columns.set_focus(0)
elif result == "del":
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier is None:
self.message(
"Cannot currently delete a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
if bp_source_identifier == bp.file:
self.source[bp.line-1].set_breakpoint(False)
err = self.debugger.clear_break(bp.file, bp.line)
if err:
self.message("Error clearing breakpoint:\n" + err)
else:
self.update_breakpoints()
def show_breakpoint(w, size, key):
bp_entry, pos = self.bp_list._w.get_focus()
if bp_entry is not None:
bp = self._get_bp_list()[pos]
self.show_line(bp.line,
FileSourceCodeProvider(self.debugger, bp.file))
self.bp_list.listen("enter", show_breakpoint)
self.bp_list.listen("d", delete_breakpoint)
self.bp_list.listen("s", save_breakpoints)
self.bp_list.listen("e", examine_breakpoint)
self.bp_list.listen("b", enable_disable_breakpoint)
self.bp_list.listen("H", move_stack_top)
self.bp_list.listen("[", partial(change_rhs_box, "breakpoints", 2, -1))
self.bp_list.listen("]", partial(change_rhs_box, "breakpoints", 2, 1))
def end():
self.debugger.save_breakpoints()
self.quit_event_loop = True
def next_line(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_next(self.debugger.curframe)
end()
def step(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_step()
end()
def finish(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_return(self.debugger.curframe)
end()
def cont(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
self.debugger.set_continue()
end()
def run_to_cursor(w, size, key):
if self.debugger.post_mortem:
self.message("Post-mortem mode: Can't modify state.")
else:
sline, pos = self.source.get_focus()
lineno = pos+1
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier is None:
self.message(
"Cannot currently set a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
from pudb.lowlevel import get_breakpoint_invalid_reason
invalid_reason = get_breakpoint_invalid_reason(
bp_source_identifier, lineno)
if invalid_reason is not None:
self.message(
"Cannot run to the line you indicated, "
"for the following reason:\n\n"
+ invalid_reason)
else:
err = self.debugger.set_break(
bp_source_identifier, pos+1, temporary=True)
if err:
self.message("Error dealing with breakpoint:\n" + err)
self.debugger.set_continue()
end()
def go_to_line(w, size, key):
_, line = self.source.get_focus()
lineno_edit = urwid.IntEdit([
("label", "Go to Line :")
], None)
if self.dialog(
urwid.ListBox(urwid.SimpleListWalker([
labelled_value("File :",
self.source_code_provider.identifier()),
labelled_value("Current Line :", line+1),
urwid.AttrMap(lineno_edit, "value")
])),
[
("OK", True),
("Cancel", False),
], title="Go to Line Number"):
lineno = min(max(0, int(lineno_edit.value())-1), len(self.source)-1)
self.source.set_focus(lineno)
def scroll_left(w, size, key):
self.source_hscroll_start = max(
0,
self.source_hscroll_start - 4)
for sl in self.source:
sl._invalidate()
def scroll_right(w, size, key):
self.source_hscroll_start += 4
for sl in self.source:
sl._invalidate()
def search(w, size, key):
self.search_controller.open_search_ui()
def search_next(w, size, key):
self.search_controller.perform_search(dir=1, update_search_start=True)
def search_previous(w, size, key):
self.search_controller.perform_search(dir=-1, update_search_start=True)
def toggle_breakpoint(w, size, key):
bp_source_identifier = \
self.source_code_provider.get_source_identifier()
if bp_source_identifier:
sline, pos = self.source.get_focus()
lineno = pos+1
existing_breaks = self.debugger.get_breaks(
bp_source_identifier, lineno)
if existing_breaks:
err = None
for bp in existing_breaks:
if not bp.enabled:
bp.enable()
sline.set_breakpoint(True)
# Unsure about this. Are multiple breakpoints even
# possible?
break
else:
err = self.debugger.clear_break(bp_source_identifier, lineno)
sline.set_breakpoint(False)
else:
file_lineno = (bp_source_identifier, lineno)
if file_lineno in self.debugger.set_traces:
self.debugger.set_traces[file_lineno] = \
not self.debugger.set_traces[file_lineno]
sline.set_breakpoint(self.debugger.set_traces[file_lineno])
return
from pudb.lowlevel import get_breakpoint_invalid_reason
invalid_reason = get_breakpoint_invalid_reason(
bp_source_identifier, pos+1)
if invalid_reason is not None:
do_set = not self.dialog(
urwid.ListBox(
urwid.SimpleListWalker([
urwid.Text(
"The breakpoint you just set may be "
"invalid, for the following reason:\n\n"
+ invalid_reason),
])), [
("Cancel", True),
("Set Anyway", False),
],
title="Possibly Invalid Breakpoint",
focus_buttons=True)
else:
do_set = True
if do_set:
err = self.debugger.set_break(bp_source_identifier, pos+1)
sline.set_breakpoint(True)
else:
err = None
if err:
self.message("Error dealing with breakpoint:\n" + err)
self.update_breakpoints()
else:
self.message(
"Cannot currently set a breakpoint here--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
def pick_module(w, size, key):
from os.path import splitext
import sys
def mod_exists(mod):
if not hasattr(mod, "__file__"):
return False
if mod.__file__ is None:
return False
filename = mod.__file__
base, ext = splitext(filename)
ext = ext.lower()
from os.path import exists
if ext == ".pyc":
return exists(base+".py")
else:
return ext == ".py"
new_mod_text = SelectableText("-- update me --")
new_mod_entry = urwid.AttrMap(new_mod_text,
None, "focused selectable")
def build_filtered_mod_list(filt_string=""):
modules = sorted(name
# mod_exists may change the size of sys.modules,
# causing this to crash. Copy to a list.
for name, mod in list(sys.modules.items())
if mod_exists(mod))
result = [urwid.AttrMap(SelectableText(mod),
None, "focused selectable")
for mod in modules if filt_string in mod]
new_mod_text.set_text("<<< IMPORT MODULE '%s' >>>" % filt_string)
result.append(new_mod_entry)
return result
def show_mod(mod):
filename = self.debugger.canonic(mod.__file__)
base, ext = splitext(filename)
if ext == ".pyc":
ext = ".py"
filename = base+".py"
self.set_source_code_provider(
FileSourceCodeProvider(self.debugger, filename))
self.source_list.set_focus(0)
class FilterEdit(urwid.Edit):
def keypress(self, size, key):
result = urwid.Edit.keypress(self, size, key)
if result is None:
mod_list[:] = build_filtered_mod_list(
self.get_edit_text())
return result
filt_edit = FilterEdit([("label", "Filter: ")],
self.last_module_filter)
mod_list = urwid.SimpleListWalker(
build_filtered_mod_list(filt_edit.get_edit_text()))
lb = urwid.ListBox(mod_list)
w = urwid.Pile([
("flow", urwid.AttrMap(filt_edit, "value")),
("fixed", 1, urwid.SolidFill()),
urwid.AttrMap(lb, "selectable")])
while True:
result = self.dialog(w, [
("OK", True),
("Cancel", False),
("Reload", "reload"),
], title="Pick Module")
self.last_module_filter = filt_edit.get_edit_text()
if result is True:
widget, pos = lb.get_focus()
if widget is new_mod_entry:
new_mod_name = filt_edit.get_edit_text()
try:
__import__(str(new_mod_name))
except Exception:
from traceback import format_exception
self.message(
"Could not import module '{}':\n\n{}".format(
new_mod_name, "".join(
format_exception(*sys.exc_info()))),
title="Import Error")
else:
show_mod(__import__(str(new_mod_name)))
break
else:
show_mod(sys.modules[widget.base_widget.get_text()[0]])
break
elif result is False:
break
elif result == "reload":
widget, pos = lb.get_focus()
if widget is not new_mod_entry:
mod_name = widget.base_widget.get_text()[0]
mod = sys.modules[mod_name]
import importlib
importlib.reload(mod)
self.message("'%s' was successfully reloaded." % mod_name)
if self.source_code_provider is not None:
self.source_code_provider.clear_cache()
self.set_source_code_provider(self.source_code_provider,
force_update=True)
_, pos = self.stack_list._w.get_focus()
self.debugger.set_frame_index(
self.translate_ui_stack_index(pos))
def helpmain(w, size, key):
help(HELP_HEADER + HELP_MAIN + HELP_SIDE + HELP_LICENSE)
self.source_sigwrap.listen("n", next_line)
self.source_sigwrap.listen("s", step)
self.source_sigwrap.listen("f", finish)
self.source_sigwrap.listen("r", finish)
self.source_sigwrap.listen("c", cont)
self.source_sigwrap.listen("t", run_to_cursor)
self.source_sigwrap.listen("L", go_to_line)
self.source_sigwrap.listen("/", search)
self.source_sigwrap.listen(",", search_previous)
self.source_sigwrap.listen(".", search_next)
self.source_sigwrap.listen("b", toggle_breakpoint)
self.source_sigwrap.listen("m", pick_module)
self.source_sigwrap.listen("H", move_stack_top)
self.source_sigwrap.listen("u", move_stack_up)
self.source_sigwrap.listen("d", move_stack_down)
# left/right scrolling have to be handled specially, normal vi keys
# don't cut it
self.source_sigwrap.listen("h", scroll_left)
self.source_sigwrap.listen("l", scroll_right)
add_vi_nav_keys(self.source_sigwrap)
add_help_keys(self.source_sigwrap, helpmain)
def cmdline_get_namespace():
curframe = self.debugger.curframe
from pudb.shell import SetPropagatingDict
return SetPropagatingDict(
[curframe.f_locals, curframe.f_globals],
curframe.f_locals)
def cmdline_tab_complete(w, size, key):
try:
from jedi import Interpreter
except ImportError:
self.add_cmdline_content(
"Tab completion requires jedi to be installed. ",
"command line error")
return
import jedi
from distutils.version import LooseVersion
if LooseVersion(jedi.__version__) < LooseVersion("0.16.0"):
self.add_cmdline_content(
"jedi 0.16.0 is required for Tab completion",
"command line error")
text = self.cmdline_edit.edit_text
pos = self.cmdline_edit.edit_pos
chopped_text = text[:pos]
suffix = text[pos:]
try:
completions = Interpreter(
chopped_text,
[cmdline_get_namespace()]).complete()
except Exception as e:
self.add_cmdline_content(
"Could not tab complete (Jedi error: '%s')" % e,
"command line error")
return
full_completions = [i.name_with_symbols for i in completions]
chopped_completions = [i.complete for i in completions]
def common_prefix(a, b):
for i, (a_i, b_i) in enumerate(zip(a, b)):
if a_i != b_i:
return a[:i]
return a[:min(len(a), len(b))]
common_compl_prefix = None
for completion in chopped_completions:
if common_compl_prefix is None:
common_compl_prefix = completion
else:
common_compl_prefix = common_prefix(
common_compl_prefix, completion)
completed_chopped_text = common_compl_prefix
if completed_chopped_text is None:
return
if (
len(completed_chopped_text) == 0
and len(completions) > 1):
self.add_cmdline_content(
" ".join(full_completions),
"command line output")
return
self.cmdline_edit.edit_text = \
chopped_text+completed_chopped_text+suffix
self.cmdline_edit.edit_pos = (
len(chopped_text)
+ len(completed_chopped_text))
def cmdline_append_newline(w, size, key):
self.cmdline_edit.insert_text("\n")
def cmdline_exec(w, size, key):
cmd = self.cmdline_edit.get_edit_text()
if not cmd:
return
self.add_cmdline_content(">>> " + cmd, "command line input")
if not self.cmdline_history or cmd != self.cmdline_history[-1]:
self.cmdline_history.append(cmd)
self.cmdline_history_position = -1
prev_sys_stdin = sys.stdin
prev_sys_stdout = sys.stdout
prev_sys_stderr = sys.stderr
from io import StringIO
sys.stdin = None
sys.stderr = sys.stdout = StringIO()
try:
eval(compile(cmd, "<pudb command line>", "single"),
cmdline_get_namespace())
except Exception:
tp, val, tb = sys.exc_info()
import traceback
tblist = traceback.extract_tb(tb)
del tblist[:1]
tb_lines = traceback.format_list(tblist)
if tb_lines:
tb_lines.insert(0, "Traceback (most recent call last):\n")
tb_lines[len(tb_lines):] = traceback.format_exception_only(tp, val)
self.add_cmdline_content("".join(tb_lines), "command line error")
else:
self.cmdline_edit.set_edit_text("")
finally:
if sys.stdout.getvalue():
self.add_cmdline_content(sys.stdout.getvalue(),
"command line output")
sys.stdin = prev_sys_stdin
sys.stdout = prev_sys_stdout
sys.stderr = prev_sys_stderr
def cmdline_history_browse(direction):
if self.cmdline_history_position == -1:
self.cmdline_history_position = len(self.cmdline_history)
self.cmdline_history_position += direction
if 0 <= self.cmdline_history_position < len(self.cmdline_history):
self.cmdline_edit.edit_text = \
self.cmdline_history[self.cmdline_history_position]
else:
self.cmdline_history_position = -1
self.cmdline_edit.edit_text = ""
self.cmdline_edit.edit_pos = len(self.cmdline_edit.edit_text)
def cmdline_history_prev(w, size, key):
cmdline_history_browse(-1)
def cmdline_history_next(w, size, key):
cmdline_history_browse(1)
def cmdline_start_of_line(w, size, key):
self.cmdline_edit.edit_pos = 0
def cmdline_end_of_line(w, size, key):
self.cmdline_edit.edit_pos = len(self.cmdline_edit.edit_text)
def cmdline_del_word(w, size, key):
pos = self.cmdline_edit.edit_pos
before, after = (
self.cmdline_edit.edit_text[:pos],
self.cmdline_edit.edit_text[pos:])
before = before[::-1]
before = before.lstrip()
i = 0
while i < len(before):
if not before[i].isspace():
i += 1
else:
break
self.cmdline_edit.edit_text = before[i:][::-1] + after
self.cmdline_edit.edit_pos = len(before[i:])
def cmdline_del_to_start_of_line(w, size, key):
pos = self.cmdline_edit.edit_pos
self.cmdline_edit.edit_text = self.cmdline_edit.edit_text[pos:]
self.cmdline_edit.edit_pos = 0
def toggle_cmdline_focus(w, size, key):
self.columns.set_focus(self.lhs_col)
if self.lhs_col.get_focus() is self.cmdline_sigwrap:
if CONFIG["hide_cmdline_win"]:
self.set_cmdline_state(False)
self.lhs_col.set_focus(self.search_controller.search_AttrMap
if self.search_controller.search_box else
self.source_attr)
else:
if CONFIG["hide_cmdline_win"]:
self.set_cmdline_state(True)
self.cmdline_pile.set_focus(self.cmdline_edit_bar)
self.lhs_col.set_focus(self.cmdline_sigwrap)
self.cmdline_edit_sigwrap.listen("tab", cmdline_tab_complete)
self.cmdline_edit_sigwrap.listen("ctrl v", cmdline_append_newline)
self.cmdline_edit_sigwrap.listen("enter", cmdline_exec)
self.cmdline_edit_sigwrap.listen("ctrl n", cmdline_history_next)
self.cmdline_edit_sigwrap.listen("ctrl p", cmdline_history_prev)
self.cmdline_edit_sigwrap.listen("esc", toggle_cmdline_focus)
self.cmdline_edit_sigwrap.listen("ctrl d", toggle_cmdline_focus)
self.cmdline_edit_sigwrap.listen("ctrl a", cmdline_start_of_line)
self.cmdline_edit_sigwrap.listen("ctrl e", cmdline_end_of_line)
self.cmdline_edit_sigwrap.listen("ctrl w", cmdline_del_word)
self.cmdline_edit_sigwrap.listen("ctrl u", cmdline_del_to_start_of_line)
self.top.listen("ctrl x", toggle_cmdline_focus)
def set_cmdline_default_size(weight):
self.cmdline_weight = weight
self.set_cmdline_size()
def max_cmdline(w, size, key):
set_cmdline_default_size(5)
def min_cmdline(w, size, key):
set_cmdline_default_size(1/2)
def grow_cmdline(w, size, key):
weight = self.cmdline_weight
if weight < 5:
weight *= 1.25
set_cmdline_default_size(weight)
def shrink_cmdline(w, size, key):
weight = self.cmdline_weight
if weight > 1/2:
weight /= 1.25
set_cmdline_default_size(weight)
self.cmdline_sigwrap.listen("=", max_cmdline)
self.cmdline_sigwrap.listen("+", grow_cmdline)
self.cmdline_sigwrap.listen("_", min_cmdline)
self.cmdline_sigwrap.listen("-", shrink_cmdline)
def max_sidebar(w, size, key):
from pudb.settings import save_config
weight = 5
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
def min_sidebar(w, size, key):
from pudb.settings import save_config
weight = 1/5
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
def grow_sidebar(w, size, key):
from pudb.settings import save_config
weight = self.columns.column_types[1][1]
if weight < 5:
weight *= 1.25
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
def shrink_sidebar(w, size, key):
from pudb.settings import save_config
weight = self.columns.column_types[1][1]
if weight > 1/5:
weight /= 1.25
CONFIG["sidebar_width"] = weight
save_config(CONFIG)
self.columns.column_types[1] = "weight", weight
self.columns._invalidate()
self.rhs_col_sigwrap.listen("=", max_sidebar)
self.rhs_col_sigwrap.listen("+", grow_sidebar)
self.rhs_col_sigwrap.listen("_", min_sidebar)
self.rhs_col_sigwrap.listen("-", shrink_sidebar)
def show_output(w, size, key):
self.screen.stop()
input("Hit Enter to return:")
self.screen.start()
def reload_breakpoints_and_redisplay():
reload_breakpoints()
curr_line = self.current_line
self.set_source_code_provider(self.source_code_provider,
force_update=True)
if curr_line is not None:
self.current_line = self.source[int(curr_line.line_nr)-1]
self.current_line.set_current(True)
def reload_breakpoints():
self.debugger.clear_all_breaks()
from pudb.settings import load_breakpoints
for bpoint_descr in load_breakpoints():
dbg.set_break(*bpoint_descr)
self.update_breakpoints()
def show_traceback(w, size, key):
if self.current_exc_tuple is not None:
from traceback import format_exception
result = self.dialog(
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(
"".join(format_exception(*self.current_exc_tuple)))])),
[
("Close", "close"),
("Location", "location")
],
title="Exception Viewer",
focus_buttons=True,
bind_enter_esc=False)
if result == "location":
self.debugger.set_frame_index(len(self.debugger.stack)-1)
else:
self.message("No exception available.")
def run_external_cmdline(w, size, key):
self.screen.stop()
curframe = self.debugger.curframe
import pudb.shell as shell
if CONFIG["shell"] == "ipython" and shell.have_ipython():
runner = shell.run_ipython_shell
elif CONFIG["shell"] == "ipython_kernel" and shell.have_ipython():
runner = shell.run_ipython_kernel
elif CONFIG["shell"] == "bpython" and shell.HAVE_BPYTHON:
runner = shell.run_bpython_shell
elif CONFIG["shell"] == "ptpython" and shell.HAVE_PTPYTHON:
runner = shell.run_ptpython_shell
elif CONFIG["shell"] == "ptipython" and shell.HAVE_PTIPYTHON:
runner = shell.run_ptipython_shell
elif CONFIG["shell"] == "classic":
runner = shell.run_classic_shell
else:
try:
if not shell.custom_shell_dict:
from os.path import expanduser
cshell_fname = expanduser(CONFIG["shell"])
with open(cshell_fname) as inf:
exec(compile(inf.read(), cshell_fname, "exec"),
shell.custom_shell_dict,
shell.custom_shell_dict)
except Exception:
print("Error when importing custom shell:")
from traceback import print_exc
print_exc()
print("Falling back to classic shell")
runner = shell.run_classic_shell
else:
if "pudb_shell" not in shell.custom_shell_dict:
print("%s does not contain a function named pudb_shell at "
"the module level." % CONFIG["shell"])
print("Falling back to classic shell")
runner = shell.run_classic_shell
else:
runner = shell.custom_shell_dict["pudb_shell"]
runner(curframe.f_globals, curframe.f_locals)
self.screen.start()
self.update_var_view()
def run_cmdline(w, size, key):
if CONFIG["shell"] == "internal":
return toggle_cmdline_focus(w, size, key)
else:
return run_external_cmdline(w, size, key)
def focus_code(w, size, key):
self.columns.set_focus(self.lhs_col)
self.lhs_col.set_focus(self.source_attr)
class RHColumnFocuser:
def __init__(self, idx):
self.idx = idx
def __call__(subself, w, size, key):
self.columns.set_focus(self.rhs_col_sigwrap)
self.rhs_col.set_focus(self.rhs_col.widget_list[subself.idx])
def quit(w, size, key):
self.debugger.set_quit()
end()
def do_edit_config(w, size, key):
self.run_edit_config()
def redraw_screen(w, size, key):
self.screen.clear()
def help(pages):
self.message(pages, title="PuDB - The Python Urwid Debugger")
def edit_current_frame(w, size, key):
_, pos = self.source.get_focus()
source_identifier = \
self.source_code_provider.get_source_identifier()
if source_identifier is None:
self.message(
"Cannot edit the current file--"
"source code does not correspond to a file location. "
"(perhaps this is generated code)")
open_file_editor(source_identifier, pos+1)
self.top.listen("o", show_output)
self.top.listen("ctrl r",
lambda w, size, key: reload_breakpoints_and_redisplay())
self.top.listen("!", run_cmdline)
self.top.listen("e", show_traceback)
self.top.listen("C", focus_code)
self.top.listen("V", RHColumnFocuser(0))
self.top.listen("S", RHColumnFocuser(1))
self.top.listen("B", RHColumnFocuser(2))
self.top.listen("q", quit)
self.top.listen("ctrl p", do_edit_config)
self.top.listen("ctrl l", redraw_screen)
self.top.listen("ctrl e", edit_current_frame)
want_curses_display = (
CONFIG["display"] == "curses"
or (
CONFIG["display"] == "auto"
and not (
os.environ.get("TERM", "").startswith("xterm")
or os.environ.get("TERM", "").startswith("rxvt")
)))
if (want_curses_display
and not (stdin is not None or stdout is not None)
and CursesScreen is not None):
self.screen = ThreadsafeCursesScreen()
else:
screen_kwargs = {}
if stdin is not None:
screen_kwargs["input"] = stdin
if stdout is not None:
screen_kwargs["output"] = stdout
if term_size is not None:
screen_kwargs["term_size"] = term_size
if screen_kwargs:
self.screen = ThreadsafeFixedSizeRawScreen(**screen_kwargs)
else:
self.screen = ThreadsafeRawScreen()
del want_curses_display
if curses:
try:
curses.setupterm()
except Exception:
pass
else:
color_support = curses.tigetnum("colors")
if color_support == 256 and isinstance(self.screen, RawScreen):
self.screen.set_terminal_properties(256)
self.setup_palette(self.screen)
self.show_count = 0
self.source_code_provider = None
self.current_line = None
self.quit_event_loop = False
def add_cmdline_content(self, s, attr):
s = s.rstrip("\n")
from pudb.ui_tools import SelectableText
self.cmdline_contents.append(
urwid.AttrMap(SelectableText(s), attr, "focused "+attr))
self.cmdline_list.set_focus_valign("bottom")
self.cmdline_list.set_focus(len(self.cmdline_contents) - 1,
coming_from="above")
self.set_cmdline_state(True)
def reset_cmdline_size(self):
self.lhs_col.item_types[-1] = "weight", \
self.cmdline_weight if self.cmdline_on else 0
def set_cmdline_size(self, weight=None):
if weight is None:
weight = self.cmdline_weight
self.lhs_col.item_types[-1] = "weight", weight
self.lhs_col._invalidate()
def set_cmdline_state(self, state_on):
if state_on != self.cmdline_on:
self.cmdline_on = state_on
self.set_cmdline_size(None if state_on else 0)
def translate_ui_stack_index(self, index):
if CONFIG["current_stack_frame"] == "top":
return len(self.debugger.stack)-1-index
elif CONFIG["current_stack_frame"] == "bottom":
return index
else:
raise ValueError("invalid value for 'current_stack_frame' pref")
def message(self, msg, title="Message", **kwargs):
self.call_with_ui(self.dialog,
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(msg)])),
[("OK", True)], title=title, **kwargs)
def run_edit_config(self):
from pudb.settings import edit_config, save_config
edit_config(self, CONFIG)
save_config(CONFIG)
def dialog(self, content, buttons_and_results,
title=None, bind_enter_esc=True, focus_buttons=False,
extra_bindings=[]):
class ResultSetter:
def __init__(subself, res):
subself.res = res
def __call__(subself, btn):
self.quit_event_loop = [subself.res]
Attr = urwid.AttrMap
if bind_enter_esc:
content = SignalWrap(content)
def enter(w, size, key):
self.quit_event_loop = [True]
def esc(w, size, key):
self.quit_event_loop = [False]
content.listen("enter", enter)
content.listen("esc", esc)
button_widgets = []
for btn_descr in buttons_and_results:
if btn_descr is None:
button_widgets.append(urwid.Text(""))
else:
btn_text, btn_result = btn_descr
button_widgets.append(
Attr(urwid.Button(btn_text, ResultSetter(btn_result)),
"button", "focused button"))
w = urwid.Columns([
content,
("fixed", 15, urwid.ListBox(urwid.SimpleListWalker(button_widgets))),
], dividechars=1)
if focus_buttons:
w.set_focus_column(1)
if title is not None:
w = urwid.Pile([
("flow", urwid.AttrMap(
urwid.Text(title, align="center"),
"dialog title")),
("fixed", 1, urwid.SolidFill()),
w])
class ResultSettingEventHandler:
def __init__(subself, res):
subself.res = res
def __call__(subself, w, size, key):
self.quit_event_loop = [subself.res]
w = SignalWrap(w)
for key, binding in extra_bindings:
if isinstance(binding, str):
w.listen(key, ResultSettingEventHandler(binding))
else:
w.listen(key, binding)
w = urwid.LineBox(w)
w = urwid.Overlay(w, self.top,
align="center",
valign="middle",
width=("relative", 75),
height=("relative", 75),
)
w = Attr(w, "background")
return self.event_loop(w)[0]
@staticmethod
def setup_palette(screen):
may_use_fancy_formats = not hasattr(urwid.escape, "_fg_attr_xterm")
from pudb.theme import get_palette
screen.register_palette(
get_palette(may_use_fancy_formats, CONFIG["theme"]))
def show_exception_dialog(self, exc_tuple):
from traceback import format_exception
desc = (
"The program has terminated abnormally because of an exception.\n\n"
"A full traceback is below. You may recall this traceback at any "
"time using the 'e' key. The debugger has entered post-mortem mode "
"and will prevent further state changes."
)
tb_txt = "".join(format_exception(*exc_tuple))
self._show_exception_dialog(
description=desc,
error_info=tb_txt,
title="Program Terminated for Uncaught Exception",
exit_loop_on_ok=True,
)
def show_internal_exc_dlg(self, exc_tuple):
try:
self._show_internal_exc_dlg(exc_tuple)
except Exception:
ui_log.exception("Error while showing error dialog")
def _show_internal_exc_dlg(self, exc_tuple):
from traceback import format_exception
from pudb import VERSION
desc = (
"Pudb has encountered and safely caught an internal exception.\n\n"
"The full traceback and some other information can be found "
"below. Please report this information, along with details on "
"what you were doing at the time the exception occurred, at: "
"https://github.com/inducer/pudb/issues"
)
error_info = (
"python version: {python}\n"
"pudb version: {pudb}\n"
"urwid version: {urwid}\n"
"{tb}\n"
).format(
python=sys.version.replace("\n", " "),
pudb=VERSION,
urwid=".".join(map(str, urwid.version.VERSION)),
tb="".join(format_exception(*exc_tuple))
)
self._show_exception_dialog(
description=desc,
error_info=error_info,
title="Pudb Internal Exception Encountered",
)
def _show_exception_dialog(self, description, error_info, title,
exit_loop_on_ok=False):
res = self.dialog(
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(
"\n\n".join([description, error_info])
)])),
title=title,
buttons_and_results=[
("OK", exit_loop_on_ok),
("Save traceback", "save"),
],
)
if res == "save":
self._save_traceback(error_info)
def _save_traceback(self, error_info):
try:
from os.path import exists
filename = next(
fname for n in count()
for fname in ["traceback-%d.txt" % n if n else "traceback.txt"]
if not exists(fname)
)
with open(filename, "w") as outf:
outf.write(error_info)
self.message("Traceback saved as %s." % filename, title="Success")
except Exception:
from traceback import format_exception
io_tb_txt = "".join(format_exception(*sys.exc_info()))
self.message(
"An error occurred while trying to write "
"the traceback:\n\n" + io_tb_txt,
title="I/O error")
def show(self):
if self.show_count == 0:
self.screen.start()
self.show_count += 1
def hide(self):
self.show_count -= 1
if self.show_count == 0:
self.screen.stop()
def call_with_ui(self, f, *args, **kwargs):
self.show()
try:
return f(*args, **kwargs)
finally:
self.hide()
def event_loop(self, toplevel=None):
prev_quit_loop = self.quit_event_loop
try:
import pygments
except ImportError:
if not hasattr(self, "pygments_message_shown"):
self.pygments_message_shown = True
self.message("Package 'pygments' not found. "
"Syntax highlighting disabled.")
WELCOME_LEVEL = "e039"
if CONFIG["seen_welcome"] < WELCOME_LEVEL:
CONFIG["seen_welcome"] = WELCOME_LEVEL
from pudb import VERSION
self.message("Welcome to PudB %s!\n\n"
"PuDB is a full-screen, console-based visual debugger for "
"Python. Its goal is to provide all the niceties of modern "
"GUI-based debuggers in a more lightweight and "
"keyboard-friendly package. "
"PuDB allows you to debug code right where you write and test "
"it--in a terminal. If you've worked with the excellent "
"(but nowadays ancient) DOS-based Turbo Pascal or C tools, "
"PuDB's UI might look familiar.\n\n"
"If you're new here, welcome! The help screen "
"(invoked by hitting '?' after this message) should get you "
"on your way.\n"
"\nChanges in version 2021.1:\n\n"
"- Add shortcut to edit files in source and stack view "
"(Gábor Vecsei)\n"
"- Major improvements to the variable view "
"(Michael van der Kamp)\n"
"- Better internal error reporting (Michael van der Kamp)\n"
"\nChanges in version 2020.1:\n\n"
"- Add vi keys for the sidebar (Asbjørn Apeland)\n"
"- Add -m command line switch (Elias Dorneles)\n"
"- Debug forked processes (Jonathan Striebel)\n"
"- Robustness and logging for internal errors "
"(Michael Vanderkamp)\n"
"- 'Reverse' remote debugging (jen6)\n"
"\nChanges in version 2019.2:\n\n"
"- Auto-hide the command line (Mark Blakeney)\n"
"- Improve help and add jump to breakpoint (Mark Blakeney)\n"
"- Drop Py2.6 support\n"
"- Show callable attributes in var view\n"
"- Allow scrolling sidebar with j/k\n"
"- Fix setting breakpoints in Py3.8 (Aaron Meurer)\n"
"\nChanges in version 2019.1:\n\n"
"- Allow 'space' as a key to expand variables (Enrico Troeger)\n"
"- Have a persistent setting on variable visibility \n"
" (Enrico Troeger)\n"
"- Enable/partially automate opening the debugger in another \n"
" terminal (Anton Barkovsky)\n"
"- Make sidebar scrollable with j/k (Clayton Craft)\n"
"- Bug fixes.\n"
"\nChanges in version 2018.1:\n\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.4:\n\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.3:\n\n"
"- Add handling of safely_stringify_for_pudb to allow custom \n"
" per-type stringification.\n"
"- Add support for custom shells.\n"
"- Better support for 2-wide characters in the var view.\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.2:\n\n"
"- Bug fixes.\n"
"\nChanges in version 2017.1.1:\n\n"
"- IMPORTANT: 2017.1 and possibly earlier versions had a \n"
" bug with exponential growth of shell history for the \n"
" 'classic' shell, which (among other problems) could lead\n"
" to slow startup of the classic shell. Check the file\n\n"
" ~/.config/pudb/shell-history\n\n"
" for size (and useful content) and delete/trim as needed.\n"
"\nChanges in version 2017.1:\n\n"
"- Many, many bug fixes (thank you to all who contributed!)\n"
"\nChanges in version 2016.2:\n\n"
"- UI improvements for disabled breakpoints.\n"
"- Bug fixes.\n"
"\nChanges in version 2016.1:\n\n"
"- Fix module browser on Py3.\n"
"\nChanges in version 2015.4:\n\n"
"- Support for (somewhat rudimentary) remote debugging\n"
" through a telnet connection.\n"
"- Fix debugging of generated code in Python 3.\n"
"\nChanges in version 2015.3:\n\n"
"- Disable set_trace lines from the UI (Aaron Meurer)\n"
"- Better control over attribute visibility (Ned Batchelder)\n"
"\nChanges in version 2015.2:\n\n"
"- ptpython support (P. Varet)\n"
"- Improved rxvt support (Louper Rouch)\n"
"- More keyboard shortcuts in the command line"
"(Alex Sheluchin)\n"
"\nChanges in version 2015.1:\n\n"
"- Add solarized theme (Rinat Shigapov)\n"
"- More keyboard shortcuts in the command line"
"(Alexander Corwin)\n"
"\nChanges in version 2014.1:\n\n"
"- Make prompt-on-quit optional (Mike Burr)\n"
"- Make tab completion in the built-in shell saner\n"
"- Fix handling of unicode source\n"
" (reported by Morten Nielsen and Buck Golemon)\n"
"\nChanges in version 2013.5.1:\n\n"
"- Fix loading of saved breakpoint conditions "
"(Antoine Dechaume)\n"
"- Fixes for built-in command line\n"
"- Theme updates\n"
"\nChanges in version 2013.5:\n\n"
"- Add command line window\n"
"- Uses curses display driver when appropriate\n"
"\nChanges in version 2013.4:\n\n"
"- Support for debugging generated code\n"
"\nChanges in version 2013.3.5:\n\n"
"- IPython fixes (Aaron Meurer)\n"
"- Py2/3 configuration fixes (Somchai Smythe)\n"
"- PyPy fixes (Julian Berman)\n"
"\nChanges in version 2013.3.4:\n\n"
"- Don't die if curses doesn't like what stdin/out are\n"
" connected to.\n"
"\nChanges in version 2013.3.3:\n\n"
"- As soon as pudb is loaded, you can break to the debugger by\n"
" evaluating the expression 'pu.db', where 'pu' is a new \n"
" 'builtin' that pudb has rudely shoved into the interpreter.\n"
"\nChanges in version 2013.3.2:\n\n"
"- Don't attempt to do signal handling if a signal handler\n"
" is already set (Fix by Buck Golemon).\n"
"\nChanges in version 2013.3.1:\n\n"
"- Don't ship {ez,distribute}_setup at all.\n"
" It breaks more than it helps.\n"
"\nChanges in version 2013.3:\n\n"
"- Switch to setuptools as a setup helper.\n"
"\nChanges in version 2013.2:\n\n"
"- Even more bug fixes.\n"
"\nChanges in version 2013.1:\n\n"
"- Ctrl-C will now break to the debugger in a way that does\n"
" not terminate the program\n"
"- Lots of bugs fixed\n"
"\nChanges in version 2012.3:\n\n"
"- Python 3 support (contributed by Brad Froehle)\n"
"- Better search box behavior (suggested by Ram Rachum)\n"
"- Made it possible to go back and examine state from "
"'finished' window. (suggested by Aaron Meurer)\n"
"\nChanges in version 2012.2.1:\n\n"
"- Don't touch config files during install.\n"
"\nChanges in version 2012.2:\n\n"
"- Add support for BPython as a shell.\n"
"- You can now run 'python -m pudb script.py' on Py 2.6+.\n"
" '-m pudb.run' still works--but it's four "
"keystrokes longer! :)\n"
"\nChanges in version 2012.1:\n\n"
"- Work around an API change in IPython 0.12.\n"
"\nChanges in version 2011.3.1:\n\n"
"- Work-around for bug in urwid >= 1.0.\n"
"\nChanges in version 2011.3:\n\n"
"- Finer-grained string highlighting "
"(contributed by Aaron Meurer)\n"
"- Prefs tweaks, instant-apply, top-down stack "
"(contributed by Aaron Meurer)\n"
"- Size changes in sidebar boxes (contributed by Aaron Meurer)\n"
"- New theme 'midnight' (contributed by Aaron Meurer)\n"
"- Support for IPython 0.11 (contributed by Chris Farrow)\n"
"- Suport for custom stringifiers "
"(contributed by Aaron Meurer)\n"
"- Line wrapping in variables view "
"(contributed by Aaron Meurer)\n"
"\nChanges in version 2011.2:\n\n"
"- Fix for post-mortem debugging (contributed by 'Sundance')\n"
"\nChanges in version 2011.1:\n\n"
"- Breakpoints saved between sessions\n"
"- A new 'dark vim' theme\n"
"(both contributed by Naveen Michaud-Agrawal)\n"
"\nChanges in version 0.93:\n\n"
"- Stored preferences (no more pesky IPython prompt!)\n"
"- Themes\n"
"- Line numbers (optional)\n"
% VERSION)
from pudb.settings import save_config
save_config(CONFIG)
self.run_edit_config()
try:
if toplevel is None:
toplevel = self.top
self.size = self.screen.get_cols_rows()
self.quit_event_loop = False
while not self.quit_event_loop:
canvas = toplevel.render(self.size, focus=True)
self.screen.draw_screen(self.size, canvas)
keys = self.screen.get_input()
for k in keys:
if k == "window resize":
self.size = self.screen.get_cols_rows()
else:
try:
toplevel.keypress(self.size, k)
except Exception:
self.show_internal_exc_dlg(sys.exc_info())
return self.quit_event_loop
finally:
self.quit_event_loop = prev_quit_loop
# }}}
# {{{ debugger-facing interface
def interaction(self, exc_tuple, show_exc_dialog=True):
self.current_exc_tuple = exc_tuple
from pudb import VERSION
caption = [(None,
"PuDB %s - ?:help n:next s:step into b:breakpoint "
"!:python command line"
% VERSION)]
if self.debugger.post_mortem:
if show_exc_dialog and exc_tuple is not None:
self.show_exception_dialog(exc_tuple)
caption.extend([
(None, " "),
("warning", "[POST-MORTEM MODE]")
])
elif exc_tuple is not None:
caption.extend([
(None, " "),
("warning", "[PROCESSING EXCEPTION - hit 'e' to examine]")
])
self.caption.set_text(caption)
self.event_loop()
def set_source_code_provider(self, source_code_provider, force_update=False):
if self.source_code_provider != source_code_provider or force_update:
self.source[:] = source_code_provider.get_lines(self)
self.source_code_provider = source_code_provider
self.current_line = None
def show_line(self, line, source_code_provider=None):
changed_file = False
if source_code_provider is not None:
changed_file = self.source_code_provider != source_code_provider
self.set_source_code_provider(source_code_provider)
line -= 1
if line >= 0 and line < len(self.source):
self.source_list.set_focus(line)
if changed_file:
self.source_list.set_focus_valign("middle")
def set_current_line(self, line, source_code_provider):
if self.current_line is not None:
self.current_line.set_current(False)
self.show_line(line, source_code_provider)
line -= 1
if line >= 0 and line < len(self.source):
self.current_line = self.source[line]
self.current_line.set_current(True)
def update_var_view(self, locals=None, globals=None, focus_index=None):
if locals is None:
locals = self.debugger.curframe.f_locals
if globals is None:
globals = self.debugger.curframe.f_globals
from pudb.var_view import make_var_view
self.locals[:] = make_var_view(
self.get_frame_var_info(read_only=True),
locals, globals)
if focus_index is not None:
# Have to set the focus _after_ updating the locals list: there
# appears to be a brief moment while resetting the list during which
# the list is empty but urwid will attempt to set the focus anyway,
# which causes problems.
try:
self.var_list._w.set_focus(focus_index)
except IndexError:
# sigh oh well we tried
pass
def _get_bp_list(self):
return [bp
for fn, bp_lst in self.debugger.get_all_breaks().items()
for lineno in bp_lst
for bp in self.debugger.get_breaks(fn, lineno)
if not bp.temporary]
def _format_fname(self, fname):
from os.path import dirname, basename
name = basename(fname)
if name == "__init__.py":
name = "..."+dirname(fname)[-10:]+"/"+name
return name
def update_breakpoints(self):
self.bp_walker[:] = [
BreakpointFrame(self.debugger.current_bp == (bp.file, bp.line),
self._format_fname(bp.file), bp)
for bp in self._get_bp_list()]
def update_stack(self):
def make_frame_ui(frame_lineno):
frame, lineno = frame_lineno
code = frame.f_code
class_name = None
if code.co_argcount and code.co_varnames[0] == "self":
try:
class_name = frame.f_locals["self"].__class__.__name__
except Exception:
from pudb.lowlevel import ui_log
message = "Failed to determine class name"
ui_log.exception(message)
class_name = "!! %s !!" % message
return StackFrame(frame is self.debugger.curframe,
code.co_name, class_name,
self._format_fname(code.co_filename), lineno)
frame_uis = [make_frame_ui(fl) for fl in self.debugger.stack]
if CONFIG["current_stack_frame"] == "top":
frame_uis = frame_uis[::-1]
elif CONFIG["current_stack_frame"] == "bottom":
pass
else:
raise ValueError("invalid value for 'current_stack_frame' pref")
self.stack_walker[:] = frame_uis
def update_cmdline_win(self):
self.set_cmdline_state(not CONFIG["hide_cmdline_win"])
# }}}
# vim: foldmethod=marker:expandtab:softtabstop=4
| true
| true
|
7903937f6bb0416f831585c48a017b3d93a5019d
| 1,474
|
py
|
Python
|
merge/evaluation.py
|
matroshenko/SPLERGE_via_TF
|
1768485985b00fd7dabd726d8d24cbdb947dd143
|
[
"MIT"
] | null | null | null |
merge/evaluation.py
|
matroshenko/SPLERGE_via_TF
|
1768485985b00fd7dabd726d8d24cbdb947dd143
|
[
"MIT"
] | null | null | null |
merge/evaluation.py
|
matroshenko/SPLERGE_via_TF
|
1768485985b00fd7dabd726d8d24cbdb947dd143
|
[
"MIT"
] | null | null | null |
import os

import tensorflow as tf

from merge.model import Model


def run_model_on_random_input(model):
    batch_size = 1
    height = 100
    width = 200
    inputs = {
        'image': tf.random.uniform(shape=(batch_size, height, width, 3), minval=0, maxval=256, dtype='int32'),
        'horz_split_points_probs': tf.random.uniform(shape=(batch_size, height), dtype='float32'),
        'vert_split_points_probs': tf.random.uniform(shape=(batch_size, width), dtype='float32'),
        'horz_split_points_binary': tf.random.uniform(shape=(batch_size, height), minval=0, maxval=2, dtype='int32'),
        'vert_split_points_binary': tf.random.uniform(shape=(batch_size, width), minval=0, maxval=2, dtype='int32')
    }
    model(inputs)


def load_model(model_file_path, compute_metric):
    assert os.path.exists(model_file_path)
    model = Model(compute_metric)
    run_model_on_random_input(model)
    model.load_weights(model_file_path)

    # Metric can't be calculated in graph mode.
    run_eagerly = True if compute_metric else False
    model.compile(run_eagerly=run_eagerly)
    return model


def convert_ds_element_to_tuple(element):
    input_keys = [
        'image',
        'horz_split_points_probs',
        'vert_split_points_probs',
        'horz_split_points_binary',
        'vert_split_points_binary'
    ]
    return (
        {key: element[key] for key in input_keys},
        {
            'markup_table': element['markup_table']
        }
    )
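

# Illustrative usage sketch, not part of the original module: run a saved
# model over a tf.data.Dataset using the two helpers above. The checkpoint
# path and `dataset` are placeholders, and the dataset elements are assumed
# to be unbatched and to carry the keys handled by convert_ds_element_to_tuple.
def run_model_on_dataset(model_file_path, dataset):
    model = load_model(model_file_path, compute_metric=False)
    # Map raw elements to (inputs, targets) tuples and add a batch dimension.
    tuples_ds = dataset.map(convert_ds_element_to_tuple).batch(1)
    for inputs, _targets in tuples_ds:
        # Forward pass only; targets are ignored in this sketch.
        yield model(inputs)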
| 33.5
| 117
| 0.681818
|
import os

import tensorflow as tf

from merge.model import Model


def run_model_on_random_input(model):
    batch_size = 1
    height = 100
    width = 200
    inputs = {
        'image': tf.random.uniform(shape=(batch_size, height, width, 3), minval=0, maxval=256, dtype='int32'),
        'horz_split_points_probs': tf.random.uniform(shape=(batch_size, height), dtype='float32'),
        'vert_split_points_probs': tf.random.uniform(shape=(batch_size, width), dtype='float32'),
        'horz_split_points_binary': tf.random.uniform(shape=(batch_size, height), minval=0, maxval=2, dtype='int32'),
        'vert_split_points_binary': tf.random.uniform(shape=(batch_size, width), minval=0, maxval=2, dtype='int32')
    }
    model(inputs)


def load_model(model_file_path, compute_metric):
    assert os.path.exists(model_file_path)
    model = Model(compute_metric)
    run_model_on_random_input(model)
    model.load_weights(model_file_path)
    run_eagerly = True if compute_metric else False
    model.compile(run_eagerly=run_eagerly)
    return model


def convert_ds_element_to_tuple(element):
    input_keys = [
        'image',
        'horz_split_points_probs',
        'vert_split_points_probs',
        'horz_split_points_binary',
        'vert_split_points_binary'
    ]
    return (
        {key: element[key] for key in input_keys},
        {
            'markup_table': element['markup_table']
        }
    )
| true
| true
|
7903939aab6de4ba538bf96ddafc16c8c872aaee
| 26,827
|
py
|
Python
|
nova/objects/service.py
|
bopopescu/TestNova
|
fb6a183b54f87cc078dc6de5be89711ec0d9ac26
|
[
"Apache-2.0"
] | 1
|
2018-08-19T02:13:16.000Z
|
2018-08-19T02:13:16.000Z
|
nova/objects/service.py
|
bopopescu/TestNova
|
fb6a183b54f87cc078dc6de5be89711ec0d9ac26
|
[
"Apache-2.0"
] | null | null | null |
nova/objects/service.py
|
bopopescu/TestNova
|
fb6a183b54f87cc078dc6de5be89711ec0d9ac26
|
[
"Apache-2.0"
] | 1
|
2020-07-22T22:13:56.000Z
|
2020-07-22T22:13:56.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import availability_zones
from nova import context as nova_context
from nova.db import api as db
from nova import exception
from nova.notifications.objects import base as notification
from nova.notifications.objects import service as service_notification
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 35
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
# along with any pertinent data. For things that we can programmatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service
# bump so that we can drive version pins from it. We could include other
# service RPC versions at some point, minimum object versions, etc.
#
# The TestServiceVersion test will fail if the calculated set of
# things differs from the value in the last item of the list below,
# indicating that a version bump is needed.
#
# Also note that there are other reasons we may want to bump this,
# which will not be caught by the test. An example of this would be
# triggering (or disabling) an online data migration once all services
# in the cluster are at the same level.
#
# If a version bump is required for something mechanical, just document
# that generic thing here (like compute RPC version bumps). No need to
# replicate the details from compute/rpcapi.py here. However, for more
# complex service interactions, extra detail should be provided
SERVICE_VERSION_HISTORY = (
# Version 0: Pre-history
{'compute_rpc': '4.0'},
# Version 1: Introduction of SERVICE_VERSION
{'compute_rpc': '4.4'},
# Version 2: Compute RPC version 4.5
{'compute_rpc': '4.5'},
# Version 3: Compute RPC version 4.6
{'compute_rpc': '4.6'},
# Version 4: Add PciDevice.parent_addr (data migration needed)
{'compute_rpc': '4.6'},
# Version 5: Compute RPC version 4.7
{'compute_rpc': '4.7'},
# Version 6: Compute RPC version 4.8
{'compute_rpc': '4.8'},
# Version 7: Compute RPC version 4.9
{'compute_rpc': '4.9'},
# Version 8: Compute RPC version 4.10
{'compute_rpc': '4.10'},
# Version 9: Compute RPC version 4.11
{'compute_rpc': '4.11'},
# Version 10: Compute node conversion to Inventories
{'compute_rpc': '4.11'},
# Version 11: Compute RPC version 4.12
{'compute_rpc': '4.12'},
# Version 12: The network APIs and compute manager support a NetworkRequest
# object where the network_id value is 'auto' or 'none'. BuildRequest
# objects are populated by nova-api during instance boot.
{'compute_rpc': '4.12'},
# Version 13: Compute RPC version 4.13
{'compute_rpc': '4.13'},
# Version 14: The compute manager supports setting device tags.
{'compute_rpc': '4.13'},
# Version 15: Indicate that nova-conductor will stop a boot if BuildRequest
# is deleted before RPC to nova-compute.
{'compute_rpc': '4.13'},
# Version 16: Indicate that nova-compute will refuse to start if it doesn't
# have a placement section configured.
{'compute_rpc': '4.13'},
# Version 17: Add 'reserve_volume' to the boot from volume flow and
# remove 'check_attach'. The service version bump is needed to fall back to
# the old check in the API as the old computes fail if the volume is moved
# to 'attaching' state by reserve.
{'compute_rpc': '4.13'},
# Version 18: Compute RPC version 4.14
{'compute_rpc': '4.14'},
# Version 19: Compute RPC version 4.15
{'compute_rpc': '4.15'},
# Version 20: Compute RPC version 4.16
{'compute_rpc': '4.16'},
# Version 21: Compute RPC version 4.17
{'compute_rpc': '4.17'},
# Version 22: A marker for the behaviour change of auto-healing code on the
# compute host regarding allocations against an instance
{'compute_rpc': '4.17'},
# Version 23: Compute hosts allow pre-creation of the migration object
# for cold migration.
{'compute_rpc': '4.18'},
# Version 24: Add support for Cinder v3 attach/detach API.
{'compute_rpc': '4.18'},
# Version 25: Compute hosts allow migration-based allocations
# for live migration.
{'compute_rpc': '4.18'},
# Version 26: Adds a 'host_list' parameter to build_and_run_instance()
{'compute_rpc': '4.19'},
# Version 27: Compute RPC version 4.20; adds multiattach argument to
# reserve_block_device_name().
{'compute_rpc': '4.20'},
# Version 28: Adds a 'host_list' parameter to prep_resize()
{'compute_rpc': '4.21'},
# Version 29: Compute RPC version 4.22
{'compute_rpc': '4.22'},
# Version 30: Compute RPC version 5.0
{'compute_rpc': '5.0'},
# Version 31: The compute manager checks if 'trusted_certs' are supported
{'compute_rpc': '5.0'},
# Version 32: Add 'file_backed_memory' support. The service version bump is
# needed to allow the destination of a live migration to reject the
# migration if 'file_backed_memory' is enabled and the source does not
# support 'file_backed_memory'
{'compute_rpc': '5.0'},
# Version 33: Add support for check on the server group with
# 'max_server_per_host' rules
{'compute_rpc': '5.0'},
# Version 34: Adds support to abort queued/preparing live migrations.
{'compute_rpc': '5.0'},
# Version 35: Indicates that nova-compute supports live migration with
# ports bound early on the destination host using VIFMigrateData.
{'compute_rpc': '5.0'},
)
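# Illustrative sketch, not part of nova: the history table above is what lets
# a deployment derive version pins from the lowest SERVICE_VERSION it is
# currently running. The helper below only demonstrates the lookup; its name
# is made up for this example.
def _example_compute_rpc_pin(minimum_service_version):
    # Each entry records the newest compute RPC version understood by
    # services at that SERVICE_VERSION level.
    return SERVICE_VERSION_HISTORY[minimum_service_version]['compute_rpc']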
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Service(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
# Version 1.3: ComputeNode version 1.5
# Version 1.4: Added use_slave to get_by_compute_host
# Version 1.5: ComputeNode version 1.6
# Version 1.6: ComputeNode version 1.7
# Version 1.7: ComputeNode version 1.8
# Version 1.8: ComputeNode version 1.9
# Version 1.9: ComputeNode version 1.10
# Version 1.10: Changes behaviour of loading compute_node
# Version 1.11: Added get_by_host_and_binary
# Version 1.12: ComputeNode version 1.11
# Version 1.13: Added last_seen_up
# Version 1.14: Added forced_down
# Version 1.15: ComputeNode version 1.12
# Version 1.16: Added version
# Version 1.17: ComputeNode version 1.13
# Version 1.18: ComputeNode version 1.14
# Version 1.19: Added get_minimum_version()
# Version 1.20: Added get_minimum_version_multi()
# Version 1.21: Added uuid
# Version 1.22: Added get_by_uuid()
VERSION = '1.22'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(),
'disabled': fields.BooleanField(),
'disabled_reason': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'compute_node': fields.ObjectField('ComputeNode'),
'last_seen_up': fields.DateTimeField(nullable=True),
'forced_down': fields.BooleanField(),
'version': fields.IntegerField(),
}
_MIN_VERSION_CACHE = {}
_SERVICE_VERSION_CACHING = False
def __init__(self, *args, **kwargs):
# NOTE(danms): We're going against the rules here and overriding
# init. The reason is that we want to *ensure* that we're always
# setting the current service version on our objects, overriding
# whatever else might be set in the database, or otherwise (which
# is the normal reason not to override init).
#
# We also need to do this here so that it's set on the client side
# all the time, such that create() and save() operations will
# include the current service version.
if 'version' in kwargs:
raise exception.ObjectActionError(
action='init',
reason='Version field is immutable')
super(Service, self).__init__(*args, **kwargs)
self.version = SERVICE_VERSION
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
super(Service, self).obj_make_compatible_from_manifest(
primitive, target_version, version_manifest)
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 21) and 'uuid' in primitive:
del primitive['uuid']
if _target_version < (1, 16) and 'version' in primitive:
del primitive['version']
if _target_version < (1, 14) and 'forced_down' in primitive:
del primitive['forced_down']
if _target_version < (1, 13) and 'last_seen_up' in primitive:
del primitive['last_seen_up']
if _target_version < (1, 10):
# service.compute_node was not lazy-loaded, we need to provide it
# when called
self._do_compute_node(self._context, primitive,
version_manifest)
def _do_compute_node(self, context, primitive, version_manifest):
try:
target_version = version_manifest['ComputeNode']
# NOTE(sbauza): Ironic deployments can have multiple
            # nodes for the same service, but to keep the same behaviour we
            # return only the first element of the list
compute = objects.ComputeNodeList.get_all_by_host(
context, primitive['host'])[0]
except Exception:
return
primitive['compute_node'] = compute.obj_to_primitive(
target_version=target_version,
version_manifest=version_manifest)
@staticmethod
def _from_db_object(context, service, db_service):
allow_missing = ('availability_zone',)
for key in service.fields:
if key in allow_missing and key not in db_service:
continue
if key == 'compute_node':
                # NOTE(sbauza): We want to only lazy-load compute_node
continue
elif key == 'version':
# NOTE(danms): Special handling of the version field, since
# it is read_only and set in our init.
setattr(service, base.get_attrname(key), db_service[key])
elif key == 'uuid' and not db_service.get(key):
# Leave uuid off the object if undefined in the database
# so that it will be generated below.
continue
else:
service[key] = db_service[key]
service._context = context
service.obj_reset_changes()
# TODO(dpeschman): Drop this once all services have uuids in database
if 'uuid' not in service:
service.uuid = uuidutils.generate_uuid()
LOG.debug('Generated UUID %(uuid)s for service %(id)i',
dict(uuid=service.uuid, id=service.id))
service.save()
return service
def obj_load_attr(self, attrname):
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
{'attr': attrname,
'name': self.obj_name(),
'id': self.id,
})
if attrname != 'compute_node':
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if self.binary == 'nova-compute':
# Only n-cpu services have attached compute_node(s)
compute_nodes = objects.ComputeNodeList.get_all_by_host(
self._context, self.host)
else:
            # NOTE(sbauza): Previous behaviour was raising a ServiceNotFound;
            # we keep it for backwards compatibility
raise exception.ServiceNotFound(service_id=self.id)
        # NOTE(sbauza): Ironic deployments can have multiple nodes
        # for the same service, but to keep the same behaviour we return only
        # the first element of the list
self.compute_node = compute_nodes[0]
@base.remotable_classmethod
def get_by_id(cls, context, service_id):
db_service = db.service_get(context, service_id)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_uuid(cls, context, service_uuid):
db_service = db.service_get_by_uuid(context, service_uuid)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_topic(cls, context, host, topic):
db_service = db.service_get_by_host_and_topic(context, host, topic)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_binary(cls, context, host, binary):
try:
db_service = db.service_get_by_host_and_binary(context,
host, binary)
except exception.HostBinaryNotFound:
return
return cls._from_db_object(context, cls(), db_service)
@staticmethod
@db.select_db_reader_mode
def _db_service_get_by_compute_host(context, host, use_slave=False):
return db.service_get_by_compute_host(context, host)
@base.remotable_classmethod
def get_by_compute_host(cls, context, host, use_slave=False):
db_service = cls._db_service_get_by_compute_host(context, host,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_service)
# NOTE(ndipanov): This is deprecated and should be removed on the next
# major version bump
@base.remotable_classmethod
def get_by_args(cls, context, host, binary):
db_service = db.service_get_by_host_and_binary(context, host, binary)
return cls._from_db_object(context, cls(), db_service)
def _check_minimum_version(self):
"""Enforce that we are not older that the minimum version.
This is a loose check to avoid creating or updating our service
record if we would do so with a version that is older that the current
minimum of all services. This could happen if we were started with
older code by accident, either due to a rollback or an old and
un-updated node suddenly coming back onto the network.
There is technically a race here between the check and the update,
but since the minimum version should always roll forward and never
backwards, we don't need to worry about doing it atomically. Further,
the consequence for getting this wrong is minor, in that we'll just
fail to send messages that other services understand.
"""
if not self.obj_attr_is_set('version'):
return
if not self.obj_attr_is_set('binary'):
return
minver = self.get_minimum_version(self._context, self.binary)
if minver > self.version:
raise exception.ServiceTooOld(thisver=self.version,
minver=minver)
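    # Editor's note (not part of the original module): a concrete reading of
    # the check above. If this node was rolled back to code with
    # SERVICE_VERSION = 20 while the deployment minimum for this binary is 30,
    # create() and save() raise ServiceTooOld instead of re-registering the
    # node and dragging the fleet's minimum version backwards. The numbers
    # here are illustrative only.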
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
self._check_minimum_version()
updates = self.obj_get_changes()
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self.uuid = updates['uuid']
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
self._send_notification(fields.NotificationAction.CREATE)
@base.remotable
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
self._check_minimum_version()
db_service = db.service_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_service)
self._send_status_update_notification(updates)
def _send_status_update_notification(self, updates):
        # Note(gibi): We do not trigger a notification on the version field as
        # it is always dirty, which would cause nova to send a notification on
        # every other field change. See the comment in save() too.
if set(updates.keys()).intersection(
{'disabled', 'disabled_reason', 'forced_down'}):
self._send_notification(fields.NotificationAction.UPDATE)
def _send_notification(self, action):
payload = service_notification.ServiceStatusPayload(self)
service_notification.ServiceStatusNotification(
publisher=notification.NotificationPublisher.from_service_obj(
self),
event_type=notification.EventType(
object='service',
action=action),
priority=fields.NotificationPriority.INFO,
payload=payload).emit(self._context)
@base.remotable
def destroy(self):
db.service_destroy(self._context, self.id)
self._send_notification(fields.NotificationAction.DELETE)
@classmethod
def enable_min_version_cache(cls):
cls.clear_min_version_cache()
cls._SERVICE_VERSION_CACHING = True
@classmethod
def clear_min_version_cache(cls):
cls._MIN_VERSION_CACHE = {}
@staticmethod
@db.select_db_reader_mode
def _db_service_get_minimum_version(context, binaries, use_slave=False):
return db.service_get_minimum_version(context, binaries)
@base.remotable_classmethod
def get_minimum_version_multi(cls, context, binaries, use_slave=False):
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version called with likely-incorrect '
'binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(action='get_minimum_version',
reason='Invalid binary prefix')
if (not cls._SERVICE_VERSION_CACHING or
any(binary not in cls._MIN_VERSION_CACHE
for binary in binaries)):
min_versions = cls._db_service_get_minimum_version(
context, binaries, use_slave=use_slave)
if min_versions:
min_versions = {binary: version or 0
for binary, version in
min_versions.items()}
cls._MIN_VERSION_CACHE.update(min_versions)
else:
min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
for binary in binaries}
if min_versions:
version = min(min_versions.values())
else:
version = 0
# NOTE(danms): Since our return value is not controlled by object
# schema, be explicit here.
version = int(version)
return version
@base.remotable_classmethod
def get_minimum_version(cls, context, binary, use_slave=False):
return cls.get_minimum_version_multi(context, [binary],
use_slave=use_slave)
def get_minimum_version_all_cells(context, binaries, require_all=False):
"""Get the minimum service version, checking all cells.
This attempts to calculate the minimum service version for a set
of binaries across all the cells in the system. If require_all
is False, then any cells that fail to report a version will be
ignored (assuming they won't be candidates for scheduling and thus
excluding them from the minimum version calculation is reasonable).
If require_all is True, then a failing cell will cause this to raise
exception.CellTimeout, as would be appropriate for gating some
data migration until everything is new enough.
Note that services that do not report a positive version are excluded
from this, as it crosses all cells which will naturally not have all
services.
"""
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version_all_cells called with '
'likely-incorrect binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(
action='get_minimum_version_all_cells',
reason='Invalid binary prefix')
# NOTE(danms): Instead of using Service.get_minimum_version_multi(), we
# replicate the call directly to the underlying DB method here because
# we want to defeat the caching and we need to filter non-present
# services differently from the single-cell method.
results = nova_context.scatter_gather_all_cells(
context,
Service._db_service_get_minimum_version,
binaries)
min_version = None
for cell_uuid, result in results.items():
if result is nova_context.did_not_respond_sentinel:
LOG.warning('Cell %s did not respond when getting minimum '
'service version', cell_uuid)
if require_all:
raise exception.CellTimeout()
elif result is nova_context.raised_exception_sentinel:
LOG.warning('Failed to get minimum service version for cell %s',
cell_uuid)
if require_all:
# NOTE(danms): Okay, this isn't necessarily a timeout, but
# it's functionally the same from the caller's perspective
# and we logged the fact that it was actually a failure
# for the forensic investigator during the scatter/gather
# routine.
raise exception.CellTimeout()
else:
# NOTE(danms): Don't consider a zero or None result as the minimum
# since we're crossing cells and will likely not have all the
# services being probed.
relevant_versions = [version for version in result.values()
if version]
if relevant_versions:
min_version_cell = min(relevant_versions)
min_version = (min(min_version, min_version_cell)
if min_version else min_version_cell)
# NOTE(danms): If we got no matches at all (such as at first startup)
# then report that as zero to be consistent with the other such
# methods.
return min_version or 0
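# NOTE: editor's usage sketch, not part of the original module; the function
# name and threshold are hypothetical. It shows the intended calling pattern:
# gate an operation on every cell reporting a new-enough nova-compute, letting
# CellTimeout propagate when require_all=True and a cell fails to answer.
def _example_all_computes_at_least(context, required_version):
    minver = get_minimum_version_all_cells(
        context, ['nova-compute'], require_all=True)
    return minver >= required_version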
@base.NovaObjectRegistry.register
class ServiceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Service <= version 1.2
    # Version 1.1: Service version 1.3
# Version 1.2: Service version 1.4
# Version 1.3: Service version 1.5
# Version 1.4: Service version 1.6
# Version 1.5: Service version 1.7
# Version 1.6: Service version 1.8
# Version 1.7: Service version 1.9
# Version 1.8: Service version 1.10
# Version 1.9: Added get_by_binary() and Service version 1.11
# Version 1.10: Service version 1.12
# Version 1.11: Service version 1.13
# Version 1.12: Service version 1.14
# Version 1.13: Service version 1.15
# Version 1.14: Service version 1.16
# Version 1.15: Service version 1.17
# Version 1.16: Service version 1.18
# Version 1.17: Service version 1.19
# Version 1.18: Added include_disabled parameter to get_by_binary()
# Version 1.19: Added get_all_computes_by_hv_type()
VERSION = '1.19'
fields = {
'objects': fields.ListOfObjectsField('Service'),
}
@base.remotable_classmethod
def get_by_topic(cls, context, topic):
db_services = db.service_get_all_by_topic(context, topic)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
# NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
# will be removed so both enabled and disabled hosts are returned
@base.remotable_classmethod
def get_by_binary(cls, context, binary, include_disabled=False):
db_services = db.service_get_all_by_binary(
context, binary, include_disabled=include_disabled)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_by_host(cls, context, host):
db_services = db.service_get_all_by_host(context, host)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all(cls, context, disabled=None, set_zones=False):
db_services = db.service_get_all(context, disabled=disabled)
if set_zones:
db_services = availability_zones.set_availability_zones(
context, db_services)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all_computes_by_hv_type(cls, context, hv_type):
db_services = db.service_get_all_computes_by_hv_type(
context, hv_type, include_disabled=False)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
| 43.90671
| 79
| 0.65654
|
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import availability_zones
from nova import context as nova_context
from nova.db import api as db
from nova import exception
from nova.notifications.objects import base as notification
from nova.notifications.objects import service as service_notification
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
SERVICE_VERSION = 35
SERVICE_VERSION_HISTORY = (
{'compute_rpc': '4.0'},
{'compute_rpc': '4.4'},
{'compute_rpc': '4.5'},
{'compute_rpc': '4.6'},
{'compute_rpc': '4.6'},
{'compute_rpc': '4.7'},
{'compute_rpc': '4.8'},
{'compute_rpc': '4.9'},
{'compute_rpc': '4.10'},
{'compute_rpc': '4.11'},
{'compute_rpc': '4.11'},
{'compute_rpc': '4.12'},
{'compute_rpc': '4.12'},
{'compute_rpc': '4.13'},
{'compute_rpc': '4.13'},
{'compute_rpc': '4.13'},
    # Version 16: Indicate that nova-compute will refuse to start if it doesn't
    # have a placement section configured.
{'compute_rpc': '4.13'},
# Version 17: Add 'reserve_volume' to the boot from volume flow and
# remove 'check_attach'. The service version bump is needed to fall back to
# the old check in the API as the old computes fail if the volume is moved
# to 'attaching' state by reserve.
{'compute_rpc': '4.13'},
# Version 18: Compute RPC version 4.14
{'compute_rpc': '4.14'},
# Version 19: Compute RPC version 4.15
{'compute_rpc': '4.15'},
# Version 20: Compute RPC version 4.16
{'compute_rpc': '4.16'},
# Version 21: Compute RPC version 4.17
{'compute_rpc': '4.17'},
# Version 22: A marker for the behaviour change of auto-healing code on the
# compute host regarding allocations against an instance
{'compute_rpc': '4.17'},
# Version 23: Compute hosts allow pre-creation of the migration object
# for cold migration.
{'compute_rpc': '4.18'},
# Version 24: Add support for Cinder v3 attach/detach API.
{'compute_rpc': '4.18'},
# Version 25: Compute hosts allow migration-based allocations
# for live migration.
{'compute_rpc': '4.18'},
# Version 26: Adds a 'host_list' parameter to build_and_run_instance()
{'compute_rpc': '4.19'},
# Version 27: Compute RPC version 4.20; adds multiattach argument to
# reserve_block_device_name().
{'compute_rpc': '4.20'},
# Version 28: Adds a 'host_list' parameter to prep_resize()
{'compute_rpc': '4.21'},
# Version 29: Compute RPC version 4.22
{'compute_rpc': '4.22'},
# Version 30: Compute RPC version 5.0
{'compute_rpc': '5.0'},
# Version 31: The compute manager checks if 'trusted_certs' are supported
{'compute_rpc': '5.0'},
# Version 32: Add 'file_backed_memory' support. The service version bump is
# needed to allow the destination of a live migration to reject the
# migration if 'file_backed_memory' is enabled and the source does not
# support 'file_backed_memory'
{'compute_rpc': '5.0'},
# Version 33: Add support for check on the server group with
# 'max_server_per_host' rules
{'compute_rpc': '5.0'},
# Version 34: Adds support to abort queued/preparing live migrations.
{'compute_rpc': '5.0'},
# Version 35: Indicates that nova-compute supports live migration with
# ports bound early on the destination host using VIFMigrateData.
{'compute_rpc': '5.0'},
)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Service(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
# Version 1.3: ComputeNode version 1.5
# Version 1.4: Added use_slave to get_by_compute_host
# Version 1.5: ComputeNode version 1.6
# Version 1.6: ComputeNode version 1.7
# Version 1.7: ComputeNode version 1.8
# Version 1.8: ComputeNode version 1.9
# Version 1.9: ComputeNode version 1.10
# Version 1.10: Changes behaviour of loading compute_node
# Version 1.11: Added get_by_host_and_binary
# Version 1.12: ComputeNode version 1.11
# Version 1.13: Added last_seen_up
# Version 1.14: Added forced_down
# Version 1.15: ComputeNode version 1.12
# Version 1.16: Added version
# Version 1.17: ComputeNode version 1.13
# Version 1.18: ComputeNode version 1.14
# Version 1.19: Added get_minimum_version()
# Version 1.20: Added get_minimum_version_multi()
# Version 1.21: Added uuid
# Version 1.22: Added get_by_uuid()
VERSION = '1.22'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(),
'disabled': fields.BooleanField(),
'disabled_reason': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'compute_node': fields.ObjectField('ComputeNode'),
'last_seen_up': fields.DateTimeField(nullable=True),
'forced_down': fields.BooleanField(),
'version': fields.IntegerField(),
}
_MIN_VERSION_CACHE = {}
_SERVICE_VERSION_CACHING = False
def __init__(self, *args, **kwargs):
        # NOTE(danms): We're going against the rules here and overriding
        # init. The reason is that we want to *ensure* that we're always
        # setting the current service version on our objects, overriding
        # whatever else might be set in the database, or otherwise (which
        # is the normal reason not to override init).
        #
        # We also need to do this here so that it's set on the client side
        # all the time, such that create() and save() operations will
        # include the current service version.
if 'version' in kwargs:
raise exception.ObjectActionError(
action='init',
reason='Version field is immutable')
super(Service, self).__init__(*args, **kwargs)
self.version = SERVICE_VERSION
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
super(Service, self).obj_make_compatible_from_manifest(
primitive, target_version, version_manifest)
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 21) and 'uuid' in primitive:
del primitive['uuid']
if _target_version < (1, 16) and 'version' in primitive:
del primitive['version']
if _target_version < (1, 14) and 'forced_down' in primitive:
del primitive['forced_down']
if _target_version < (1, 13) and 'last_seen_up' in primitive:
del primitive['last_seen_up']
if _target_version < (1, 10):
self._do_compute_node(self._context, primitive,
version_manifest)
def _do_compute_node(self, context, primitive, version_manifest):
try:
target_version = version_manifest['ComputeNode']
compute = objects.ComputeNodeList.get_all_by_host(
context, primitive['host'])[0]
except Exception:
return
primitive['compute_node'] = compute.obj_to_primitive(
target_version=target_version,
version_manifest=version_manifest)
@staticmethod
def _from_db_object(context, service, db_service):
allow_missing = ('availability_zone',)
for key in service.fields:
if key in allow_missing and key not in db_service:
continue
if key == 'compute_node':
continue
elif key == 'version':
setattr(service, base.get_attrname(key), db_service[key])
elif key == 'uuid' and not db_service.get(key):
continue
else:
service[key] = db_service[key]
service._context = context
service.obj_reset_changes()
if 'uuid' not in service:
service.uuid = uuidutils.generate_uuid()
LOG.debug('Generated UUID %(uuid)s for service %(id)i',
dict(uuid=service.uuid, id=service.id))
service.save()
return service
def obj_load_attr(self, attrname):
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
{'attr': attrname,
'name': self.obj_name(),
'id': self.id,
})
if attrname != 'compute_node':
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if self.binary == 'nova-compute':
compute_nodes = objects.ComputeNodeList.get_all_by_host(
self._context, self.host)
else:
raise exception.ServiceNotFound(service_id=self.id)
self.compute_node = compute_nodes[0]
@base.remotable_classmethod
def get_by_id(cls, context, service_id):
db_service = db.service_get(context, service_id)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_uuid(cls, context, service_uuid):
db_service = db.service_get_by_uuid(context, service_uuid)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_topic(cls, context, host, topic):
db_service = db.service_get_by_host_and_topic(context, host, topic)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_binary(cls, context, host, binary):
try:
db_service = db.service_get_by_host_and_binary(context,
host, binary)
except exception.HostBinaryNotFound:
return
return cls._from_db_object(context, cls(), db_service)
@staticmethod
@db.select_db_reader_mode
def _db_service_get_by_compute_host(context, host, use_slave=False):
return db.service_get_by_compute_host(context, host)
@base.remotable_classmethod
def get_by_compute_host(cls, context, host, use_slave=False):
db_service = cls._db_service_get_by_compute_host(context, host,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_args(cls, context, host, binary):
db_service = db.service_get_by_host_and_binary(context, host, binary)
return cls._from_db_object(context, cls(), db_service)
def _check_minimum_version(self):
if not self.obj_attr_is_set('version'):
return
if not self.obj_attr_is_set('binary'):
return
minver = self.get_minimum_version(self._context, self.binary)
if minver > self.version:
raise exception.ServiceTooOld(thisver=self.version,
minver=minver)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
self._check_minimum_version()
updates = self.obj_get_changes()
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self.uuid = updates['uuid']
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
self._send_notification(fields.NotificationAction.CREATE)
@base.remotable
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
self._check_minimum_version()
db_service = db.service_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_service)
self._send_status_update_notification(updates)
def _send_status_update_notification(self, updates):
if set(updates.keys()).intersection(
{'disabled', 'disabled_reason', 'forced_down'}):
self._send_notification(fields.NotificationAction.UPDATE)
def _send_notification(self, action):
payload = service_notification.ServiceStatusPayload(self)
service_notification.ServiceStatusNotification(
publisher=notification.NotificationPublisher.from_service_obj(
self),
event_type=notification.EventType(
object='service',
action=action),
priority=fields.NotificationPriority.INFO,
payload=payload).emit(self._context)
@base.remotable
def destroy(self):
db.service_destroy(self._context, self.id)
self._send_notification(fields.NotificationAction.DELETE)
@classmethod
def enable_min_version_cache(cls):
cls.clear_min_version_cache()
cls._SERVICE_VERSION_CACHING = True
@classmethod
def clear_min_version_cache(cls):
cls._MIN_VERSION_CACHE = {}
@staticmethod
@db.select_db_reader_mode
def _db_service_get_minimum_version(context, binaries, use_slave=False):
return db.service_get_minimum_version(context, binaries)
@base.remotable_classmethod
def get_minimum_version_multi(cls, context, binaries, use_slave=False):
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version called with likely-incorrect '
'binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(action='get_minimum_version',
reason='Invalid binary prefix')
if (not cls._SERVICE_VERSION_CACHING or
any(binary not in cls._MIN_VERSION_CACHE
for binary in binaries)):
min_versions = cls._db_service_get_minimum_version(
context, binaries, use_slave=use_slave)
if min_versions:
min_versions = {binary: version or 0
for binary, version in
min_versions.items()}
cls._MIN_VERSION_CACHE.update(min_versions)
else:
min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
for binary in binaries}
if min_versions:
version = min(min_versions.values())
else:
version = 0
# NOTE(danms): Since our return value is not controlled by object
# schema, be explicit here.
version = int(version)
return version
@base.remotable_classmethod
def get_minimum_version(cls, context, binary, use_slave=False):
return cls.get_minimum_version_multi(context, [binary],
use_slave=use_slave)
def get_minimum_version_all_cells(context, binaries, require_all=False):
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version_all_cells called with '
'likely-incorrect binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(
action='get_minimum_version_all_cells',
reason='Invalid binary prefix')
results = nova_context.scatter_gather_all_cells(
context,
Service._db_service_get_minimum_version,
binaries)
min_version = None
for cell_uuid, result in results.items():
if result is nova_context.did_not_respond_sentinel:
LOG.warning('Cell %s did not respond when getting minimum '
'service version', cell_uuid)
if require_all:
raise exception.CellTimeout()
elif result is nova_context.raised_exception_sentinel:
LOG.warning('Failed to get minimum service version for cell %s',
cell_uuid)
if require_all:
                # NOTE(danms): Okay, this isn't necessarily a timeout, but
                # it's functionally the same from the caller's perspective
# and we logged the fact that it was actually a failure
# for the forensic investigator during the scatter/gather
# routine.
raise exception.CellTimeout()
else:
            # NOTE(danms): Don't consider a zero or None result as the minimum
            # since we're crossing cells and will likely not have all the
            # services being probed.
relevant_versions = [version for version in result.values()
if version]
if relevant_versions:
min_version_cell = min(relevant_versions)
min_version = (min(min_version, min_version_cell)
if min_version else min_version_cell)
# NOTE(danms): If we got no matches at all (such as at first startup)
# then report that as zero to be consistent with the other such
# methods.
return min_version or 0
@base.NovaObjectRegistry.register
class ServiceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Service <= version 1.2
    # Version 1.1: Service version 1.3
# Version 1.2: Service version 1.4
# Version 1.3: Service version 1.5
# Version 1.4: Service version 1.6
# Version 1.5: Service version 1.7
# Version 1.6: Service version 1.8
# Version 1.7: Service version 1.9
# Version 1.8: Service version 1.10
# Version 1.9: Added get_by_binary() and Service version 1.11
# Version 1.10: Service version 1.12
# Version 1.11: Service version 1.13
# Version 1.12: Service version 1.14
# Version 1.13: Service version 1.15
# Version 1.14: Service version 1.16
# Version 1.15: Service version 1.17
# Version 1.16: Service version 1.18
# Version 1.17: Service version 1.19
# Version 1.18: Added include_disabled parameter to get_by_binary()
# Version 1.19: Added get_all_computes_by_hv_type()
VERSION = '1.19'
fields = {
'objects': fields.ListOfObjectsField('Service'),
}
@base.remotable_classmethod
def get_by_topic(cls, context, topic):
db_services = db.service_get_all_by_topic(context, topic)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
# NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
# will be removed so both enabled and disabled hosts are returned
@base.remotable_classmethod
def get_by_binary(cls, context, binary, include_disabled=False):
db_services = db.service_get_all_by_binary(
context, binary, include_disabled=include_disabled)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_by_host(cls, context, host):
db_services = db.service_get_all_by_host(context, host)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all(cls, context, disabled=None, set_zones=False):
db_services = db.service_get_all(context, disabled=disabled)
if set_zones:
db_services = availability_zones.set_availability_zones(
context, db_services)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all_computes_by_hv_type(cls, context, hv_type):
db_services = db.service_get_all_computes_by_hv_type(
context, hv_type, include_disabled=False)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
| true
| true
|
790394d5ea8b5dd0bdf13a15a8555b225ffabcdb
| 698
|
py
|
Python
|
pyimage/contour.py
|
egonw/pyamiimage
|
8e436bae06a0c13a4265a186832e0e679512b7b9
|
[
"Apache-2.0"
] | null | null | null |
pyimage/contour.py
|
egonw/pyamiimage
|
8e436bae06a0c13a4265a186832e0e679512b7b9
|
[
"Apache-2.0"
] | null | null | null |
pyimage/contour.py
|
egonw/pyamiimage
|
8e436bae06a0c13a4265a186832e0e679512b7b9
|
[
"Apache-2.0"
] | null | null | null |
from skimage.measure import find_contours
from skimage import io
from skimage.color import rgb2gray
from matplotlib import pyplot as plt
image = io.imread('contour_finding_test.png')
# image = io.imread('FlowchartDiagram.png')
image = rgb2gray(image)
out = find_contours(image)
print(len(out))
# Find contours at a constant value of 0.8
# contours = find_contours(image, 0.8)
contours = find_contours(image)
# Display the image and plot all contours found
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for contour in contours:
ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
ax.axis('image')
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# io.imshow(image)
# io.show()
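# Editor's addition (illustrative, not in the original script): the same call
# with an explicit iso-value, matching the 0.8 mentioned in the comment above.
contours_08 = find_contours(image, 0.8)
print(len(contours_08))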
| 24.068966
| 54
| 0.73639
|
from skimage.measure import find_contours
from skimage import io
from skimage.color import rgb2gray
from matplotlib import pyplot as plt
image = io.imread('contour_finding_test.png')
image = rgb2gray(image)
out = find_contours(image)
print(len(out))
contours = find_contours(image)
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for contour in contours:
ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
ax.axis('image')
ax.set_xticks([])
ax.set_yticks([])
plt.show()
| true
| true
|
79039637a8ba70cae08df88e95efa8bbdd83dbea
| 87,595
|
py
|
Python
|
fpn/symbols/resnet_v1_101_fpn_dcn_rcnn.py
|
chi3x10/RepMet
|
d5b13e01940bbb7ed59dd1ff073e03c0808f76c0
|
[
"Apache-2.0"
] | 103
|
2019-08-16T11:55:04.000Z
|
2022-03-04T16:47:57.000Z
|
fpn/symbols/resnet_v1_101_fpn_dcn_rcnn.py
|
chi3x10/RepMet
|
d5b13e01940bbb7ed59dd1ff073e03c0808f76c0
|
[
"Apache-2.0"
] | 33
|
2019-05-25T08:42:06.000Z
|
2022-03-08T21:32:10.000Z
|
fpn/symbols/resnet_v1_101_fpn_dcn_rcnn.py
|
chi3x10/RepMet
|
d5b13e01940bbb7ed59dd1ff073e03c0808f76c0
|
[
"Apache-2.0"
] | 18
|
2019-09-14T07:35:39.000Z
|
2021-11-25T04:25:20.000Z
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Copyright (c) 2019 IBM Corp
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Haozhi Qi
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_dcn_rcnn(Symbol):
def __init__(self):
"""
        Use __init__ to define the parameters the network needs.
"""
self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5',
'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
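    # Editor's note (not part of the original file): the Variables created
    # above are the mechanism for sharing parameters across FPN levels. The
    # per-level RPN convolutions are expected to be built with
    # weight=self.shared_param_dict['rpn_conv_weight'] and
    # bias=self.shared_param_dict['rpn_conv_bias'] (and likewise for
    # 'rpn_cls_score' and 'rpn_bbox_pred'), so a single RPN head is applied
    # to every pyramid level.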
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
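        # with_dpyramid swaps the 3x3 conv of this last conv4 unit for a deformable convolution
        # whose offsets are predicted by a companion 3x3 conv (same pattern as res3b3 above).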
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
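        # res5 configuration: with_dilated keeps stride 1 and dilates the 3x3 convs by 2 so the
        # stride-16 resolution is preserved; otherwise res5 downsamples with stride 2 as usual.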
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
# res5a-bottleneck
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
# res5a-shortcut
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
# res5b-bottleneck
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
# res5b-shortcut
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
# res5c-bottleneck
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
# res5c-shortcut
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
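    # The returned C2-C5 features (strides 4/8/16/32; res5 stays at stride 16 when with_dilated=True)
    # are converted into the P2-P6 pyramid by get_fpn_feature below.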
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
# top-down connection
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
# n x (2*A) x H x W => n x 2 x (A*H*W)
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7,
sample_per_part=4, part_size=7):
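        # Two-pass deformable PSROI pooling: a first pooling pass (no_trans=True) gathers plain
        # RoI features, a fully connected layer predicts 2 * part_size * part_size offsets from
        # them, and a second pooling pass applies those offsets (trans_std=0.1) to produce the
        # final RoI feature map.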
offset = mx.contrib.sym.DeformablePSROIPooling(name='offset_' + name + '_t', data=data, rois=rois, group_size=group_size, pooled_size=pooled_size,
sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim,
spatial_scale=spatial_scale)
offset = mx.sym.FullyConnected(name='offset_' + name, data=offset, num_hidden=part_size * part_size * 2, lr_mult=0.01,
weight=self.shared_param_dict['offset_' + param_name + '_weight'], bias=self.shared_param_dict['offset_' + param_name + '_bias'])
offset_reshape = mx.sym.Reshape(data=offset, shape=(-1, 2, part_size, part_size), name='offset_reshape_' + name)
output = mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool_' + name, data=data, rois=rois, trans=offset_reshape, group_size=group_size,
pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim,
spatial_scale=spatial_scale, trans_std=0.1)
return output
def get_symbol(self, cfg, is_train=True):
        # config alias for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
res2, res3, res4, res5 = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
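        # one RPN head per pyramid level; conv/cls/bbox weights are shared across levels
        # through self.shared_param_dict (see get_rpn_subnet)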
rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
'rpn_cls_prob_stride32': rpn_prob_p5,
'rpn_cls_prob_stride16': rpn_prob_p4,
'rpn_cls_prob_stride8': rpn_prob_p3,
'rpn_cls_prob_stride4': rpn_prob_p2,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
}
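        # the merged per-stride RPN score/pred maps are passed as keyword arguments to the
        # pyramid_proposal custom op below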
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
# RPN classification loss
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
# bounding box regression
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
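        # RoI features are pooled from the pyramid by the fpn_roi_pooling custom op
        # (operator_py/fpn_roi_pooling); with_deformable=True makes it use the shared
        # offset_p2..offset_p5 parameters above for deformable PSROI pooling on each level.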
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias,
offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias,
offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias,
offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
# 2 fc
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
# cls_score/bbox_pred
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
# for name in self.shared_param_list:
# if 'offset' in name:
# arg_params[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight'])
# else:
# arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
# arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
# self.init_deformable_convnet(cfg, arg_params, aux_params)
# self.init_weight_rcnn(cfg, arg_params, aux_params)
# self.init_weight_fpn(cfg, arg_params, aux_params)
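        # Build a fresh parameter set below, then copy in only those entries that are missing
        # from (or shape-mismatched with) the loaded checkpoint, so pretrained weights are kept
        # intact when resuming or fine-tuning.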
arg_params2, aux_params2 = {}, {}
for name in self.shared_param_list:
if 'offset' in name:
arg_params2[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight'])
else:
arg_params2[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params2[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_deformable_convnet(cfg, arg_params2, aux_params2)
self.init_weight_rcnn(cfg, arg_params2, aux_params2)
self.init_weight_fpn(cfg, arg_params2, aux_params2)
for k in arg_params2:
if (k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape):
arg_params[k] = arg_params2[k]
for k in aux_params2:
if k not in aux_params:
aux_params[k] = aux_params2[k]
| 84.961203
| 180
| 0.616405
|
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_dcn_rcnn(Symbol):
def __init__(self):
self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5',
'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
        res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
                                               pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
        res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
                                               pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
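        # Same optional deformable 3x3 conv for the last conv4 unit (res4b22) when with_dpyramid is set.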
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
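        # conv5 stage: with_dilated keeps stride 1 and dilates the 3x3 convs by 2 (output stride unchanged),
        # otherwise the stage downsamples with stride 2 as in the standard ResNet-101.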
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
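        # with_dconv replaces the 3x3 conv in each conv5 unit with a deformable convolution
        # (offsets predicted by a small extra conv, 4 deformable groups).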
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
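    # FPN top-down pathway: 1x1 lateral convs on C2-C5, nearest-neighbour upsampling and element-wise
    # sums, then 3x3 convs to smooth each merged map; P6 is a stride-2 3x3 conv on C5.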
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
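    # RPN head applied to every pyramid level; conv weights and biases are shared across levels
    # through self.shared_param_dict, only the symbol names carry a per-level suffix.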
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
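    # Deformable PSRoI pooling: a first no-trans pooling pass feeds an FC layer that predicts
    # per-part offsets, which the second, offset-aware pooling pass then uses.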
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7,
sample_per_part=4, part_size=7):
offset = mx.contrib.sym.DeformablePSROIPooling(name='offset_' + name + '_t', data=data, rois=rois, group_size=group_size, pooled_size=pooled_size,
sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim,
spatial_scale=spatial_scale)
offset = mx.sym.FullyConnected(name='offset_' + name, data=offset, num_hidden=part_size * part_size * 2, lr_mult=0.01,
weight=self.shared_param_dict['offset_' + param_name + '_weight'], bias=self.shared_param_dict['offset_' + param_name + '_bias'])
offset_reshape = mx.sym.Reshape(data=offset, shape=(-1, 2, part_size, part_size), name='offset_reshape_' + name)
output = mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool_' + name, data=data, rois=rois, trans=offset_reshape, group_size=group_size,
pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim,
spatial_scale=spatial_scale, trans_std=0.1)
return output
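    # Assemble the full detection graph: backbone -> FPN -> per-level RPN -> proposals ->
    # deformable FPN RoI pooling -> two FC layers -> classification and box-regression heads.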
def get_symbol(self, cfg, is_train=True):
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
res2, res3, res4, res5 = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
'rpn_cls_prob_stride32': rpn_prob_p5,
'rpn_cls_prob_stride16': rpn_prob_p4,
'rpn_cls_prob_stride8': rpn_prob_p3,
'rpn_cls_prob_stride4': rpn_prob_p2,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
}
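        # Merge the per-level RPN outputs into one kwargs dict for the pyramid_proposal custom op
        # (the dict.items() concatenation below is Python 2 syntax, consistent with the rest of this file).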
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
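        # fpn_roi_pooling custom op: assigns each RoI to a pyramid level and applies deformable
        # RoI pooling on that level, using the shared per-level offset weights and biases above.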
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias,
offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias,
offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias,
offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
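    # Weight-initialisation helpers: newly added detection-head and FPN layers get small Gaussian
    # weights, while all deformable-convolution offset layers start at zero.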
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
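    # init_weight fills the shared RPN/offset parameters, then copies values into
    # arg_params/aux_params only for keys that are missing or have a mismatched shape.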
def init_weight(self, cfg, arg_params, aux_params):
arg_params2, aux_params2 = {}, {}
for name in self.shared_param_list:
if 'offset' in name:
arg_params2[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight'])
else:
arg_params2[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params2[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_deformable_convnet(cfg, arg_params2, aux_params2)
self.init_weight_rcnn(cfg, arg_params2, aux_params2)
self.init_weight_fpn(cfg, arg_params2, aux_params2)
for k in arg_params2:
if (k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape):
arg_params[k] = arg_params2[k]
for k in aux_params2:
if k not in aux_params:
aux_params[k] = aux_params2[k]
| true
| true
|
79039699e8da9a86f9037003a59b5e1b506c12b0
| 878
|
py
|
Python
|
hangman_art.py
|
iliescua/Hangman
|
1496e798b0bca5d0ee90abd81d05e98359e82e32
|
[
"MIT"
] | null | null | null |
hangman_art.py
|
iliescua/Hangman
|
1496e798b0bca5d0ee90abd81d05e98359e82e32
|
[
"MIT"
] | null | null | null |
hangman_art.py
|
iliescua/Hangman
|
1496e798b0bca5d0ee90abd81d05e98359e82e32
|
[
"MIT"
] | null | null | null |
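# Gallows ASCII art: stages[0] is the fully drawn figure and stages[6] the empty frame
# (presumably indexed by lives remaining in the game loop).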
stages = ['''
+---+
| |
O |
/|\ |
/ \ |
|
=========
''', '''
+---+
| |
O |
/|\ |
/ |
|
=========
''', '''
+---+
| |
O |
/|\ |
|
|
=========
''', '''
+---+
| |
O |
/| |
|
|
=========
''', '''
+---+
| |
O |
| |
|
|
=========
''', '''
+---+
| |
O |
|
|
|
=========
''', '''
+---+
| |
|
|
|
|
=========
''']
logo = '''
_
| |
| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __
| '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
__/ |
|___/
'''
| 12.911765
| 47
| 0.083144
|
| true
| true
|
790396b61bf4d5f37393f78f27b7f46c717a0e4c
| 8,168
|
py
|
Python
|
pybind/nos/v7_1_0/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import vlan
class access(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport/access-mac-group-rspan-vlan-classification/access. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The access layer characteristics of this interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__vlan',)
_yang_name = 'access'
_rest_name = 'access'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'hundredgigabitethernet', u'switchport', u'access-mac-group-rspan-vlan-classification', u'access']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'HundredGigabitEthernet', u'switchport', u'access']
def _get_vlan(self):
"""
Getter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
"""
return self.__vlan
def _set_vlan(self, v, load=False):
"""
Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
})
self.__vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_vlan(self):
self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
vlan = __builtin__.property(_get_vlan, _set_vlan)
_pyangbind_elements = {'vlan': vlan, }
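A minimal sketch of instantiating and inspecting this auto-generated container, using only methods defined above; the import path mirrors the file location and assumes the surrounding pybind package is importable:

from pybind.nos.v7_1_0.interface.hundredgigabitethernet.switchport.access_mac_group_rspan_vlan_classification.access import access

cfg = access()           # empty container; the rspan-vlan list starts out empty
print(cfg._path())       # [u'interface', u'hundredgigabitethernet', u'switchport', ...]
print(cfg._rest_path())  # [u'interface', u'HundredGigabitEthernet', u'switchport', u'access']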
| 64.825397
| 995
| 0.727473
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import vlan
class access(PybindBase):
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__vlan',)
_yang_name = 'access'
_rest_name = 'access'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'hundredgigabitethernet', u'switchport', u'access-mac-group-rspan-vlan-classification', u'access']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'HundredGigabitEthernet', u'switchport', u'access']
def _get_vlan(self):
return self.__vlan
def _set_vlan(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
})
self.__vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_vlan(self):
self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
vlan = __builtin__.property(_get_vlan, _set_vlan)
_pyangbind_elements = {'vlan': vlan, }
| true
| true
|
7903978e1bf5aefe5653377b87e799583916b62e
| 897
|
py
|
Python
|
Debugging-4/cipher2_0.py
|
Ena-Sharma/Meraki_Solution
|
1bfff62f6aeb69354712d0b5a9e46ddacff357f5
|
[
"MIT"
] | null | null | null |
Debugging-4/cipher2_0.py
|
Ena-Sharma/Meraki_Solution
|
1bfff62f6aeb69354712d0b5a9e46ddacff357f5
|
[
"MIT"
] | null | null | null |
Debugging-4/cipher2_0.py
|
Ena-Sharma/Meraki_Solution
|
1bfff62f6aeb69354712d0b5a9e46ddacff357f5
|
[
"MIT"
] | null | null | null |
def encrypt():
message = raw_input("Enter the message you want to encrypt")
ascii_message = [ord(char)+3 for char in message]
encrypt_message = [ chr(char) for char in ascii_message]
print ''.join(encrypt_message)
def decrypt():
message = raw_input("Enter the message you want to decrypt")
ascii_message = [ord(char)-3 for char in message]
decrypt_message = [ chr(char) for char in ascii_message]
print ''.join(decrypt_message)
flag = True
while flag == True:
choice = raw_input("What do you want to do? \n1. Encrypt a message 2. Decrypt a message \nEnter 'e' or 'd' respectively!")
if choice =='e' or choice=="1":
encrypt()
elif choice == 'd' or choice=="2":
decrypt()
else:
        play_again = raw_input("Do you want to try again or do you want to exit? (Y/N)")
if play_again == 'Y':
continue
elif play_again == 'N':
break
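Both functions implement a fixed-shift (Caesar) cipher with a shift of 3 applied to raw character codes. A minimal Python 3 sketch of the same idea, written for illustration rather than taken from the exercise file:

SHIFT = 3

def caesar_encrypt(message):
    # shift every character code up by 3
    return ''.join(chr(ord(ch) + SHIFT) for ch in message)

def caesar_decrypt(message):
    # shift every character code back down by 3
    return ''.join(chr(ord(ch) - SHIFT) for ch in message)

assert caesar_decrypt(caesar_encrypt("hello world")) == "hello world"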
| 35.88
| 124
| 0.653289
|
def encrypt():
message = raw_input("Enter the message you want to encrypt")
ascii_message = [ord(char)+3 for char in message]
encrypt_message = [ chr(char) for char in ascii_message]
print ''.join(encrypt_message)
def decrypt():
message = raw_input("Enter the message you want to decrypt")
ascii_message = [ord(char)-3 for char in message]
decrypt_message = [ chr(char) for char in ascii_message]
print ''.join(decrypt_message)
flag = True
while flag == True:
choice = raw_input("What do you want to do? \n1. Encrypt a message 2. Decrypt a message \nEnter 'e' or 'd' respectively!")
if choice =='e' or choice=="1":
encrypt()
elif choice == 'd' or choice=="2":
decrypt()
else:
        play_again = raw_input("Do you want to try again or do you want to exit? (Y/N)")
if play_again == 'Y':
continue
elif play_again == 'N':
break
| false
| true
|
790397efa8cf4438d741c56685a6de6445f3ae7b
| 2,013
|
py
|
Python
|
camcan/utils/file_parsing.py
|
dengemann/engemann-2020-multimodal-brain-age
|
ceffb1e01658e31d19dfc4dc0be7aff1d6d21af5
|
[
"BSD-3-Clause"
] | 6
|
2020-11-11T21:26:20.000Z
|
2022-01-18T17:18:45.000Z
|
camcan/utils/file_parsing.py
|
dengemann/engemann-2020-multimodal-brain-age
|
ceffb1e01658e31d19dfc4dc0be7aff1d6d21af5
|
[
"BSD-3-Clause"
] | 1
|
2022-03-14T07:56:17.000Z
|
2022-03-14T07:56:17.000Z
|
camcan/utils/file_parsing.py
|
dengemann/engemann-2020-multimodal-brain-age
|
ceffb1e01658e31d19dfc4dc0be7aff1d6d21af5
|
[
"BSD-3-Clause"
] | 3
|
2020-06-10T08:34:04.000Z
|
2022-03-14T01:37:08.000Z
|
"""Utility functions for parcinging Freesurfer output files."""
from os.path import join
import nibabel as nb
import numpy as np
def _vectorize_fs_surf(file_path):
"""
Read surface information from a file and turn it into a vector.
Parameters
----------
file_path : str
The path to a file with surface data.
Returns
-------
vectorized_data : numpy.ndarray
Extracted data.
"""
img = nb.load(file_path)
in_data = img.get_fdata().squeeze()
return in_data
def get_area(subject_dir, n_points):
"""
Read area information for the given subject and turn it into a vector.
Data for left and right hemispheres are concatenated.
Parameters
----------
subject_dir : str
The directory to files with surface data.
n_points : int
Defines how many points to take from cortex surface.
Returns
-------
: numpy.ndarray
Extracted data.
"""
AREA_FILES = ('lh.area.mgh', 'rh.area.mgh')
lh_data = _vectorize_fs_surf(join(subject_dir, AREA_FILES[0]))
rh_data = _vectorize_fs_surf(join(subject_dir, AREA_FILES[1]))
n_points = n_points // 2
return np.concatenate((lh_data[:n_points], rh_data[:n_points]), 0)
def get_thickness(subject_dir, n_points):
"""
Read thickness information for the given subject and turn it into a vector.
Data for left and right hemispheres are concatenated.
Parameters
----------
subject_dir : str
The directory to files with surface data.
n_points : int
Defines how many points to take from cortex surface.
Returns
-------
: numpy.ndarray
Extracted data.
"""
    THICKNESS_FILES = ('lh.thickness.mgh', 'rh.thickness.mgh')
lh_data = _vectorize_fs_surf(join(subject_dir, THICKNESS_FILES[0]))
rh_data = _vectorize_fs_surf(join(subject_dir, THICKNESS_FILES[1]))
n_points = n_points // 2
return np.concatenate((lh_data[:n_points], rh_data[:n_points]), 0)
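A minimal usage sketch: the subject directory and point count below are hypothetical, and only assume a FreeSurfer-style folder containing the lh/rh .mgh surface maps read above:

import numpy as np
from camcan.utils.file_parsing import get_area, get_thickness

subject_dir = '/data/camcan/freesurfer/sub-0001/surf'  # hypothetical path
n_points = 20484                                       # hypothetical, split evenly across hemispheres

area = get_area(subject_dir, n_points)            # lh points followed by rh points
thickness = get_thickness(subject_dir, n_points)  # same layout
features = np.concatenate([area, thickness])      # one flat feature vector per subject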
| 23.682353
| 79
| 0.655241
|
from os.path import join
import nibabel as nb
import numpy as np
def _vectorize_fs_surf(file_path):
img = nb.load(file_path)
in_data = img.get_fdata().squeeze()
return in_data
def get_area(subject_dir, n_points):
AREA_FILES = ('lh.area.mgh', 'rh.area.mgh')
lh_data = _vectorize_fs_surf(join(subject_dir, AREA_FILES[0]))
rh_data = _vectorize_fs_surf(join(subject_dir, AREA_FILES[1]))
n_points = n_points // 2
return np.concatenate((lh_data[:n_points], rh_data[:n_points]), 0)
def get_thickness(subject_dir, n_points):
    THICKNESS_FILES = ('lh.thickness.mgh', 'rh.thickness.mgh')
lh_data = _vectorize_fs_surf(join(subject_dir, THICKNESS_FILES[0]))
rh_data = _vectorize_fs_surf(join(subject_dir, THICKNESS_FILES[1]))
n_points = n_points // 2
return np.concatenate((lh_data[:n_points], rh_data[:n_points]), 0)
| true
| true
|
79039872cc9abafdc8159212741d7d68cb5e4148
| 1,566
|
py
|
Python
|
services/dbus.py
|
sourceperl/docker.mqttwarn
|
9d87337f766843c8bdee34eba8d29776e7032009
|
[
"MIT"
] | null | null | null |
services/dbus.py
|
sourceperl/docker.mqttwarn
|
9d87337f766843c8bdee34eba8d29776e7032009
|
[
"MIT"
] | null | null | null |
services/dbus.py
|
sourceperl/docker.mqttwarn
|
9d87337f766843c8bdee34eba8d29776e7032009
|
[
"MIT"
] | 2
|
2016-09-03T09:12:17.000Z
|
2020-03-03T11:58:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Fabian Affolter <fabian()affolter-engineering.ch>'
__copyright__ = 'Copyright 2014 Fabian Affolter'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
HAVE_DBUS=True
try:
import dbus
except ImportError:
HAVE_DBUS=False
def plugin(srv, item):
"""Send a message through dbus to the user's desktop."""
srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)
if not HAVE_DBUS:
srv.logging.error("Cannot send DBUS message; `dbus' module not installed")
return False
text = item.message
summary = item.addrs[0]
app_name = item.get('title', srv.SCRIPTNAME)
replaces_id = 0
service = 'org.freedesktop.Notifications'
path = '/' + service.replace('.', '/')
interface = service
app_icon = '/usr/share/icons/gnome/32x32/places/network-server.png'
expire_timeout = 1000
actions = []
hints = []
try:
srv.logging.debug("Sending message to %s..." % (item.target))
session_bus = dbus.SessionBus()
obj = session_bus.get_object(service, path)
interface = dbus.Interface(obj, interface)
interface.Notify(app_name, replaces_id, app_icon, summary, text,
actions, hints, expire_timeout)
srv.logging.debug("Successfully sent message")
except Exception, e:
srv.logging.error("Error sending message to %s: %s" % (item.target, str(e)))
return False
return True
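Outside of mqttwarn, the same desktop notification can be sent directly; a minimal sketch mirroring the Notify call above, assuming a desktop session bus is available:

import dbus

service = 'org.freedesktop.Notifications'
path = '/' + service.replace('.', '/')

bus = dbus.SessionBus()
obj = bus.get_object(service, path)
notifications = dbus.Interface(obj, service)
# app_name, replaces_id, app_icon, summary, body, actions, hints, expire_timeout
notifications.Notify('demo', 0, '', 'mqttwarn', 'hello from dbus', [], {}, 1000)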
| 32.625
| 98
| 0.649425
|
__author__ = 'Fabian Affolter <fabian()affolter-engineering.ch>'
__copyright__ = 'Copyright 2014 Fabian Affolter'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
HAVE_DBUS=True
try:
import dbus
except ImportError:
HAVE_DBUS=False
def plugin(srv, item):
"""Send a message through dbus to the user's desktop."""
srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)
if not HAVE_DBUS:
srv.logging.error("Cannot send DBUS message; `dbus' module not installed")
return False
text = item.message
summary = item.addrs[0]
app_name = item.get('title', srv.SCRIPTNAME)
replaces_id = 0
service = 'org.freedesktop.Notifications'
path = '/' + service.replace('.', '/')
interface = service
app_icon = '/usr/share/icons/gnome/32x32/places/network-server.png'
expire_timeout = 1000
actions = []
hints = []
try:
srv.logging.debug("Sending message to %s..." % (item.target))
session_bus = dbus.SessionBus()
obj = session_bus.get_object(service, path)
interface = dbus.Interface(obj, interface)
interface.Notify(app_name, replaces_id, app_icon, summary, text,
actions, hints, expire_timeout)
srv.logging.debug("Successfully sent message")
except Exception, e:
srv.logging.error("Error sending message to %s: %s" % (item.target, str(e)))
return False
return True
| false
| true
|
7903989fcebaee5cb6e8974cc5e22a12743d250d
| 2,052
|
py
|
Python
|
solution/322. coin-change.py
|
sundaycat/Leetcode-Practice
|
65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a
|
[
"MIT"
] | null | null | null |
solution/322. coin-change.py
|
sundaycat/Leetcode-Practice
|
65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a
|
[
"MIT"
] | null | null | null |
solution/322. coin-change.py
|
sundaycat/Leetcode-Practice
|
65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a
|
[
"MIT"
] | null | null | null |
from typing import List
'''
1. subproblems: dp(amount) = the minimum number of coins needed to make change for an amount S using the given coin denominations
2. guessing: try every available denomination c_i
3. relate subproblems: dp(amount) = min(dp(amount - c_i) + 1) over all feasible c_i
Time complexity: O(#subproblems * #coins)
'''
class Solution:
# top down solution
def coinChange(self, coins: List[int], amount: int) -> int:
# for amount less than 1, return 0
if amount < 1:
return 0
memo = {}
def helper(coins, amount):
            # for subproblems that we have already solved and memoized
if amount in memo:
return memo[amount]
            # base case: we have reached the bottom of the recursion tree
if amount == 0:
return 0
            # go through all possible coin denominations (branches in the tree)
dp = float('inf')
for coin in coins:
if coin > amount:
continue
# relate subproblems
dp = min(helper(coins, amount - coin) + 1, dp)
memo[amount] = dp
return dp
helper(coins, amount)
return -1 if memo[amount] == float('inf') else memo[amount]
# bottom-up solution, DAG
def coinChange_2(self, coins: List[int], amount: int) -> int:
memo = [float('inf') for i in range(amount + 1)]
# dp[i] = min{dp[i - c_i] + 1} for all c_i
memo[0] = 0
for i in range(amount + 1):
            # check all the states from which state i can be reached with one coin
for coin in coins:
if i < coin:
continue
memo[i] = min(memo[i], memo[i - coin] + 1)
print(memo)
return -1 if memo[amount] == float('inf') else memo[amount]
x = Solution()
# rs = x.coinChange([1, 2, 5], 2)
print(x.coinChange_2([1,2,5], 11))
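A few worked checks of the recurrence dp(amount) = min(dp(amount - c_i) + 1), added for illustration and not part of the original file:

assert x.coinChange([1, 2, 5], 11) == 3      # 5 + 5 + 1
assert x.coinChange_2([1, 2, 5], 3) == 2     # 1 + 2
assert x.coinChange_2([2], 3) == -1          # amount 3 is unreachable with only 2s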
| 28.109589
| 127
| 0.520955
|
from typing import List
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
if amount < 1:
return 0
memo = {}
def helper(coins, amount):
if amount in memo:
return memo[amount]
if amount == 0:
return 0
dp = float('inf')
for coin in coins:
if coin > amount:
continue
dp = min(helper(coins, amount - coin) + 1, dp)
memo[amount] = dp
return dp
helper(coins, amount)
return -1 if memo[amount] == float('inf') else memo[amount]
def coinChange_2(self, coins: List[int], amount: int) -> int:
memo = [float('inf') for i in range(amount + 1)]
memo[0] = 0
for i in range(amount + 1):
for coin in coins:
if i < coin:
continue
memo[i] = min(memo[i], memo[i - coin] + 1)
print(memo)
return -1 if memo[amount] == float('inf') else memo[amount]
x = Solution()
print(x.coinChange_2([1,2,5], 11))
| true
| true
|
7903992ac7de71bacf377fd223285dda8e5412ab
| 24,867
|
py
|
Python
|
tensorflow/python/distribute/cross_device_utils.py
|
wenming2014/tensorflow
|
a102a6a71844e194f3946f6318768c5367f1f16b
|
[
"Apache-2.0"
] | 5
|
2018-07-04T22:14:02.000Z
|
2018-07-04T22:21:43.000Z
|
tensorflow/python/distribute/cross_device_utils.py
|
wenming2014/tensorflow
|
a102a6a71844e194f3946f6318768c5367f1f16b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/distribute/cross_device_utils.py
|
wenming2014/tensorflow
|
a102a6a71844e194f3946f6318768c5367f1f16b
|
[
"Apache-2.0"
] | 1
|
2018-11-30T01:35:01.000Z
|
2018-11-30T01:35:01.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf indicates the grads has nan or
inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different kinds of keys for collectives:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work over the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of counterpart tensors on
  different devices in a device group that need to be all-reduced together.
  *Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
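As a quick sanity check of the packing helpers, extract_ranges is pure Python and can be exercised on its own; an illustrative snippet, not taken from the TensorFlow test suite:

from tensorflow.python.distribute.cross_device_utils import extract_ranges

# Consecutive index runs become [first, last] pairs; isolated indices stay in singles.
ranges, singles = extract_ranges([0, 1, 2, 3, 10, 12, 13])
assert ranges == [[0, 3], [12, 13]]
assert singles == [10]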
| 37.004464
| 102
| 0.699441
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
agg_grads = []
num_devices = len(avail_devices)
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
_lock = threading.Lock()
_thread_local = threading.local()
class CollectiveKeys(object):
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
self._group_key = group_key_start
self._group_key_table = dict()
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0]
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
with ops.name_scope('allreduce'):
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
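# Worked example (traced from the logic above): consecutive indices are
# grouped into [first, last] ranges and isolated indices become singles, e.g.
#   extract_ranges([1, 2, 3, 7, 9, 10]) == ([[1, 3], [9, 10]], [7])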
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values)
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value)
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values()))
else:
return False
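# Brief sketch of the helper above (`v`, `i`, `dense` are placeholder names
# used only for illustration): contains_indexed_slices() recurses through
# lists, tuples and DistributedValues, e.g.
#   contains_indexed_slices([array_ops.ones([2])])            # -> False
#   contains_indexed_slices([ops.IndexedSlices(v, i, dense)]) # -> True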
| true
| true
|
790399e89ce51b58c898b32b4311c39afd9e7625
| 1,518
|
py
|
Python
|
V2_action-how-are-you.py
|
mikpan/amld19-snips-workshop
|
b7a57c2f2758718de79c33ef163e371277cde3bd
|
[
"MIT"
] | null | null | null |
V2_action-how-are-you.py
|
mikpan/amld19-snips-workshop
|
b7a57c2f2758718de79c33ef163e371277cde3bd
|
[
"MIT"
] | null | null | null |
V2_action-how-are-you.py
|
mikpan/amld19-snips-workshop
|
b7a57c2f2758718de79c33ef163e371277cde3bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from hermes_python.hermes import Hermes
INTENT_HOW_ARE_YOU = "mikpan:how_are_you"
INTENT_GOOD = "bezzam:feeling_good"
INTENT_BAD = "bezzam:feeling_bad"
INTENT_ALRIGHT = "bezzam:feeling_alright"
INTENT_FILTER_FEELING = [INTENT_GOOD, INTENT_BAD, INTENT_ALRIGHT]
def main():
with Hermes("localhost:1883") as h:
h.subscribe_intent(INTENT_HOW_ARE_YOU, how_are_you_callback) \
.subscribe_intent(INTENT_GOOD, feeling_good_callback) \
.subscribe_intent(INTENT_BAD, feeling_bad_callback) \
.subscribe_intent(INTENT_ALRIGHT, feeling_alright_callback) \
.start()
def how_are_you_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "I'm doing great. How about you?"
hermes.publish_continue_session(session_id, response, INTENT_FILTER_FEELING)
def feeling_good_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's awesome! I'm happy to hear that."
hermes.publish_end_session(session_id, response)
def feeling_bad_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "Sorry to hear that. I hope you feel better soon."
hermes.publish_end_session(session_id, response)
def feeling_alright_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's cool."
hermes.publish_end_session(session_id, response)
if __name__ == "__main__":
main()
| 31.625
| 80
| 0.755599
|
from hermes_python.hermes import Hermes
INTENT_HOW_ARE_YOU = "mikpan:how_are_you"
INTENT_GOOD = "bezzam:feeling_good"
INTENT_BAD = "bezzam:feeling_bad"
INTENT_ALRIGHT = "bezzam:feeling_alright"
INTENT_FILTER_FEELING = [INTENT_GOOD, INTENT_BAD, INTENT_ALRIGHT]
def main():
with Hermes("localhost:1883") as h:
h.subscribe_intent(INTENT_HOW_ARE_YOU, how_are_you_callback) \
.subscribe_intent(INTENT_GOOD, feeling_good_callback) \
.subscribe_intent(INTENT_BAD, feeling_bad_callback) \
.subscribe_intent(INTENT_ALRIGHT, feeling_alright_callback) \
.start()
def how_are_you_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "I'm doing great. How about you?"
hermes.publish_continue_session(session_id, response, INTENT_FILTER_FEELING)
def feeling_good_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's awesome! I'm happy to hear that."
hermes.publish_end_session(session_id, response)
def feeling_bad_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "Sorry to hear that. I hope you feel better soon."
hermes.publish_end_session(session_id, response)
def feeling_alright_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's cool."
hermes.publish_end_session(session_id, response)
if __name__ == "__main__":
main()
| true
| true
|
79039ae1980eeb4c918c70fc1e15e4c604c8d3eb
| 1,477
|
py
|
Python
|
tests/fuzzer/fuzz_packet.py
|
1ndochine/faucet
|
f207c7af99982b6cad9372172ce94cb077f87997
|
[
"Apache-2.0"
] | 1
|
2018-11-07T14:30:19.000Z
|
2018-11-07T14:30:19.000Z
|
tests/fuzzer/fuzz_packet.py
|
1ndochine/faucet
|
f207c7af99982b6cad9372172ce94cb077f87997
|
[
"Apache-2.0"
] | null | null | null |
tests/fuzzer/fuzz_packet.py
|
1ndochine/faucet
|
f207c7af99982b6cad9372172ce94cb077f87997
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Run AFL repeatedly with externally supplied generated packet from STDIN."""
import logging
import sys
from ryu.controller import dpset
from faucet import faucet
from faucet import faucet_experimental_api
import afl
import fake_packet
ROUNDS = 1
logging.disable(logging.CRITICAL)
def main():
"""Run AFL repeatedly with externally supplied generated packet from STDIN."""
application = faucet.Faucet(
dpset=dpset.DPSet(),
faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
application.start()
# make sure dps are running
if application.valves_manager is not None:
for valve in list(application.valves_manager.valves.values()):
state = valve.dp.dyn_finalized
valve.dp.dyn_finalized = False
valve.dp.running = True
valve.dp.dyn_finalized = state
while afl.loop(ROUNDS):
# receive input from afl
rcv = sys.stdin.read()
data = None
try:
data = bytearray.fromhex(rcv) # pytype: disable=missing-parameter
except (ValueError, TypeError):
continue
# create fake packet
_dp = fake_packet.Datapath(1)
msg = fake_packet.Message(datapath=_dp, cookie=15243729, port=1, data=data, in_port=1)
pkt = fake_packet.RyuEvent(msg)
# send fake packet to faucet
application.packet_in_handler(pkt)
if __name__ == "__main__":
main()
| 28.403846
| 94
| 0.67434
|
import logging
import sys
from ryu.controller import dpset
from faucet import faucet
from faucet import faucet_experimental_api
import afl
import fake_packet
ROUNDS = 1
logging.disable(logging.CRITICAL)
def main():
application = faucet.Faucet(
dpset=dpset.DPSet(),
faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
application.start()
if application.valves_manager is not None:
for valve in list(application.valves_manager.valves.values()):
state = valve.dp.dyn_finalized
valve.dp.dyn_finalized = False
valve.dp.running = True
valve.dp.dyn_finalized = state
while afl.loop(ROUNDS):
rcv = sys.stdin.read()
data = None
try:
data = bytearray.fromhex(rcv)
except (ValueError, TypeError):
continue
_dp = fake_packet.Datapath(1)
msg = fake_packet.Message(datapath=_dp, cookie=15243729, port=1, data=data, in_port=1)
pkt = fake_packet.RyuEvent(msg)
application.packet_in_handler(pkt)
if __name__ == "__main__":
main()
| true
| true
|
79039ba82b11c0fad27e6581ec8013ba61682966
| 1,949
|
py
|
Python
|
mlrose/runners/ga_runner.py
|
tadmorgan/mlrose
|
846408b74f999f122156d4724067a003ea68ea47
|
[
"BSD-3-Clause"
] | null | null | null |
mlrose/runners/ga_runner.py
|
tadmorgan/mlrose
|
846408b74f999f122156d4724067a003ea68ea47
|
[
"BSD-3-Clause"
] | null | null | null |
mlrose/runners/ga_runner.py
|
tadmorgan/mlrose
|
846408b74f999f122156d4724067a003ea68ea47
|
[
"BSD-3-Clause"
] | null | null | null |
import mlrose
from mlrose.algorithms.decorators import short_name
from mlrose.runners._runner_base import _RunnerBase
"""
Example usage:
experiment_name = 'example_experiment'
problem = TSPGenerator.generate(seed=SEED, number_of_cities=22)
ga = GARunner(problem=problem,
experiment_name=experiment_name,
output_directory=OUTPUT_DIRECTORY,
seed=SEED,
iteration_list=2 ** np.arange(12),
max_attempts=1000,
population_sizes=[150, 200, 300],
mutation_rates=[0.4, 0.5, 0.6])
# the two data frames will contain the results
df_run_stats, df_run_curves = ga.run()
"""
@short_name('ga')
class GARunner(_RunnerBase):
def __init__(self, problem, experiment_name, seed, iteration_list, population_sizes, mutation_rates,
hamming_factors=None, hamming_factor_decays=None, max_attempts=500, generate_curves=True, **kwargs):
super().__init__(problem=problem, experiment_name=experiment_name, seed=seed, iteration_list=iteration_list,
max_attempts=max_attempts, generate_curves=generate_curves,
**kwargs)
self.population_sizes = population_sizes
self.mutation_rates = mutation_rates
self.hamming_factors = hamming_factors
self.hamming_factor_decays = hamming_factor_decays
def run(self):
return super().run_experiment_(algorithm=mlrose.genetic_alg,
pop_size=('Population Size', self.population_sizes),
mutation_prob=('Mutation Rate', self.mutation_rates),
hamming_factor=('Hamming Factor', self.hamming_factors),
hamming_decay_factor=('Hamming Factor Decay Rate', self.hamming_factor_decays))
| 40.604167
| 118
| 0.623397
|
import mlrose
from mlrose.algorithms.decorators import short_name
from mlrose.runners._runner_base import _RunnerBase
@short_name('ga')
class GARunner(_RunnerBase):
def __init__(self, problem, experiment_name, seed, iteration_list, population_sizes, mutation_rates,
hamming_factors=None, hamming_factor_decays=None, max_attempts=500, generate_curves=True, **kwargs):
super().__init__(problem=problem, experiment_name=experiment_name, seed=seed, iteration_list=iteration_list,
max_attempts=max_attempts, generate_curves=generate_curves,
**kwargs)
self.population_sizes = population_sizes
self.mutation_rates = mutation_rates
self.hamming_factors = hamming_factors
self.hamming_factor_decays = hamming_factor_decays
def run(self):
return super().run_experiment_(algorithm=mlrose.genetic_alg,
pop_size=('Population Size', self.population_sizes),
mutation_prob=('Mutation Rate', self.mutation_rates),
hamming_factor=('Hamming Factor', self.hamming_factors),
hamming_decay_factor=('Hamming Factor Decay Rate', self.hamming_factor_decays))
| true
| true
|
79039be9565aafec6aba170a6f41d7a79040022f
| 606
|
py
|
Python
|
app/config.py
|
bortels/awsfed
|
fea126c63501e5138579efd6127f7ff0550520b2
|
[
"MIT"
] | 2
|
2019-09-25T21:34:21.000Z
|
2019-09-26T20:49:14.000Z
|
app/config.py
|
bortels/awsfed
|
fea126c63501e5138579efd6127f7ff0550520b2
|
[
"MIT"
] | null | null | null |
app/config.py
|
bortels/awsfed
|
fea126c63501e5138579efd6127f7ff0550520b2
|
[
"MIT"
] | null | null | null |
# You should modify this for your own use.
# In particular, set the FQDN to your domain name, and
# pick and set a secure SECRET_KEY. If you are going
# to run HA, you will want to modify the SQLALCHEMY
# variables to point to your shared server rather than
# SQLite3.
import os
ENV = os.environ.get("ENV", "dev")
SECRET_KEY = 'top-secret'
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
PERMANENT_SESSION_LIFETIME = 60 * 60 * 20
BOOTSTRAP_CDN_FORCE_SSL = True
BOOTSTRAP_SERVE_LOCAL = True
SCHEME = "https"
FQDN = f'fed-{ENV}.bortels.us'
URL = f'{SCHEME}://{FQDN}'
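# Example (illustrative): launching the app with ENV=prod in the environment
# yields FQDN == 'fed-prod.bortels.us' and URL == 'https://fed-prod.bortels.us';
# with no ENV set, the 'dev' default is used instead.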
| 30.3
| 54
| 0.745875
|
import os
ENV = os.environ.get("ENV", "dev")
SECRET_KEY = 'top-secret'
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
PERMANENT_SESSION_LIFETIME = 60 * 60 * 20
BOOTSTRAP_CDN_FORCE_SSL = True
BOOTSTRAP_SERVE_LOCAL = True
SCHEME = "https"
FQDN = f'fed-{ENV}.bortels.us'
URL = f'{SCHEME}://{FQDN}'
| true
| true
|
79039bfa8caf37ad741b73dddbeb2b6259b56725
| 59,694
|
py
|
Python
|
tools/swarming_client/tests/swarming_test.py
|
zipated/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/swarming_client/tests/swarming_test.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/swarming_client/tests/swarming_test.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import json
import logging
import os
import re
import StringIO
import sys
import tempfile
import threading
import time
import traceback
import unittest
# net_utils adjusts sys.path.
import net_utils
from depot_tools import auto_stub
import auth
import isolateserver
import swarming
import test_utils
from depot_tools import fix_encoding
from utils import file_path
from utils import logging_utils
from utils import subprocess42
from utils import tools
import httpserver_mock
import isolateserver_mock
FILE_HASH = u'1' * 40
TEST_NAME = u'unit_tests'
OUTPUT = 'Ran stuff\n'
SHARD_OUTPUT_1 = 'Shard 1 of 3.'
SHARD_OUTPUT_2 = 'Shard 2 of 3.'
SHARD_OUTPUT_3 = 'Shard 3 of 3.'
def gen_yielded_data(index, **kwargs):
"""Returns an entry as it would be yielded by yield_results()."""
return index, gen_result_response(**kwargs)
def get_results(keys, output_collector=None):
"""Simplifies the call to yield_results().
The timeout is hard-coded to 10 seconds.
"""
return list(
swarming.yield_results(
'https://host:9001', keys, 10., None, True,
output_collector, False, True))
def collect(url, task_ids, task_stdout=('console', 'json')):
"""Simplifies the call to swarming.collect()."""
return swarming.collect(
swarming=url,
task_ids=task_ids,
timeout=10,
decorate=True,
print_status_updates=True,
task_summary_json=None,
task_output_dir=None,
task_output_stdout=task_stdout,
include_perf=False)
def main(args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
dispatcher = swarming.subcommand.CommandDispatcher('swarming')
return dispatcher.execute(swarming.OptionParserSwarming(), args)
def gen_properties(**kwargs):
out = {
'caches': [],
'cipd_input': None,
'command': None,
'relative_cwd': None,
'dimensions': [
{'key': 'foo', 'value': 'bar'},
{'key': 'os', 'value': 'Mac'},
],
'env': [],
'env_prefixes': [],
'execution_timeout_secs': 60,
'extra_args': ['--some-arg', '123'],
'grace_period_secs': 30,
'idempotent': False,
'inputs_ref': {
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
'io_timeout_secs': 60,
'outputs': [],
'secret_bytes': None,
}
out.update(kwargs)
return out
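# For example, gen_properties(idempotent=True) returns the defaults above with
# only 'idempotent' overridden; the tests below build their expected request
# payloads this way.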
def gen_request_data(properties=None, **kwargs):
out = {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(**(properties or {})),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
}
out.update(kwargs)
return out
def gen_request_response(request, **kwargs):
# As seen in services/swarming/handlers_api.py.
out = {
'request': request.copy(),
'task_id': '12300',
}
out.update(kwargs)
return out
def gen_result_response(**kwargs):
out = {
u'bot_id': u'swarm6',
u'completed_ts': u'2014-09-24T13:49:16.012345',
u'created_ts': u'2014-09-24T13:49:03.012345',
u'duration': 0.9636809825897217,
u'exit_code': 0,
u'failure': False,
u'internal_failure': False,
u'modified_ts': u'2014-09-24T13:49:17.012345',
u'name': u'heartbeat-canary-2014-09-24_13:49:01-os=Ubuntu',
u'server_versions': [u'1'],
u'started_ts': u'2014-09-24T13:49:09.012345',
u'state': 'COMPLETED',
u'tags': [u'cpu:x86', u'priority:100', u'user:joe@localhost'],
u'task_id': u'10100',
u'try_number': 1,
u'user': u'joe@localhost',
}
out.update(kwargs)
return out
# Silence pylint 'Access to a protected member _Event of a client class'.
class NonBlockingEvent(threading._Event): # pylint: disable=W0212
"""Just like threading.Event, but a class and ignores timeout in 'wait'.
Intended to be used as a mock for threading.Event in tests.
"""
def wait(self, timeout=None):
return super(NonBlockingEvent, self).wait(0)
class SwarmingServerHandler(httpserver_mock.MockHandler):
"""An extremely minimal implementation of the swarming server API v1.0."""
def do_GET(self):
logging.info('S GET %s', self.path)
if self.path == '/auth/api/v1/server/oauth_config':
self.send_json({
'client_id': 'c',
'client_not_so_secret': 's',
'primary_url': self.server.url})
elif self.path == '/auth/api/v1/accounts/self':
self.send_json({'identity': 'user:joe', 'xsrf_token': 'foo'})
else:
m = re.match(r'/api/swarming/v1/task/(\d+)/request', self.path)
if m:
logging.info('%s', m.group(1))
self.send_json(self.server.tasks[int(m.group(1))])
else:
        self.send_json({'a': 'b'})
#raise NotImplementedError(self.path)
def do_POST(self):
logging.info('POST %s', self.path)
raise NotImplementedError(self.path)
class MockSwarmingServer(httpserver_mock.MockServer):
_HANDLER_CLS = SwarmingServerHandler
def __init__(self):
super(MockSwarmingServer, self).__init__()
self._server.tasks = {}
class Common(object):
def setUp(self):
self._tempdir = None
self.mock(auth, 'ensure_logged_in', lambda _: None)
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
self.mock(logging_utils, 'prepare_logging', lambda *args: None)
self.mock(logging_utils, 'set_console_level', lambda *args: None)
def tearDown(self):
if self._tempdir:
file_path.rmtree(self._tempdir)
if not self.has_failed():
self._check_output('', '')
@property
def tempdir(self):
"""Creates the directory on first reference."""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(prefix=u'swarming_test')
return self._tempdir
maxDiff = None
def _check_output(self, out, err):
self.assertMultiLineEqual(out, sys.stdout.getvalue())
self.assertMultiLineEqual(err, sys.stderr.getvalue())
# Flush their content by mocking them again.
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
def main_safe(self, args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
# pylint: disable=bare-except
try:
return main(args)
except:
data = '%s\nSTDOUT:\n%s\nSTDERR:\n%s' % (
traceback.format_exc(), sys.stdout.getvalue(), sys.stderr.getvalue())
self.fail(data)
class NetTestCase(net_utils.TestCase, Common):
"""Base class that defines the url_open mock."""
def setUp(self):
net_utils.TestCase.setUp(self)
Common.setUp(self)
self.mock(time, 'sleep', lambda _: None)
self.mock(subprocess42, 'call', lambda *_: self.fail())
self.mock(threading, 'Event', NonBlockingEvent)
class TestIsolated(auto_stub.TestCase, Common):
"""Test functions with isolated_ prefix."""
def setUp(self):
auto_stub.TestCase.setUp(self)
Common.setUp(self)
self._isolate = isolateserver_mock.MockIsolateServer()
self._swarming = MockSwarmingServer()
def tearDown(self):
try:
self._isolate.close()
self._swarming.close()
finally:
Common.tearDown(self)
auto_stub.TestCase.tearDown(self)
def test_reproduce_isolated(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
# 'out' is the default value for --output-dir.
outdir = os.path.join(self.tempdir, 'out')
self.assertTrue(os.path.isdir(outdir))
self.assertEqual(
[sys.executable, u'main.py', u'foo', outdir, '--bar'], cmd)
expected = os.environ.copy()
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(os.path.abspath('work')), cwd)
return 0
self.mock(subprocess42, 'call', call)
main_hash = self._isolate.add_content_compressed(
'default-gzip', 'not executed')
isolated = {
'files': {
'main.py': {
'h': main_hash,
's': 12,
'm': 0700,
},
},
'command': ['python', 'main.py'],
}
isolated_hash = self._isolate.add_content_compressed(
'default-gzip', json.dumps(isolated))
self._swarming._server.tasks[123] = {
'properties': {
'inputs_ref': {
'isolatedserver': self._isolate.url,
'namespace': 'default-gzip',
'isolated': isolated_hash,
},
'extra_args': ['foo', '${ISOLATED_OUTDIR}'],
'secret_bytes': None,
},
}
ret = self.main_safe(
[
'reproduce', '--swarming', self._swarming.url, '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
class TestSwarmingTrigger(NetTestCase):
def test_trigger_task_shards_2_shards(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id=None,
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request_1 = swarming.task_request_to_raw_request(task_request)
request_1['name'] = u'unit_tests:0:2'
request_1['task_slices'][0]['properties']['env'] = [
{'key': 'GTEST_SHARD_INDEX', 'value': '0'},
{'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
]
result_1 = gen_request_response(request_1)
request_2 = swarming.task_request_to_raw_request(task_request)
request_2['name'] = u'unit_tests:1:2'
request_2['task_slices'][0]['properties']['env'] = [
{'key': 'GTEST_SHARD_INDEX', 'value': '1'},
{'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
]
result_2 = gen_request_response(request_2, task_id='12400')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request_1},
result_1,
),
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request_2},
result_2,
),
])
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
task_request=task_request,
shards=2)
expected = {
u'unit_tests:0:2': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
},
u'unit_tests:1:2': {
'shard_index': 1,
'task_id': '12400',
'view_url': 'https://localhost:1/user/task/12400',
},
}
self.assertEqual(expected, tasks)
def test_trigger_task_shards_priority_override(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request = swarming.task_request_to_raw_request(task_request)
self.assertEqual('123', request['parent_task_id'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
shards=1,
task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
def test_trigger_cipd_package(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=swarming.CipdInput(
client_package=None,
packages=[
swarming.CipdPackage(
package_name='mypackage',
path='path/to/package',
version='abc123')],
server=None),
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request = swarming.task_request_to_raw_request(task_request)
expected = {
'client_package': None,
'packages': [{
'package_name': 'mypackage',
'path': 'path/to/package',
'version': 'abc123',
}],
'server': None
}
self.assertEqual(
expected, request['task_slices'][0]['properties']['cipd_input'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
shards=1,
task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
class TestSwarmingCollection(NetTestCase):
def test_success(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT)]
self.assertEqual(expected, get_results(['10100']))
def test_failure(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT, exit_code=1)]
self.assertEqual(expected, get_results(['10100']))
def test_no_ids(self):
actual = get_results([])
self.assertEqual([], actual)
def test_url_errors(self):
self.mock(logging, 'error', lambda *_, **__: None)
# NOTE: get_results() hardcodes timeout=10.
now = {}
lock = threading.Lock()
def get_now():
t = threading.current_thread()
with lock:
return now.setdefault(t, range(10)).pop(0)
self.mock(swarming.net, 'sleep_before_retry', lambda _x, _y: None)
self.mock(swarming, 'now', get_now)
# The actual number of requests here depends on 'now' progressing to 10
# seconds. It's called once per loop. Loop makes 9 iterations.
self.expected_requests(
9 * [
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
None,
)
])
actual = get_results(['10100'])
self.assertEqual([], actual)
self.assertTrue(all(not v for v in now.itervalues()), now)
def test_many_shards(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3),
]
actual = get_results(['10100', '10200', '10300'])
self.assertEqual(expected, sorted(actual))
def test_output_collector_called(self):
# Three shards, one failed. All results are passed to output collector.
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
class FakeOutputCollector(object):
def __init__(self):
self.results = []
self._lock = threading.Lock()
def process_shard_result(self, index, result):
with self._lock:
self.results.append((index, result))
output_collector = FakeOutputCollector()
get_results(['10100', '10200', '10300'], output_collector)
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3, exit_code=1),
]
self.assertEqual(sorted(expected), sorted(output_collector.results))
def test_collect_nothing(self):
self.mock(swarming, 'yield_results', lambda *_: [])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
self._check_output('', 'Results from some shards are missing: 0, 1\n')
def test_collect_success(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_success_nostdout(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100'], []))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_fail(self):
data = gen_result_response(output='Foo', exit_code=-9)
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(-9, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+-------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+-------------------------------------------------------+',
'Foo',
'+-------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: -9 |',
'+-------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_one_missing(self):
data = gen_result_response(output='Foo')
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'',
'Total duration: 1.0s',
''))
self._check_output(expected, 'Results from some shards are missing: 1\n')
def test_collect_multi(self):
actual_calls = []
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks):
self.assertIs(storage.__class__, isolateserver.Storage)
self.assertIs(cache.__class__, isolateserver.MemoryCache)
# Ensure storage is pointing to required location.
self.assertEqual('https://localhost:2', storage.location)
self.assertEqual('default', storage.namespace)
self.assertEqual(False, use_symlinks)
actual_calls.append((isolated_hash, outdir))
self.mock(isolateserver, 'fetch_isolated', fetch_isolated)
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index in xrange(2):
collector.process_shard_result(
index,
gen_result_response(
outputs_ref={
'isolated': str(index) * 40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
}))
summary = collector.finalize()
expected_calls = [
('0'*40, os.path.join(self.tempdir, '0')),
('1'*40, os.path.join(self.tempdir, '1')),
]
self.assertEqual(expected_calls, actual_calls)
# Ensure collected summary is correct.
outputs_refs = [
{
'isolated': '0'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '0'*40,
},
{
'isolated': '1'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '1'*40,
},
]
expected = {
'shards': [gen_result_response(outputs_ref=o) for o in outputs_refs],
}
self.assertEqual(expected, summary)
# Ensure summary dumped to a file is correct as well.
with open(os.path.join(self.tempdir, 'summary.json'), 'r') as f:
summary_dump = json.load(f)
self.assertEqual(expected, summary_dump)
def test_ensures_same_server(self):
self.mock(logging, 'error', lambda *_: None)
# Two shard results, attempt to use different servers.
actual_calls = []
self.mock(
isolateserver, 'fetch_isolated',
lambda *args: actual_calls.append(args))
data = [
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server1',
'namespace': 'namespace',
'isolated':'hash1',
}),
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server2',
'namespace': 'namespace',
'isolated':'hash1',
}),
]
# Feed them to collector.
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index, result in enumerate(data):
collector.process_shard_result(index, result)
collector.finalize()
# Only first fetch is made, second one is ignored.
self.assertEqual(1, len(actual_calls))
isolated_hash, storage, _, outdir, _ = actual_calls[0]
self.assertEqual(
('hash1', os.path.join(self.tempdir, '0')),
(isolated_hash, outdir))
self.assertEqual('https://server1', storage.location)
class TestMain(NetTestCase):
# Tests calling main().
def test_bot_delete(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/foo/delete',
{'method': 'POST', 'data': {}},
{},
),
])
ret = self.main_safe(
['bot_delete', '--swarming', 'https://localhost:1', 'foo', '--force'])
self._check_output('', '')
self.assertEqual(0, ret)
def test_trigger_raw_cmd(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_raw_cmd_isolated(self):
# Minimalist use.
request = {
'name': u'None/foo=bar/' + FILE_HASH,
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
},
io_timeout_secs=1200),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--isolate-server', 'https://localhost:2',
'--isolated', FILE_HASH,
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar/' + FILE_HASH + u'\n'
u'To collect results, use:\n'
u' swarming.py collect -S https://localhost:1 12300\n'
u'Or visit:\n'
u' https://localhost:1/user/task/12300\n',
u'')
def test_trigger_raw_cmd_with_service_account(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200),
},
],
'service_account': 'bot',
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--service-account', 'bot',
'--raw-cmd',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_hash(self):
# pylint: disable=unused-argument
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_and_json(self):
# pylint: disable=unused-argument
write_json_calls = []
self.mock(tools, 'write_json', lambda *args: write_json_calls.append(args))
subprocess_calls = []
self.mock(subprocess42, 'call', lambda *c: subprocess_calls.append(c))
self.mock(swarming, 'now', lambda: 123456)
isolated = os.path.join(self.tempdir, 'zaz.isolated')
content = '{}'
with open(isolated, 'wb') as f:
f.write(content)
isolated_hash = isolateserver_mock.hash_content(content)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--idempotent',
'--task-name', 'unit_tests',
'--dump-json', 'foo.json',
'--isolated', isolated_hash,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self.assertEqual([], subprocess_calls)
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 --json foo.json\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
expected = [
(
u'foo.json',
{
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
},
True,
),
]
self.assertEqual(expected, write_json_calls)
def test_trigger_cipd(self):
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
cipd_input={
'client_package': None,
'packages': [
{
'package_name': 'super/awesome/pkg',
'path': 'path/to/pkg',
'version': 'version:42',
},
],
'server': None,
},
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--cipd-package', 'path/to/pkg:super/awesome/pkg:version:42',
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_no_request(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host',
'--isolate-server', 'https://host', '-T', 'foo',
          '-d', 'os', 'amiga',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]\n'
'\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_env_vars(self):
with self.assertRaises(SystemExit):
main(['trigger'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_swarming_env_var(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'ISOLATE_SERVER': 'https://host'}):
        main(['trigger', '-T', 'foo', 'foo.isolated'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_isolate_server(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'SWARMING_SERVER': 'https://host'}):
main(['trigger', 'foo.isolated', '-d', 'os', 'amiga'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_dimension(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host', '--raw-cmd', '--', 'foo',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Please at least specify one --dimension\n')
def test_collect_default_json(self):
j = os.path.join(self.tempdir, 'foo.json')
data = {
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
relative_cwd='deeep'),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
}
with open(j, 'wb') as f:
json.dump(data, f)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://host', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
self.assertEqual(3670., timeout)
self.assertEqual(True, decorate)
self.assertEqual(True, print_status_updates)
self.assertEqual('/a', task_summary_json)
self.assertEqual('/b', task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
self.mock(swarming, 'collect', stub_collect)
self.main_safe(
['collect', '--swarming', 'https://host', '--json', j, '--decorate',
'--print-status-updates', '--task-summary-json', '/a',
'--task-output-dir', '/b', '--task-output-stdout', 'all'])
self._check_output('Fake output\n', '')
def test_post(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
self.expected_requests(
[
(
'http://localhost:1/api/swarming/v1/tasks/new',
{'data': '{"a":"b"}', 'method': 'POST'},
'{"yo":"dawg"}',
{},
),
])
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(0, ret)
self.assertEqual('{"yo":"dawg"}', out.getvalue())
self.assertEqual('', err.getvalue())
def test_post_fail(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(1, ret)
self.assertEqual('', out.getvalue())
self.assertEqual('No response!\n', err.getvalue())
def test_query_base(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?limit=200',
{},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1', 'bot/botid/tasks',
])
self._check_output('{\n "yo": "dawg"\n}\n', '')
self.assertEqual(0, ret)
def test_query_cursor(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&limit=2',
{},
{
'cursor': '%',
'extra': False,
'items': ['A'],
},
),
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&cursor=%25&limit=1',
{},
{
'cursor': None,
'items': ['B'],
'ignored': True,
},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1',
'bot/botid/tasks?foo=bar',
'--limit', '2',
])
expected = (
'{\n'
' "extra": false, \n'
' "items": [\n'
' "A", \n'
' "B"\n'
' ]\n'
'}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_reproduce(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
w = os.path.abspath('work')
self.assertEqual([os.path.join(w, 'foo'), '--bar'], cmd)
expected = os.environ.copy()
expected['aa'] = 'bb'
expected['PATH'] = os.pathsep.join(
(os.path.join(w, 'foo', 'bar'), os.path.join(w, 'second'),
expected['PATH']))
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(w), cwd)
return 0
self.mock(subprocess42, 'call', call)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/123/request',
{},
{
'properties': {
'command': ['foo'],
'env': [
{'key': 'aa', 'value': 'bb'},
],
'env_prefixes': [
{'key': 'PATH', 'value': ['foo/bar', 'second']},
],
'secret_bytes': None,
},
},
),
])
ret = self.main_safe(
[
'reproduce', '--swarming', 'https://localhost:1', '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
def test_run(self):
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://localhost:1', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
self.assertEqual(25210., timeout)
self.assertEqual(None, decorate)
self.assertEqual(None, print_status_updates)
self.assertEqual(None, task_summary_json)
self.assertEqual(None, task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
return 0
self.mock(swarming, 'collect', stub_collect)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'run',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (ret, actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar\nFake output\n', '')
def test_cancel(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/cancel',
{'data': {'kill_running': False}, 'method': 'POST'},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'cancel', '--swarming', 'https://localhost:1', '10100',
])
self._check_output('', '')
self.assertEqual(0, ret)
def test_collect_timeout_zero(self):
j = os.path.join(self.tempdir, 'foo.json')
pending = gen_result_response(state='PENDING')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/result',
{'retry_50x': True},
pending,
),
])
self.main_safe(
[
'collect', '--swarming', 'https://localhost:1',
'--task-summary-json', j, '--timeout', '-1', '10100',
])
self._check_output('swarm6: 10100 0\n', '')
with open(j, 'r') as f:
actual = json.load(f)
self.assertEqual({u'shards': [pending]}, actual)
class TestCommandBot(NetTestCase):
# Specialized test fixture for command 'bot'.
def setUp(self):
super(TestCommandBot, self).setUp()
# Sample data retrieved from actual server.
self.now = unicode(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
self.bot_1 = {
u'bot_id': u'swarm1',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm1']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.1',
u'hostname': u'swarm1.example.com',
u'internal_ip': u'192.168.0.1',
u'is_dead': True,
u'last_seen_ts': 'A long time ago',
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_2 = {
u'bot_id': u'swarm2',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [
u'15ad',
u'15ad:0405',
u'VMware Virtual SVGA 3D Graphics Adapter',
]},
{u'key': u'id', u'value': [u'swarm2']},
{u'key': u'os', u'value': [u'Windows', u'Windows-6.1']},
],
u'external_ip': u'1.1.1.2',
u'hostname': u'swarm2.example.com',
u'internal_ip': u'192.168.0.2',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_3 = {
u'bot_id': u'swarm3',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'4']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [u'15ad', u'15ad:0405']},
{u'key': u'id', u'value': [u'swarm3']},
{u'key': u'os', u'value': [u'Mac', u'Mac-10.9']},
],
u'external_ip': u'1.1.1.3',
u'hostname': u'swarm3.example.com',
u'internal_ip': u'192.168.0.3',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'148569b73a89501',
u'task_name': u'browser_tests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_4 = {
u'bot_id': u'swarm4',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm4']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.4',
u'hostname': u'swarm4.example.com',
u'internal_ip': u'192.168.0.4',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'14856971a64c601',
u'task_name': u'base_unittests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
def mock_swarming_api(self, bots, cursor):
"""Returns fake /api/swarming/v1/bots/list data."""
# Sample data retrieved from actual server.
return {
u'items': bots,
u'cursor': cursor,
u'death_timeout': 1800.0,
u'limit': 4,
u'now': unicode(self.now),
}
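  def _example_follow_cursor(self, fetch_page):
    # Illustrative sketch, not used by the tests below: shows how a caller is
    # expected to walk the paginated bots/list responses that mock_swarming_api()
    # fakes, following 'cursor' until the server returns None. 'fetch_page' is a
    # hypothetical callable that takes the previous cursor and returns one
    # response dict shaped like the one above.
    bots = []
    cursor = None
    while True:
      page = fetch_page(cursor)
      bots.extend(page['items'])
      cursor = page.get('cursor')
      if not cursor:
        return bots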
def test_bots(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(['bots', '--swarming', 'https://localhost:1'])
expected = (
u'swarm2\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": '
'["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics Adapter"], '
'"id": ["swarm2"], "os": ["Windows", "Windows-6.1"]}\n'
'swarm3\n'
' {"cores": ["4"], "cpu": ["x86", "x86-64"], "gpu": ["15ad", '
'"15ad:0405"], "id": ["swarm3"], "os": ["Mac", "Mac-10.9"]}\n'
u' task: 148569b73a89501\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_bare(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(
['bots', '--swarming', 'https://localhost:1', '--bare'])
self._check_output("swarm2\nswarm3\nswarm4\n", '')
self.assertEqual(0, ret)
def test_bots_filter(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=FALSE&is_busy=TRUE&is_mp=NONE&dimensions=os%3AWindows',
{},
self.mock_swarming_api([self.bot_2], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--busy',
'--dimension', 'os', 'Windows',
])
expected = (
u'swarm2\n {"cores": ["8"], "cpu": ["x86", "x86-64"], '
'"gpu": ["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics '
'Adapter"], "id": ["swarm2"], '
'"os": ["Windows", "Windows-6.1"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_keep_dead(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=NONE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_1, self.bot_4], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--keep-dead',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_dead_only(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=TRUE&is_busy=NONE&is_mp=NONE&dimensions=os%3AUbuntu',
{},
self.mock_swarming_api([self.bot_1], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--dimension', 'os', 'Ubuntu', '--dead-only',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
if __name__ == '__main__':
fix_encoding.fix_encoding()
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
for e in ('ISOLATE_SERVER', 'SWARMING_TASK_ID', 'SWARMING_SERVER'):
os.environ.pop(e, None)
unittest.main()
| 31.401368
| 80
| 0.526184
|
import datetime
import json
import logging
import os
import re
import StringIO
import sys
import tempfile
import threading
import time
import traceback
import unittest
import net_utils
from depot_tools import auto_stub
import auth
import isolateserver
import swarming
import test_utils
from depot_tools import fix_encoding
from utils import file_path
from utils import logging_utils
from utils import subprocess42
from utils import tools
import httpserver_mock
import isolateserver_mock
FILE_HASH = u'1' * 40
TEST_NAME = u'unit_tests'
OUTPUT = 'Ran stuff\n'
SHARD_OUTPUT_1 = 'Shard 1 of 3.'
SHARD_OUTPUT_2 = 'Shard 2 of 3.'
SHARD_OUTPUT_3 = 'Shard 3 of 3.'
def gen_yielded_data(index, **kwargs):
"""Returns an entry as it would be yielded by yield_results()."""
return index, gen_result_response(**kwargs)
def get_results(keys, output_collector=None):
"""Simplifies the call to yield_results().
The timeout is hard-coded to 10 seconds.
"""
return list(
swarming.yield_results(
'https://host:9001', keys, 10., None, True,
output_collector, False, True))
def collect(url, task_ids, task_stdout=('console', 'json')):
"""Simplifies the call to swarming.collect()."""
return swarming.collect(
swarming=url,
task_ids=task_ids,
timeout=10,
decorate=True,
print_status_updates=True,
task_summary_json=None,
task_output_dir=None,
task_output_stdout=task_stdout,
include_perf=False)
def main(args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
dispatcher = swarming.subcommand.CommandDispatcher('swarming')
return dispatcher.execute(swarming.OptionParserSwarming(), args)
def gen_properties(**kwargs):
out = {
'caches': [],
'cipd_input': None,
'command': None,
'relative_cwd': None,
'dimensions': [
{'key': 'foo', 'value': 'bar'},
{'key': 'os', 'value': 'Mac'},
],
'env': [],
'env_prefixes': [],
'execution_timeout_secs': 60,
'extra_args': ['--some-arg', '123'],
'grace_period_secs': 30,
'idempotent': False,
'inputs_ref': {
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
'io_timeout_secs': 60,
'outputs': [],
'secret_bytes': None,
}
out.update(kwargs)
return out
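# Illustrative note, not part of the original file: gen_properties() merges
# keyword overrides into the default task properties, e.g.
# gen_properties(idempotent=True, io_timeout_secs=1200) returns the defaults with
# only those two keys replaced. gen_request_data() below applies the same
# override pattern to the whole task request.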
def gen_request_data(properties=None, **kwargs):
out = {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(**(properties or {})),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
}
out.update(kwargs)
return out
def gen_request_response(request, **kwargs):
# As seen in services/swarming/handlers_api.py.
out = {
'request': request.copy(),
'task_id': '12300',
}
out.update(kwargs)
return out
def gen_result_response(**kwargs):
out = {
u'bot_id': u'swarm6',
u'completed_ts': u'2014-09-24T13:49:16.012345',
u'created_ts': u'2014-09-24T13:49:03.012345',
u'duration': 0.9636809825897217,
u'exit_code': 0,
u'failure': False,
u'internal_failure': False,
u'modified_ts': u'2014-09-24T13:49:17.012345',
u'name': u'heartbeat-canary-2014-09-24_13:49:01-os=Ubuntu',
u'server_versions': [u'1'],
u'started_ts': u'2014-09-24T13:49:09.012345',
u'state': 'COMPLETED',
u'tags': [u'cpu:x86', u'priority:100', u'user:joe@localhost'],
u'task_id': u'10100',
u'try_number': 1,
u'user': u'joe@localhost',
}
out.update(kwargs)
return out
# Silence pylint 'Access to a protected member _Event of a client class'.
class NonBlockingEvent(threading._Event): # pylint: disable=W0212
"""Just like threading.Event, but a class and ignores timeout in 'wait'.
Intended to be used as a mock for threading.Event in tests.
"""
def wait(self, timeout=None):
return super(NonBlockingEvent, self).wait(0)
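# Illustrative note, not part of the original file: NetTestCase.setUp() below
# installs this class in place of threading.Event via
# self.mock(threading, 'Event', NonBlockingEvent), so wait() calls in the code
# under test return immediately instead of sleeping out their timeout, which
# keeps polling loops fast and deterministic in tests.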
class SwarmingServerHandler(httpserver_mock.MockHandler):
"""An extremely minimal implementation of the swarming server API v1.0."""
def do_GET(self):
logging.info('S GET %s', self.path)
if self.path == '/auth/api/v1/server/oauth_config':
self.send_json({
'client_id': 'c',
'client_not_so_secret': 's',
'primary_url': self.server.url})
elif self.path == '/auth/api/v1/accounts/self':
self.send_json({'identity': 'user:joe', 'xsrf_token': 'foo'})
else:
m = re.match(r'/api/swarming/v1/task/(\d+)/request', self.path)
if m:
logging.info('%s', m.group(1))
self.send_json(self.server.tasks[int(m.group(1))])
else:
        self.send_json({'a': 'b'})
#raise NotImplementedError(self.path)
def do_POST(self):
logging.info('POST %s', self.path)
raise NotImplementedError(self.path)
class MockSwarmingServer(httpserver_mock.MockServer):
_HANDLER_CLS = SwarmingServerHandler
def __init__(self):
super(MockSwarmingServer, self).__init__()
self._server.tasks = {}
class Common(object):
def setUp(self):
self._tempdir = None
self.mock(auth, 'ensure_logged_in', lambda _: None)
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
self.mock(logging_utils, 'prepare_logging', lambda *args: None)
self.mock(logging_utils, 'set_console_level', lambda *args: None)
def tearDown(self):
if self._tempdir:
file_path.rmtree(self._tempdir)
if not self.has_failed():
self._check_output('', '')
@property
def tempdir(self):
"""Creates the directory on first reference."""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(prefix=u'swarming_test')
return self._tempdir
maxDiff = None
def _check_output(self, out, err):
self.assertMultiLineEqual(out, sys.stdout.getvalue())
self.assertMultiLineEqual(err, sys.stderr.getvalue())
# Flush their content by mocking them again.
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
def main_safe(self, args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
try:
return main(args)
except:
data = '%s\nSTDOUT:\n%s\nSTDERR:\n%s' % (
traceback.format_exc(), sys.stdout.getvalue(), sys.stderr.getvalue())
self.fail(data)
class NetTestCase(net_utils.TestCase, Common):
"""Base class that defines the url_open mock."""
def setUp(self):
net_utils.TestCase.setUp(self)
Common.setUp(self)
self.mock(time, 'sleep', lambda _: None)
self.mock(subprocess42, 'call', lambda *_: self.fail())
self.mock(threading, 'Event', NonBlockingEvent)
class TestIsolated(auto_stub.TestCase, Common):
"""Test functions with isolated_ prefix."""
def setUp(self):
auto_stub.TestCase.setUp(self)
Common.setUp(self)
self._isolate = isolateserver_mock.MockIsolateServer()
self._swarming = MockSwarmingServer()
def tearDown(self):
try:
self._isolate.close()
self._swarming.close()
finally:
Common.tearDown(self)
auto_stub.TestCase.tearDown(self)
def test_reproduce_isolated(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
outdir = os.path.join(self.tempdir, 'out')
self.assertTrue(os.path.isdir(outdir))
self.assertEqual(
[sys.executable, u'main.py', u'foo', outdir, '--bar'], cmd)
expected = os.environ.copy()
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(os.path.abspath('work')), cwd)
return 0
self.mock(subprocess42, 'call', call)
main_hash = self._isolate.add_content_compressed(
'default-gzip', 'not executed')
isolated = {
'files': {
'main.py': {
'h': main_hash,
's': 12,
'm': 0700,
},
},
'command': ['python', 'main.py'],
}
isolated_hash = self._isolate.add_content_compressed(
'default-gzip', json.dumps(isolated))
self._swarming._server.tasks[123] = {
'properties': {
'inputs_ref': {
'isolatedserver': self._isolate.url,
'namespace': 'default-gzip',
'isolated': isolated_hash,
},
'extra_args': ['foo', '${ISOLATED_OUTDIR}'],
'secret_bytes': None,
},
}
ret = self.main_safe(
[
'reproduce', '--swarming', self._swarming.url, '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
class TestSwarmingTrigger(NetTestCase):
def test_trigger_task_shards_2_shards(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id=None,
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request_1 = swarming.task_request_to_raw_request(task_request)
request_1['name'] = u'unit_tests:0:2'
request_1['task_slices'][0]['properties']['env'] = [
{'key': 'GTEST_SHARD_INDEX', 'value': '0'},
{'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
]
result_1 = gen_request_response(request_1)
request_2 = swarming.task_request_to_raw_request(task_request)
request_2['name'] = u'unit_tests:1:2'
request_2['task_slices'][0]['properties']['env'] = [
{'key': 'GTEST_SHARD_INDEX', 'value': '1'},
{'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
]
result_2 = gen_request_response(request_2, task_id='12400')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request_1},
result_1,
),
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request_2},
result_2,
),
])
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
task_request=task_request,
shards=2)
expected = {
u'unit_tests:0:2': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
},
u'unit_tests:1:2': {
'shard_index': 1,
'task_id': '12400',
'view_url': 'https://localhost:1/user/task/12400',
},
}
self.assertEqual(expected, tasks)
def test_trigger_task_shards_priority_override(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request = swarming.task_request_to_raw_request(task_request)
self.assertEqual('123', request['parent_task_id'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
shards=1,
task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
def test_trigger_cipd_package(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=swarming.CipdInput(
client_package=None,
packages=[
swarming.CipdPackage(
package_name='mypackage',
path='path/to/package',
version='abc123')],
server=None),
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request = swarming.task_request_to_raw_request(task_request)
expected = {
'client_package': None,
'packages': [{
'package_name': 'mypackage',
'path': 'path/to/package',
'version': 'abc123',
}],
'server': None
}
self.assertEqual(
expected, request['task_slices'][0]['properties']['cipd_input'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
shards=1,
task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
class TestSwarmingCollection(NetTestCase):
def test_success(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT)]
self.assertEqual(expected, get_results(['10100']))
def test_failure(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT, exit_code=1)]
self.assertEqual(expected, get_results(['10100']))
def test_no_ids(self):
actual = get_results([])
self.assertEqual([], actual)
def test_url_errors(self):
self.mock(logging, 'error', lambda *_, **__: None)
now = {}
lock = threading.Lock()
def get_now():
t = threading.current_thread()
with lock:
return now.setdefault(t, range(10)).pop(0)
self.mock(swarming.net, 'sleep_before_retry', lambda _x, _y: None)
self.mock(swarming, 'now', get_now)
self.expected_requests(
9 * [
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
None,
)
])
actual = get_results(['10100'])
self.assertEqual([], actual)
self.assertTrue(all(not v for v in now.itervalues()), now)
def test_many_shards(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3),
]
actual = get_results(['10100', '10200', '10300'])
self.assertEqual(expected, sorted(actual))
def test_output_collector_called(self):
# Three shards, one failed. All results are passed to output collector.
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
class FakeOutputCollector(object):
def __init__(self):
self.results = []
self._lock = threading.Lock()
def process_shard_result(self, index, result):
with self._lock:
self.results.append((index, result))
output_collector = FakeOutputCollector()
get_results(['10100', '10200', '10300'], output_collector)
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3, exit_code=1),
]
self.assertEqual(sorted(expected), sorted(output_collector.results))
def test_collect_nothing(self):
self.mock(swarming, 'yield_results', lambda *_: [])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
self._check_output('', 'Results from some shards are missing: 0, 1\n')
def test_collect_success(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_success_nostdout(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100'], []))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_fail(self):
data = gen_result_response(output='Foo', exit_code=-9)
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(-9, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+-------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+-------------------------------------------------------+',
'Foo',
'+-------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: -9 |',
'+-------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_one_missing(self):
data = gen_result_response(output='Foo')
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'',
'Total duration: 1.0s',
''))
self._check_output(expected, 'Results from some shards are missing: 1\n')
def test_collect_multi(self):
actual_calls = []
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks):
self.assertIs(storage.__class__, isolateserver.Storage)
self.assertIs(cache.__class__, isolateserver.MemoryCache)
# Ensure storage is pointing to required location.
self.assertEqual('https://localhost:2', storage.location)
self.assertEqual('default', storage.namespace)
self.assertEqual(False, use_symlinks)
actual_calls.append((isolated_hash, outdir))
self.mock(isolateserver, 'fetch_isolated', fetch_isolated)
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index in xrange(2):
collector.process_shard_result(
index,
gen_result_response(
outputs_ref={
'isolated': str(index) * 40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
}))
summary = collector.finalize()
expected_calls = [
('0'*40, os.path.join(self.tempdir, '0')),
('1'*40, os.path.join(self.tempdir, '1')),
]
self.assertEqual(expected_calls, actual_calls)
# Ensure collected summary is correct.
outputs_refs = [
{
'isolated': '0'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '0'*40,
},
{
'isolated': '1'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '1'*40,
},
]
expected = {
'shards': [gen_result_response(outputs_ref=o) for o in outputs_refs],
}
self.assertEqual(expected, summary)
# Ensure summary dumped to a file is correct as well.
with open(os.path.join(self.tempdir, 'summary.json'), 'r') as f:
summary_dump = json.load(f)
self.assertEqual(expected, summary_dump)
def test_ensures_same_server(self):
self.mock(logging, 'error', lambda *_: None)
# Two shard results, attempt to use different servers.
actual_calls = []
self.mock(
isolateserver, 'fetch_isolated',
lambda *args: actual_calls.append(args))
data = [
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server1',
'namespace': 'namespace',
'isolated':'hash1',
}),
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server2',
'namespace': 'namespace',
'isolated':'hash1',
}),
]
# Feed them to collector.
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index, result in enumerate(data):
collector.process_shard_result(index, result)
collector.finalize()
# Only first fetch is made, second one is ignored.
self.assertEqual(1, len(actual_calls))
isolated_hash, storage, _, outdir, _ = actual_calls[0]
self.assertEqual(
('hash1', os.path.join(self.tempdir, '0')),
(isolated_hash, outdir))
self.assertEqual('https://server1', storage.location)
class TestMain(NetTestCase):
# Tests calling main().
def test_bot_delete(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/foo/delete',
{'method': 'POST', 'data': {}},
{},
),
])
ret = self.main_safe(
['bot_delete', '--swarming', 'https://localhost:1', 'foo', '--force'])
self._check_output('', '')
self.assertEqual(0, ret)
def test_trigger_raw_cmd(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_raw_cmd_isolated(self):
# Minimalist use.
request = {
'name': u'None/foo=bar/' + FILE_HASH,
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
},
io_timeout_secs=1200),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--isolate-server', 'https://localhost:2',
'--isolated', FILE_HASH,
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar/' + FILE_HASH + u'\n'
u'To collect results, use:\n'
u' swarming.py collect -S https://localhost:1 12300\n'
u'Or visit:\n'
u' https://localhost:1/user/task/12300\n',
u'')
def test_trigger_raw_cmd_with_service_account(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200),
},
],
'service_account': 'bot',
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--service-account', 'bot',
'--raw-cmd',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_hash(self):
# pylint: disable=unused-argument
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_and_json(self):
# pylint: disable=unused-argument
write_json_calls = []
self.mock(tools, 'write_json', lambda *args: write_json_calls.append(args))
subprocess_calls = []
self.mock(subprocess42, 'call', lambda *c: subprocess_calls.append(c))
self.mock(swarming, 'now', lambda: 123456)
isolated = os.path.join(self.tempdir, 'zaz.isolated')
content = '{}'
with open(isolated, 'wb') as f:
f.write(content)
isolated_hash = isolateserver_mock.hash_content(content)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--idempotent',
'--task-name', 'unit_tests',
'--dump-json', 'foo.json',
'--isolated', isolated_hash,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self.assertEqual([], subprocess_calls)
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 --json foo.json\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
expected = [
(
u'foo.json',
{
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
},
True,
),
]
self.assertEqual(expected, write_json_calls)
def test_trigger_cipd(self):
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
cipd_input={
'client_package': None,
'packages': [
{
'package_name': 'super/awesome/pkg',
'path': 'path/to/pkg',
'version': 'version:42',
},
],
'server': None,
},
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--cipd-package', 'path/to/pkg:super/awesome/pkg:version:42',
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_no_request(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host',
'--isolate-server', 'https://host', '-T', 'foo',
          '-d', 'os', 'amiga',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]\n'
'\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_env_vars(self):
with self.assertRaises(SystemExit):
main(['trigger'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_swarming_env_var(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'ISOLATE_SERVER': 'https://host'}):
        main(['trigger', '-T', 'foo', 'foo.isolated'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_isolate_server(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'SWARMING_SERVER': 'https://host'}):
main(['trigger', 'foo.isolated', '-d', 'os', 'amiga'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_dimension(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host', '--raw-cmd', '--', 'foo',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Please at least specify one --dimension\n')
def test_collect_default_json(self):
j = os.path.join(self.tempdir, 'foo.json')
data = {
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
relative_cwd='deeep'),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
}
with open(j, 'wb') as f:
json.dump(data, f)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://host', swarming_server)
self.assertEqual([u'12300'], task_ids)
      # It is automatically calculated from hard timeout + expiration + 10
      # (60 + 3600 + 10 = 3670).
self.assertEqual(3670., timeout)
self.assertEqual(True, decorate)
self.assertEqual(True, print_status_updates)
self.assertEqual('/a', task_summary_json)
self.assertEqual('/b', task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
self.mock(swarming, 'collect', stub_collect)
self.main_safe(
['collect', '--swarming', 'https://host', '--json', j, '--decorate',
'--print-status-updates', '--task-summary-json', '/a',
'--task-output-dir', '/b', '--task-output-stdout', 'all'])
self._check_output('Fake output\n', '')
def test_post(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
self.expected_requests(
[
(
'http://localhost:1/api/swarming/v1/tasks/new',
{'data': '{"a":"b"}', 'method': 'POST'},
'{"yo":"dawg"}',
{},
),
])
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(0, ret)
self.assertEqual('{"yo":"dawg"}', out.getvalue())
self.assertEqual('', err.getvalue())
def test_post_fail(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(1, ret)
self.assertEqual('', out.getvalue())
self.assertEqual('No response!\n', err.getvalue())
def test_query_base(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?limit=200',
{},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1', 'bot/botid/tasks',
])
self._check_output('{\n "yo": "dawg"\n}\n', '')
self.assertEqual(0, ret)
def test_query_cursor(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&limit=2',
{},
{
'cursor': '%',
'extra': False,
'items': ['A'],
},
),
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&cursor=%25&limit=1',
{},
{
'cursor': None,
'items': ['B'],
'ignored': True,
},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1',
'bot/botid/tasks?foo=bar',
'--limit', '2',
])
expected = (
'{\n'
' "extra": false, \n'
' "items": [\n'
' "A", \n'
' "B"\n'
' ]\n'
'}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_reproduce(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
w = os.path.abspath('work')
self.assertEqual([os.path.join(w, 'foo'), '--bar'], cmd)
expected = os.environ.copy()
expected['aa'] = 'bb'
expected['PATH'] = os.pathsep.join(
(os.path.join(w, 'foo', 'bar'), os.path.join(w, 'second'),
expected['PATH']))
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(w), cwd)
return 0
self.mock(subprocess42, 'call', call)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/123/request',
{},
{
'properties': {
'command': ['foo'],
'env': [
{'key': 'aa', 'value': 'bb'},
],
'env_prefixes': [
{'key': 'PATH', 'value': ['foo/bar', 'second']},
],
'secret_bytes': None,
},
},
),
])
ret = self.main_safe(
[
'reproduce', '--swarming', 'https://localhost:1', '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
def test_run(self):
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://localhost:1', swarming_server)
self.assertEqual([u'12300'], task_ids)
      # It is automatically calculated from hard timeout + expiration + 10
      # (3600 + 21600 + 10 = 25210).
self.assertEqual(25210., timeout)
self.assertEqual(None, decorate)
self.assertEqual(None, print_status_updates)
self.assertEqual(None, task_summary_json)
self.assertEqual(None, task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
return 0
self.mock(swarming, 'collect', stub_collect)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'run',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (ret, actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar\nFake output\n', '')
def test_cancel(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/cancel',
{'data': {'kill_running': False}, 'method': 'POST'},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'cancel', '--swarming', 'https://localhost:1', '10100',
])
self._check_output('', '')
self.assertEqual(0, ret)
def test_collect_timeout_zero(self):
j = os.path.join(self.tempdir, 'foo.json')
pending = gen_result_response(state='PENDING')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/result',
{'retry_50x': True},
pending,
),
])
self.main_safe(
[
'collect', '--swarming', 'https://localhost:1',
'--task-summary-json', j, '--timeout', '-1', '10100',
])
self._check_output('swarm6: 10100 0\n', '')
with open(j, 'r') as f:
actual = json.load(f)
self.assertEqual({u'shards': [pending]}, actual)
class TestCommandBot(NetTestCase):
# Specialized test fixture for command 'bot'.
def setUp(self):
super(TestCommandBot, self).setUp()
# Sample data retrieved from actual server.
self.now = unicode(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
self.bot_1 = {
u'bot_id': u'swarm1',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm1']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.1',
u'hostname': u'swarm1.example.com',
u'internal_ip': u'192.168.0.1',
u'is_dead': True,
u'last_seen_ts': 'A long time ago',
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_2 = {
u'bot_id': u'swarm2',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [
u'15ad',
u'15ad:0405',
u'VMware Virtual SVGA 3D Graphics Adapter',
]},
{u'key': u'id', u'value': [u'swarm2']},
{u'key': u'os', u'value': [u'Windows', u'Windows-6.1']},
],
u'external_ip': u'1.1.1.2',
u'hostname': u'swarm2.example.com',
u'internal_ip': u'192.168.0.2',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_3 = {
u'bot_id': u'swarm3',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'4']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [u'15ad', u'15ad:0405']},
{u'key': u'id', u'value': [u'swarm3']},
{u'key': u'os', u'value': [u'Mac', u'Mac-10.9']},
],
u'external_ip': u'1.1.1.3',
u'hostname': u'swarm3.example.com',
u'internal_ip': u'192.168.0.3',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'148569b73a89501',
u'task_name': u'browser_tests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_4 = {
u'bot_id': u'swarm4',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm4']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.4',
u'hostname': u'swarm4.example.com',
u'internal_ip': u'192.168.0.4',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'14856971a64c601',
u'task_name': u'base_unittests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
def mock_swarming_api(self, bots, cursor):
"""Returns fake /api/swarming/v1/bots/list data."""
# Sample data retrieved from actual server.
return {
u'items': bots,
u'cursor': cursor,
u'death_timeout': 1800.0,
u'limit': 4,
u'now': unicode(self.now),
}
def test_bots(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(['bots', '--swarming', 'https://localhost:1'])
expected = (
u'swarm2\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": '
'["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics Adapter"], '
'"id": ["swarm2"], "os": ["Windows", "Windows-6.1"]}\n'
'swarm3\n'
' {"cores": ["4"], "cpu": ["x86", "x86-64"], "gpu": ["15ad", '
'"15ad:0405"], "id": ["swarm3"], "os": ["Mac", "Mac-10.9"]}\n'
u' task: 148569b73a89501\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_bare(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(
['bots', '--swarming', 'https://localhost:1', '--bare'])
self._check_output("swarm2\nswarm3\nswarm4\n", '')
self.assertEqual(0, ret)
def test_bots_filter(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=FALSE&is_busy=TRUE&is_mp=NONE&dimensions=os%3AWindows',
{},
self.mock_swarming_api([self.bot_2], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--busy',
'--dimension', 'os', 'Windows',
])
expected = (
u'swarm2\n {"cores": ["8"], "cpu": ["x86", "x86-64"], '
'"gpu": ["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics '
'Adapter"], "id": ["swarm2"], '
'"os": ["Windows", "Windows-6.1"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_keep_dead(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=NONE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_1, self.bot_4], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--keep-dead',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_dead_only(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=TRUE&is_busy=NONE&is_mp=NONE&dimensions=os%3AUbuntu',
{},
self.mock_swarming_api([self.bot_1], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--dimension', 'os', 'Ubuntu', '--dead-only',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
if __name__ == '__main__':
fix_encoding.fix_encoding()
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
for e in ('ISOLATE_SERVER', 'SWARMING_TASK_ID', 'SWARMING_SERVER'):
os.environ.pop(e, None)
unittest.main()
| false
| true
|
79039cb2feeca0a0e1fb76501a8e9fce7881ea24
| 2,081
|
py
|
Python
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/tests/test_smallworld.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 5
|
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/tests/test_smallworld.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/tests/test_smallworld.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
#! python
from nose.tools import assert_true, assert_raises
import random
from networkx import random_reference, lattice_reference, sigma, omega
import networkx as nx
rng = random.Random(0)
rng = 42
def test_random_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = random_reference(G, niter=1, seed=rng)
C = nx.average_clustering(G)
Cr = nx.average_clustering(Gr)
assert_true(C > Cr)
assert_raises(nx.NetworkXError, random_reference, nx.Graph())
assert_raises(nx.NetworkXNotImplemented, random_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = random_reference(H, niter=1, seed=rng)
def test_lattice_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gl = lattice_reference(G, niter=1, seed=rng)
L = nx.average_shortest_path_length(G)
Ll = nx.average_shortest_path_length(Gl)
assert_true(Ll > L)
assert_raises(nx.NetworkXError, lattice_reference, nx.Graph())
assert_raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = lattice_reference(H, niter=1)
def test_sigma():
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
sigmas = sigma(Gs, niter=1, nrand=2, seed=rng)
sigmar = sigma(Gr, niter=1, nrand=2, seed=rng)
assert_true(sigmar < sigmas)
def test_omega():
Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
omegal = omega(Gl, niter=1, nrand=1, seed=rng)
omegar = omega(Gr, niter=1, nrand=1, seed=rng)
omegas = omega(Gs, niter=1, nrand=1, seed=rng)
print("omegas, omegal, omegar")
print(omegas, omegal, omegar)
assert_true(omegal < omegas and omegas < omegar)
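# Illustrative note, not from the original file: sigma and omega compare the
# graph's clustering C and shortest path length L against random (Cr, Lr) and
# lattice (Cl) references; sigma = (C / Cr) / (L / Lr) is greater than 1 for
# small-world graphs, while omega = Lr / L - C / Cl is near 0 for small-world,
# negative for lattice-like and positive for random-like graphs, which is the
# ordering the assertions above check.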
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
    except ImportError:
raise SkipTest("NumPy not available")
| 31.059701
| 77
| 0.694858
|
from nose.tools import assert_true, assert_raises
import random
from networkx import random_reference, lattice_reference, sigma, omega
import networkx as nx
rng = random.Random(0)
rng = 42
def test_random_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = random_reference(G, niter=1, seed=rng)
C = nx.average_clustering(G)
Cr = nx.average_clustering(Gr)
assert_true(C > Cr)
assert_raises(nx.NetworkXError, random_reference, nx.Graph())
assert_raises(nx.NetworkXNotImplemented, random_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = random_reference(H, niter=1, seed=rng)
def test_lattice_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gl = lattice_reference(G, niter=1, seed=rng)
L = nx.average_shortest_path_length(G)
Ll = nx.average_shortest_path_length(Gl)
assert_true(Ll > L)
assert_raises(nx.NetworkXError, lattice_reference, nx.Graph())
assert_raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = lattice_reference(H, niter=1)
def test_sigma():
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
sigmas = sigma(Gs, niter=1, nrand=2, seed=rng)
sigmar = sigma(Gr, niter=1, nrand=2, seed=rng)
assert_true(sigmar < sigmas)
def test_omega():
Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
omegal = omega(Gl, niter=1, nrand=1, seed=rng)
omegar = omega(Gr, niter=1, nrand=1, seed=rng)
omegas = omega(Gs, niter=1, nrand=1, seed=rng)
print("omegas, omegal, omegar")
print(omegas, omegal, omegar)
assert_true(omegal < omegas and omegas < omegar)
def setup_module(module):
from nose import SkipTest
try:
import numpy
    except ImportError:
raise SkipTest("NumPy not available")
| true
| true
|
79039d0db6195b8931e14d299f608211801c6d3f
| 3,854
|
py
|
Python
|
examples/hacker_news/hacker_news/resources/dbt_asset_resource.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
examples/hacker_news/hacker_news/resources/dbt_asset_resource.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
examples/hacker_news/hacker_news/resources/dbt_asset_resource.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
from typing import Any, Dict, List
import pandas
from dagster import AssetKey, AssetMaterialization, EventMetadataEntry
from dagster_dbt import DbtOutput
from .snowflake_io_manager import connect_snowflake
class DbtAssetResource:
"""
This class defines a resource that is capable of producing a list of AssetMaterializations from
a DbtOutput. It has one public function, get_asset_materializations(), which finds all the
generated models in the dbt output and produces corresponding asset materializations.
Putting this logic in a resource makes it easier to swap out between modes. You probably want
your local testing / development pipelines to produce different assets than your production
pipelines, as they will ideally be writing to different tables (with different dbt profiles).
"""
def __init__(self, asset_key_prefix: List[str]):
self._asset_key_prefix = asset_key_prefix
def _get_metadata(self, result: Dict[str, Any]) -> List[EventMetadataEntry]:
return [
EventMetadataEntry.float(
value=result["execution_time"], label="Execution Time (seconds)"
)
]
def get_asset_materializations(self, dbt_output: DbtOutput) -> List[AssetMaterialization]:
ret = []
# dbt_output.result contains the parsed contents of the results.json file
# Note that the json schema can change from version to version. This is written for
# https://schemas.getdbt.com/dbt/run-results/v2.json (also will work with v1.json)
for result in dbt_output.result["results"]:
if result["status"] != "success":
continue
unique_id = result["unique_id"]
# Here, we choose a naming scheme for our asset keys that will look something like
# <asset prefix> / model / <dbt project> / <model name>, but this is pretty arbitrary
asset_key = AssetKey(self._asset_key_prefix + unique_id.split("."))
# create an AssetMaterialization with our key and metadata
ret.append(
AssetMaterialization(
description=f"dbt node: {unique_id}",
metadata_entries=self._get_metadata(result),
asset_key=asset_key,
)
)
return ret
class SnowflakeQueryDbtAssetResource(DbtAssetResource):
"""
This resource allows us to add in some extra information to these AssetMaterialization events.
Because the relevant dbt project is configured for a Snowflake cluster, we can query the output
models to get some additional information that we might want Dagster to track over time.
Of course, this is completely optional.
"""
def __init__(self, snowflake_config: Dict[str, str], dbt_schema: str):
self._snowflake_config = snowflake_config
self._dbt_schema = dbt_schema
super().__init__(asset_key_prefix=["snowflake", dbt_schema])
def _get_metadata(self, result: Dict[str, Any]) -> List[EventMetadataEntry]:
"""
Here, we run queries against our output Snowflake database tables to add additional context
to our asset materializations.
"""
table_name = result["unique_id"].split(".")[-1]
with connect_snowflake(config=self._snowflake_config, schema=self._dbt_schema) as con:
n_rows = pandas.read_sql_query(f"SELECT COUNT(*) FROM {table_name}", con)
sample_rows = pandas.read_sql_query(
f"SELECT * FROM {table_name} SAMPLE ROW (10 rows)", con
)
return super()._get_metadata(result) + [
EventMetadataEntry.int(int(n_rows.iloc[0][0]), "dbt Model Number of Rows"),
EventMetadataEntry.md(sample_rows.astype("str").to_markdown(), "dbt Model Sample Rows"),
]
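# Minimal usage sketch (hypothetical, with invented model names): the docstrings
# above describe how DbtAssetResource turns a DbtOutput into AssetMaterializations.
# The `_StubDbtOutput` class and the `run_results` dict below are stand-ins that
# only mimic the `.result` attribute read by get_asset_materializations(); a real
# pipeline would pass the DbtOutput produced by a dagster_dbt step instead.
class _StubDbtOutput:
    def __init__(self, result):
        # Only the attribute that get_asset_materializations() reads is provided.
        self.result = result


run_results = {
    "results": [
        {"unique_id": "model.hacker_news.comments", "status": "success", "execution_time": 1.25},
        {"unique_id": "model.hacker_news.stories", "status": "error", "execution_time": 0.10},
    ]
}

resource = DbtAssetResource(asset_key_prefix=["snowflake"])
for materialization in resource.get_asset_materializations(_StubDbtOutput(run_results)):
    # Only the successful model yields a materialization; its asset key is
    # AssetKey(["snowflake", "model", "hacker_news", "comments"]).
    print(materialization.asset_key)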
| 43.795455
| 100
| 0.674364
|
from typing import Any, Dict, List
import pandas
from dagster import AssetKey, AssetMaterialization, EventMetadataEntry
from dagster_dbt import DbtOutput
from .snowflake_io_manager import connect_snowflake
class DbtAssetResource:
def __init__(self, asset_key_prefix: List[str]):
self._asset_key_prefix = asset_key_prefix
def _get_metadata(self, result: Dict[str, Any]) -> List[EventMetadataEntry]:
return [
EventMetadataEntry.float(
value=result["execution_time"], label="Execution Time (seconds)"
)
]
def get_asset_materializations(self, dbt_output: DbtOutput) -> List[AssetMaterialization]:
ret = []
for result in dbt_output.result["results"]:
if result["status"] != "success":
continue
unique_id = result["unique_id"]
asset_key = AssetKey(self._asset_key_prefix + unique_id.split("."))
ret.append(
AssetMaterialization(
description=f"dbt node: {unique_id}",
metadata_entries=self._get_metadata(result),
asset_key=asset_key,
)
)
return ret
class SnowflakeQueryDbtAssetResource(DbtAssetResource):
def __init__(self, snowflake_config: Dict[str, str], dbt_schema: str):
self._snowflake_config = snowflake_config
self._dbt_schema = dbt_schema
super().__init__(asset_key_prefix=["snowflake", dbt_schema])
def _get_metadata(self, result: Dict[str, Any]) -> List[EventMetadataEntry]:
table_name = result["unique_id"].split(".")[-1]
with connect_snowflake(config=self._snowflake_config, schema=self._dbt_schema) as con:
n_rows = pandas.read_sql_query(f"SELECT COUNT(*) FROM {table_name}", con)
sample_rows = pandas.read_sql_query(
f"SELECT * FROM {table_name} SAMPLE ROW (10 rows)", con
)
return super()._get_metadata(result) + [
EventMetadataEntry.int(int(n_rows.iloc[0][0]), "dbt Model Number of Rows"),
EventMetadataEntry.md(sample_rows.astype("str").to_markdown(), "dbt Model Sample Rows"),
]
| true
| true
|
79039d748c17ab53e358119bb76c8822a33ac1f2
| 1,584
|
py
|
Python
|
data/cirq_new/cirq_program/startCirq_Class18.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_Class18.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_Class18.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=8
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.X.on(input_qubit[1])) # number=6
c.append(cirq.X.on(input_qubit[1])) # number=7
# circuit end
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class18.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 26.4
| 80
| 0.667298
|
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.X.on(input_qubit[1]))
c.append(cirq.X.on(input_qubit[1]))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class18.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| true
| true
|
79039e4abe6ff5a91f1f37e906b3a02ab82b7e8c
| 11,782
|
py
|
Python
|
tests/test_docs.py
|
tabulon-ext/moban
|
dcb3d9751949247b657aa5423280cf1183bb0a26
|
[
"MIT"
] | 32
|
2017-12-03T00:13:15.000Z
|
2022-02-28T15:20:43.000Z
|
tests/test_docs.py
|
tabulon-ext/moban
|
dcb3d9751949247b657aa5423280cf1183bb0a26
|
[
"MIT"
] | 353
|
2017-07-05T18:36:51.000Z
|
2020-09-24T13:42:03.000Z
|
tests/test_docs.py
|
tabulon-ext/moban
|
dcb3d9751949247b657aa5423280cf1183bb0a26
|
[
"MIT"
] | 23
|
2018-01-08T09:23:01.000Z
|
2021-12-23T07:21:21.000Z
|
import os
import fs
from .utils import Docs, custom_dedent
class TestTutorial(Docs):
def test_level_1(self):
expected = "world"
folder = "level-1-jinja2-cli"
self._moban(folder, expected)
def test_level_1_custom_define(self):
expected = "maailman"
folder = "level-1-jinja2-cli"
args = [
"moban",
"-d",
"hello=maailman",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(args, folder, [("moban.output", expected)])
def test_level_2(self):
expected = """
========header============
world
========footer============
"""
expected = custom_dedent(expected)
folder = "level-2-template-inheritance"
self._moban(folder, expected)
def test_level_3(self):
expected = """
========header============
world
shijie
========footer============
"""
expected = custom_dedent(expected)
folder = "level-3-data-override"
self._moban(folder, expected)
def test_level_4(self):
expected = """
========header============
world
shijie
========footer============
"""
expected = custom_dedent(expected)
folder = "level-4-single-command"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_5(self):
expected = """
========header============
world
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-5-custom-configuration"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_6(self):
expected = """
========header============
world2
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-6-complex-configuration"
self.run_moban(["moban"], folder, [("a.output2", expected)])
def test_level_20(self):
expected = """
========header============
world2
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-20-templates-configs-in-zip-or-tar"
self.run_moban_with_fs(
["moban"], folder, [("zip://a.zip!/a.output2", expected)]
)
def test_level_7(self):
expected = """
Hello, you are in level 7 example
Hello, you are not in level 7
"""
expected = custom_dedent(expected)
folder = "level-7-use-custom-jinja2-filter-test-n-global"
self.run_moban(["moban"], folder, [("test.output", expected)])
def test_level_8(self):
expected = "it is a test\n"
folder = "level-8-pass-a-folder-full-of-templates"
check_file = fs.path.join("templated-folder", "my")
self.run_moban(["moban"], folder, [(check_file, expected)])
def test_level_9(self):
expected = "pypi-mobans: moban dependency as pypi package"
folder = "level-9-moban-dependency-as-pypi-package"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_24(self):
expected = "pypi-mobans: files over http protocol"
folder = "level-24-files-over-http"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_9_deprecated(self):
expected = "pypi-mobans: moban dependency as pypi package"
folder = "deprecated-level-9-moban-dependency-as-pypi-package"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_10(self):
expected = "pypi-mobans: moban dependency as git repo"
folder = "level-10-moban-dependency-as-git-repo"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_10_deprecated(self):
expected = "pypi-mobans: moban dependency as git repo"
folder = "deprecated-level-10-moban-dependency-as-git-repo"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_11(self):
expected = "handlebars does not support inheritance\n"
folder = "level-11-use-handlebars"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_12(self):
expected_a = """
world
world
world
world
b.template exists
a/b
Static text generator using any template, any data and any location.
"""
expected_b = """
142
42
142
"""
expected_a = custom_dedent(expected_a)
expected_b = custom_dedent(expected_b)
folder = "level-12-use-template-engine-extensions"
self.run_moban(
["moban"],
folder,
[("a.output", expected_a), ("b.output", expected_b)],
)
def test_level_13_json(self):
expected = """
========header============
world from child.json
shijie from parent.yaml
========footer============
"""
expected = custom_dedent(expected)
folder = "level-13-any-data-override-any-data"
commands = [
"moban",
"-c",
"child.json",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(commands, folder, [("moban.output", expected)])
def test_level_13_yaml(self):
expected = """
========header============
world from child.yaml
shijie from parent.json
========footer============
"""
expected = custom_dedent(expected)
folder = "level-13-any-data-override-any-data"
commands = [
"moban",
"-c",
"child.yaml",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(commands, folder, [("moban.output", expected)])
def test_level_14_custom(self):
expected = """
========header============
world from child.cusom
shijie from parent.json
========footer============
"""
expected = custom_dedent(expected)
folder = "level-14-custom-data-loader"
commands = ["moban"]
self.run_moban(commands, folder, [("a.output", expected)])
def test_level_15_copy_templates_as_target(self):
expected = "test file\n"
folder = "level-15-copy-templates-as-target"
assertions = [
("simple.file", expected),
(
"target_without_template_type",
"file extension will trigger copy engine\n",
),
(
"target_in_short_form",
(
"it is OK to have a short form, "
+ "but the file to be 'copied' shall have 'copy' extension, "
+ "so as to trigger ContentForwardEngine, 'copy' engine.\n"
),
),
(
"output_is_copied.same_file_extension",
"it is implicit copy as well",
),
]
self.run_moban(["moban"], folder, assertions)
def test_level_21_copy_templates_into_zips(self):
expected = "test file\n"
folder = "level-21-copy-templates-into-an-alien-file-system"
long_url = (
"zip://my.zip!/test-recursive-dir/sub_directory_is_copied"
+ "/because_star_star_is_specified.txt"
)
criterias = [
["zip://my.zip!/simple.file", expected],
[
"zip://my.zip!/target_without_template_type",
"file extension will trigger copy engine\n",
],
[
"zip://my.zip!/target_in_short_form",
(
"it is OK to have a short form, "
+ "but the file to be 'copied' shall have 'copy' extension, "
+ "so as to trigger ContentForwardEngine, 'copy' engine.\n"
),
],
["zip://my.zip!/test-dir/afile.txt", "dir for copying\n"],
[long_url, "dest_directory: source_directory/**\n"],
]
self.run_moban_with_fs(["moban"], folder, criterias)
def test_level_16_group_targets_using_template_type(self):
expected = "test file\n"
folder = "level-16-group-targets-using-template-type"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def test_level_17_force_template_type_from_moban_file(self):
expected = "test file\n"
folder = "level-17-force-template-type-from-moban-file"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def test_level_18_user_defined_template_types(self):
from datetime import datetime
expected = "{date}\n".format(date=datetime.now().strftime("%Y-%m-%d"))
folder = "level-18-user-defined-template-types"
self.run_moban(
["moban"],
folder,
[("a.output", expected), ("b.output", "shijie\n")],
)
def test_level_19_without_group_target(self):
expected = "test file\n"
folder = "level-19-moban-a-sub-group-in-targets"
assertions = [
("simple.file", expected),
("a.output", "I will not be selected in level 19\n"),
]
self.run_moban(["moban"], folder, assertions)
def test_level_19_with_group_target(self):
expected = "test file\n"
folder = "level-19-moban-a-sub-group-in-targets"
self.run_moban(
["moban", "-g", "copy"], folder, [("simple.file", expected)]
)
# make sure only copy target is executed
assert not os.path.exists("a.output")
def test_level_22_intermediate_targets(self):
expected = "a world\n"
folder = "level-22-intermediate-targets"
self.run_moban(["moban"], folder, [("final", expected)])
assert os.path.exists("intermediate.jj2")
def test_level_25_delete_intermediate_targets(self):
expected = "a world\n"
folder = "level-25-delete-intermediate-targets"
self.run_moban(["moban"], folder, [("final", expected)])
assert not os.path.exists("intermediate.jj2")
assert not os.path.exists("intermediate2.jj2")
assert not os.path.exists("intermediate3.jj2")
def test_level_26_strip_intermediate_targets(self):
expected = "a world"
folder = "level-26-strip-rendered-content"
self.run_moban(["moban"], folder, [("final", expected)])
assert not os.path.exists("intermediate.strip")
def test_level_23_inherit_parent_moban_file(self):
folder = "level-23-inherit-organisational-moban-file"
self.run_moban(
["moban"],
folder,
[("output_a", "I am template a"), ("output_b", "I am template b")],
)
def test_misc_1(self):
expected = "test file\n"
folder = "misc-1-copying-templates"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def _moban(self, folder, expected):
args = [
"moban",
"-c",
"data.yml",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(args, folder, [("moban.output", expected)])
| 29.903553
| 81
| 0.534035
|
import os
import fs
from .utils import Docs, custom_dedent
class TestTutorial(Docs):
def test_level_1(self):
expected = "world"
folder = "level-1-jinja2-cli"
self._moban(folder, expected)
def test_level_1_custom_define(self):
expected = "maailman"
folder = "level-1-jinja2-cli"
args = [
"moban",
"-d",
"hello=maailman",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(args, folder, [("moban.output", expected)])
def test_level_2(self):
expected = """
========header============
world
========footer============
"""
expected = custom_dedent(expected)
folder = "level-2-template-inheritance"
self._moban(folder, expected)
def test_level_3(self):
expected = """
========header============
world
shijie
========footer============
"""
expected = custom_dedent(expected)
folder = "level-3-data-override"
self._moban(folder, expected)
def test_level_4(self):
expected = """
========header============
world
shijie
========footer============
"""
expected = custom_dedent(expected)
folder = "level-4-single-command"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_5(self):
expected = """
========header============
world
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-5-custom-configuration"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_6(self):
expected = """
========header============
world2
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-6-complex-configuration"
self.run_moban(["moban"], folder, [("a.output2", expected)])
def test_level_20(self):
expected = """
========header============
world2
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-20-templates-configs-in-zip-or-tar"
self.run_moban_with_fs(
["moban"], folder, [("zip://a.zip!/a.output2", expected)]
)
def test_level_7(self):
expected = """
Hello, you are in level 7 example
Hello, you are not in level 7
"""
expected = custom_dedent(expected)
folder = "level-7-use-custom-jinja2-filter-test-n-global"
self.run_moban(["moban"], folder, [("test.output", expected)])
def test_level_8(self):
expected = "it is a test\n"
folder = "level-8-pass-a-folder-full-of-templates"
check_file = fs.path.join("templated-folder", "my")
self.run_moban(["moban"], folder, [(check_file, expected)])
def test_level_9(self):
expected = "pypi-mobans: moban dependency as pypi package"
folder = "level-9-moban-dependency-as-pypi-package"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_24(self):
expected = "pypi-mobans: files over http protocol"
folder = "level-24-files-over-http"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_9_deprecated(self):
expected = "pypi-mobans: moban dependency as pypi package"
folder = "deprecated-level-9-moban-dependency-as-pypi-package"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_10(self):
expected = "pypi-mobans: moban dependency as git repo"
folder = "level-10-moban-dependency-as-git-repo"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_10_deprecated(self):
expected = "pypi-mobans: moban dependency as git repo"
folder = "deprecated-level-10-moban-dependency-as-git-repo"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_11(self):
expected = "handlebars does not support inheritance\n"
folder = "level-11-use-handlebars"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_12(self):
expected_a = """
world
world
world
world
b.template exists
a/b
Static text generator using any template, any data and any location.
"""
expected_b = """
142
42
142
"""
expected_a = custom_dedent(expected_a)
expected_b = custom_dedent(expected_b)
folder = "level-12-use-template-engine-extensions"
self.run_moban(
["moban"],
folder,
[("a.output", expected_a), ("b.output", expected_b)],
)
def test_level_13_json(self):
expected = """
========header============
world from child.json
shijie from parent.yaml
========footer============
"""
expected = custom_dedent(expected)
folder = "level-13-any-data-override-any-data"
commands = [
"moban",
"-c",
"child.json",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(commands, folder, [("moban.output", expected)])
def test_level_13_yaml(self):
expected = """
========header============
world from child.yaml
shijie from parent.json
========footer============
"""
expected = custom_dedent(expected)
folder = "level-13-any-data-override-any-data"
commands = [
"moban",
"-c",
"child.yaml",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(commands, folder, [("moban.output", expected)])
def test_level_14_custom(self):
expected = """
========header============
world from child.cusom
shijie from parent.json
========footer============
"""
expected = custom_dedent(expected)
folder = "level-14-custom-data-loader"
commands = ["moban"]
self.run_moban(commands, folder, [("a.output", expected)])
def test_level_15_copy_templates_as_target(self):
expected = "test file\n"
folder = "level-15-copy-templates-as-target"
assertions = [
("simple.file", expected),
(
"target_without_template_type",
"file extension will trigger copy engine\n",
),
(
"target_in_short_form",
(
"it is OK to have a short form, "
+ "but the file to be 'copied' shall have 'copy' extension, "
+ "so as to trigger ContentForwardEngine, 'copy' engine.\n"
),
),
(
"output_is_copied.same_file_extension",
"it is implicit copy as well",
),
]
self.run_moban(["moban"], folder, assertions)
def test_level_21_copy_templates_into_zips(self):
expected = "test file\n"
folder = "level-21-copy-templates-into-an-alien-file-system"
long_url = (
"zip://my.zip!/test-recursive-dir/sub_directory_is_copied"
+ "/because_star_star_is_specified.txt"
)
criterias = [
["zip://my.zip!/simple.file", expected],
[
"zip://my.zip!/target_without_template_type",
"file extension will trigger copy engine\n",
],
[
"zip://my.zip!/target_in_short_form",
(
"it is OK to have a short form, "
+ "but the file to be 'copied' shall have 'copy' extension, "
+ "so as to trigger ContentForwardEngine, 'copy' engine.\n"
),
],
["zip://my.zip!/test-dir/afile.txt", "dir for copying\n"],
[long_url, "dest_directory: source_directory/**\n"],
]
self.run_moban_with_fs(["moban"], folder, criterias)
def test_level_16_group_targets_using_template_type(self):
expected = "test file\n"
folder = "level-16-group-targets-using-template-type"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def test_level_17_force_template_type_from_moban_file(self):
expected = "test file\n"
folder = "level-17-force-template-type-from-moban-file"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def test_level_18_user_defined_template_types(self):
from datetime import datetime
expected = "{date}\n".format(date=datetime.now().strftime("%Y-%m-%d"))
folder = "level-18-user-defined-template-types"
self.run_moban(
["moban"],
folder,
[("a.output", expected), ("b.output", "shijie\n")],
)
def test_level_19_without_group_target(self):
expected = "test file\n"
folder = "level-19-moban-a-sub-group-in-targets"
assertions = [
("simple.file", expected),
("a.output", "I will not be selected in level 19\n"),
]
self.run_moban(["moban"], folder, assertions)
def test_level_19_with_group_target(self):
expected = "test file\n"
folder = "level-19-moban-a-sub-group-in-targets"
self.run_moban(
["moban", "-g", "copy"], folder, [("simple.file", expected)]
)
# make sure only copy target is executed
assert not os.path.exists("a.output")
def test_level_22_intermediate_targets(self):
expected = "a world\n"
folder = "level-22-intermediate-targets"
self.run_moban(["moban"], folder, [("final", expected)])
assert os.path.exists("intermediate.jj2")
def test_level_25_delete_intermediate_targets(self):
expected = "a world\n"
folder = "level-25-delete-intermediate-targets"
self.run_moban(["moban"], folder, [("final", expected)])
assert not os.path.exists("intermediate.jj2")
assert not os.path.exists("intermediate2.jj2")
assert not os.path.exists("intermediate3.jj2")
def test_level_26_strip_intermediate_targets(self):
expected = "a world"
folder = "level-26-strip-rendered-content"
self.run_moban(["moban"], folder, [("final", expected)])
assert not os.path.exists("intermediate.strip")
def test_level_23_inherit_parent_moban_file(self):
folder = "level-23-inherit-organisational-moban-file"
self.run_moban(
["moban"],
folder,
[("output_a", "I am template a"), ("output_b", "I am template b")],
)
def test_misc_1(self):
expected = "test file\n"
folder = "misc-1-copying-templates"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def _moban(self, folder, expected):
args = [
"moban",
"-c",
"data.yml",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(args, folder, [("moban.output", expected)])
| true
| true
|
79039e4ce5caf5ffbea541e6b08a5b24e139ff01
| 6,616
|
py
|
Python
|
stock-filters/NeoCortex.py
|
Sebastianchr22/Minecraft-Settlement-Generation
|
5c902595b47c3c75c96485b29c4e76a07470a431
|
[
"0BSD"
] | null | null | null |
stock-filters/NeoCortex.py
|
Sebastianchr22/Minecraft-Settlement-Generation
|
5c902595b47c3c75c96485b29c4e76a07470a431
|
[
"0BSD"
] | null | null | null |
stock-filters/NeoCortex.py
|
Sebastianchr22/Minecraft-Settlement-Generation
|
5c902595b47c3c75c96485b29c4e76a07470a431
|
[
"0BSD"
] | null | null | null |
from math import sqrt
from PrefrontalCortex import Impulse
from Decisions import Decisions
from Decision import Decision
import random as rand
# The job of the Neo-cortex is to evaluate, think, and consider.
# It is a slow brain part, but a highly important one; its job is to perform tasks for the prefrontal cortex (to make it happy),
# while finding the optimal ways to do those tasks.
class NeoCortex:
def __init__(self, settler, world_grid):
self.settler = settler
self.decision_tree = self.settler._get_decisions()
self.world_grid = world_grid
self.xz_grid = self.get_xz_of(world_grid[:])
def get_xz_of(self, grid):
l = []
for cell in grid:
c = []
for block in cell.get_chunk():
c.append((block[0], block[2]))
l.append(c)
return l
def handle_impulse(self, impulse, weights):
text = ""
if impulse.name == Impulse.WANT_FOOD.name:
food = self._go_hunt()
if food > 0:
text = "Went to hunt, and found "+ str(food) +" food!"
else:
text = "Went to hunt, and found nothing.."
elif impulse.name == Impulse.WANT_SHELTER.name:
text = self._go_build_shelter()
elif impulse.name == Impulse.WANT_SLEEP.name:
self._go_sleep()
text = "Went to sleep"
elif impulse.name == Impulse.WANT_CHILDREN.name:
if self.settler._get_has_mate():
self._go_mate()
text = "Went to mate"
else:
text = self._go_find_mate()
#print "SETTLER: ", text
decision = Decision(text, impulse, weights)
self.decision_tree.new_decision(decision)
#Returns the amount of food found while hunting (0 if the hunt was unsuccessful)
def _go_hunt(self):
self.settler._move(self.find_free_grid_cell()) #Action
success_prob = 0.5
bounds = (0, 10)
found_food = rand.randrange(bounds[0], bounds[1], 1) >= bounds[1] * success_prob
food = int(found_food) * int(rand.randrange(0, 2))
self.settler.add_food(food)
return food
def _go_build_shelter(self):
self.move_to_suitable_plot()
self.settler.settlement.settler_claims_index(self.settler.origin)
self.settler._build() #Action
self.world_grid[self.settler.origin].use_segment() #Mental note
self.settler.set_has_shelter()
return "Successfully built a shelter"
def _go_sleep(self):
pass
def _go_mate(self):
self.settler._mate()
def _go_find_mate(self):
success, mates = self.get_suitable_mates()
if success:
mated, num_kids = self.settler._find_and_mate(mates)
text = ""
if mated:
text = "Had " + str(num_kids) + " children"
else:
text = "Got no consent from suitable mates"
return text
else:
return "Failed to find suitable mates"
def old_can_build(self):
s = self.world_grid[self.settler.origin].get_chunk()[0]
dist = 0
if self.settler.settlement.get_index_claimed(self.settler.origin):
return False
for house_index in self.settler.settlement.get_all_shelter_indexes():
t = self.world_grid[house_index].get_chunk()[0]
dist = (s[0] - t[0], s[2] - t[2])
dist = (pow(dist[0], 2), pow(dist[1], 2))
dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
if dist[0] <= 5 and dist[1] <= 5:
return False
return True
def move_to_suitable_plot(self):
close_shelters = self.get_close_houses()
if len(close_shelters) > 0:
self_loc = self.world_grid[self.settler.origin].get_chunk()[0]
average_loc = (self_loc[0], self_loc[2])
for shelter_loc in close_shelters:
average_loc += (-(shelter_loc[0] - self_loc[0]), -(shelter_loc[2] - self_loc[2]))
self.settler._move(self.get_index_of(average_loc, self.xz_grid))
min_shelter_dist = 10
def get_close_houses(self):
s = self.world_grid[self.settler.origin].get_chunk()[0]
close_shelters_locs = []
for house_index in self.settler.settlement.get_all_shelter_indexes():
t = self.world_grid[house_index].get_chunk()[0]
dist = (s[0] - t[0], s[2] - t[2])
dist = (pow(dist[0], 2), pow(dist[1], 2))
dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
if dist[0] <= self.min_shelter_dist and dist[1] <= self.min_shelter_dist:
close_shelters_locs.append(t)
if self.settler.settlement.get_index_claimed(self.settler.origin):
close_shelters_locs.append(s)
return close_shelters_locs
def find_free_grid_cell(self):
point = self.world_grid[self.settler.origin].get_chunk()[0] #Initial and fallback (no move)
attempts = 0
new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
while not self.point_in_grid(new_point, self.xz_grid):
new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
if self.settler.steps_left <= 0:
print "Settler died thinking"
return self.settler.origin
if attempts % 5 == 0: #Slowly die trying to move (prevents stalling)
self.settler.steps_left -= 1
attempts += 1
return self.get_index_of(new_point, self.xz_grid)
def get_step_size(self, loc):
d = 5 #One chunk per step
return int(rand.normalvariate(loc, d))
def point_in_grid(self, point, grid):
for cell in grid:
if point in cell:
return True
return False
def get_index_of(self, point, grid):
for cell in grid:
if point in cell:
return grid.index(cell)
return 0
def get_index_of_3d(self, point, grid):
for cell in grid:
if point in cell.get_chunk():
return grid.index(cell)
return self.find_free_grid_cell()
def get_suitable_mates(self):
suitable = []
for settler in self.settler.settlement.get_all_settlers():
if settler._get_has_shelter():
suitable.append(settler)
if len(suitable) <= 0:
return False, suitable
else:
return True, suitable
| 37.378531
| 130
| 0.587666
|
from math import sqrt
from PrefrontalCortex import Impulse
from Decisions import Decisions
from Decision import Decision
import random as rand
# While finding the optimal ways to do those tasks.
class NeoCortex:
def __init__(self, settler, world_grid):
self.settler = settler
self.decision_tree = self.settler._get_decisions()
self.world_grid = world_grid
self.xz_grid = self.get_xz_of(world_grid[:])
def get_xz_of(self, grid):
l = []
for cell in grid:
c = []
for block in cell.get_chunk():
c.append((block[0], block[2]))
l.append(c)
return l
def handle_impulse(self, impulse, weights):
text = ""
if impulse.name == Impulse.WANT_FOOD.name:
food = self._go_hunt()
if food > 0:
text = "Went to hunt, and found "+ str(food) +" food!"
else:
text = "Went to hunt, and found nothing.."
elif impulse.name == Impulse.WANT_SHELTER.name:
text = self._go_build_shelter()
elif impulse.name == Impulse.WANT_SLEEP.name:
self._go_sleep()
text = "Went to sleep"
elif impulse.name == Impulse.WANT_CHILDREN.name:
if self.settler._get_has_mate():
self._go_mate()
text = "Went to mate"
else:
text = self._go_find_mate()
#print "SETTLER: ", text
decision = Decision(text, impulse, weights)
self.decision_tree.new_decision(decision)
#Returns the amount of food found while hunting (0 if the hunt was unsuccessful)
def _go_hunt(self):
self.settler._move(self.find_free_grid_cell()) #Action
success_prob = 0.5
bounds = (0, 10)
found_food = rand.randrange(bounds[0], bounds[1], 1) >= bounds[1] * success_prob
food = int(found_food) * int(rand.randrange(0, 2))
self.settler.add_food(food)
return food
def _go_build_shelter(self):
self.move_to_suitable_plot()
self.settler.settlement.settler_claims_index(self.settler.origin)
self.settler._build() #Action
self.world_grid[self.settler.origin].use_segment() #Mental note
self.settler.set_has_shelter()
return "Successfully built a shelter"
def _go_sleep(self):
pass
def _go_mate(self):
self.settler._mate()
def _go_find_mate(self):
success, mates = self.get_suitable_mates()
if success:
mated, num_kids = self.settler._find_and_mate(mates)
text = ""
if mated:
text = "Had " + str(num_kids) + " children"
else:
text = "Got no consent from suitable mates"
return text
else:
return "Failed to find suitable mates"
def old_can_build(self):
s = self.world_grid[self.settler.origin].get_chunk()[0]
dist = 0
if self.settler.settlement.get_index_claimed(self.settler.origin):
return False
for house_index in self.settler.settlement.get_all_shelter_indexes():
t = self.world_grid[house_index].get_chunk()[0]
dist = (s[0] - t[0], s[2] - t[2])
dist = (pow(dist[0], 2), pow(dist[1], 2))
dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
if dist[0] <= 5 and dist[1] <= 5:
return False
return True
def move_to_suitable_plot(self):
close_shelters = self.get_close_houses()
if len(close_shelters) > 0:
self_loc = self.world_grid[self.settler.origin].get_chunk()[0]
average_loc = (self_loc[0], self_loc[2])
for shelter_loc in close_shelters:
average_loc += (-(shelter_loc[0] - self_loc[0]), -(shelter_loc[2] - self_loc[2]))
self.settler._move(self.get_index_of(average_loc, self.xz_grid))
min_shelter_dist = 10
def get_close_houses(self):
s = self.world_grid[self.settler.origin].get_chunk()[0]
close_shelters_locs = []
for house_index in self.settler.settlement.get_all_shelter_indexes():
t = self.world_grid[house_index].get_chunk()[0]
dist = (s[0] - t[0], s[2] - t[2])
dist = (pow(dist[0], 2), pow(dist[1], 2))
dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
if dist[0] <= self.min_shelter_dist and dist[1] <= self.min_shelter_dist:
close_shelters_locs.append(t)
if self.settler.settlement.get_index_claimed(self.settler.origin):
close_shelters_locs.append(s)
return close_shelters_locs
def find_free_grid_cell(self):
point = self.world_grid[self.settler.origin].get_chunk()[0] #Initial and fallback (no move)
attempts = 0
new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
while not self.point_in_grid(new_point, self.xz_grid):
new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
if self.settler.steps_left <= 0:
print "Settler died thinking"
return self.settler.origin
if attempts % 5 == 0: #Slowly die trying to move (prevents stalling)
self.settler.steps_left -= 1
attempts += 1
return self.get_index_of(new_point, self.xz_grid)
def get_step_size(self, loc):
d = 5 #One chunk per step
return int(rand.normalvariate(loc, d))
def point_in_grid(self, point, grid):
for cell in grid:
if point in cell:
return True
return False
def get_index_of(self, point, grid):
for cell in grid:
if point in cell:
return grid.index(cell)
return 0
def get_index_of_3d(self, point, grid):
for cell in grid:
if point in cell.get_chunk():
return grid.index(cell)
return self.find_free_grid_cell()
def get_suitable_mates(self):
suitable = []
for settler in self.settler.settlement.get_all_settlers():
if settler._get_has_shelter():
suitable.append(settler)
if len(suitable) <= 0:
return False, suitable
else:
return True, suitable
| false
| true
|
79039f4935ee01b3c9deb96a41fee01735c61ec5
| 278
|
py
|
Python
|
homonym.py
|
Biatris/Homonym
|
5fd4f295f2454e9a314ad271b05edbcad0dc7c8c
|
[
"MIT"
] | null | null | null |
homonym.py
|
Biatris/Homonym
|
5fd4f295f2454e9a314ad271b05edbcad0dc7c8c
|
[
"MIT"
] | null | null | null |
homonym.py
|
Biatris/Homonym
|
5fd4f295f2454e9a314ad271b05edbcad0dc7c8c
|
[
"MIT"
] | null | null | null |
class HomonymException(Exception):
def __init__(self, *args):
super().__init__(*args)
class Homonym():
def __init__(self):
pass
def CreateModel(self):
pass
def SgdScore(self, rounds):
pass
def FindErrors(self):
pass
| 14.631579
| 34
| 0.579137
|
class HomonymException(Exception):
def __init__(self, *args):
super().__init__(*args)
class Homonym():
def __init__(self):
pass
def CreateModel(self):
pass
def SgdScore(self, rounds):
pass
def FindErrors(self):
pass
| true
| true
|
79039fe4d32b0ecf731cccf14a7b40da4ba42599
| 58,533
|
py
|
Python
|
src/transformers/models/convbert/modeling_tf_convbert.py
|
kct22aws/transformers
|
04cddaf402591e9f5bdb5f116a111d829a0ce4f4
|
[
"Apache-2.0"
] | 5
|
2020-10-30T13:07:02.000Z
|
2021-03-17T12:18:30.000Z
|
src/transformers/models/convbert/modeling_tf_convbert.py
|
guang7400613/transformers
|
28e091430eea9e0d40839e56fd0d57aec262f5f9
|
[
"Apache-2.0"
] | 1
|
2022-01-17T03:24:35.000Z
|
2022-01-17T03:24:35.000Z
|
src/transformers/models/convbert/modeling_tf_convbert.py
|
guang7400613/transformers
|
28e091430eea9e0d40839e56fd0d57aec262f5f9
|
[
"Apache-2.0"
] | 1
|
2022-02-08T19:37:39.000Z
|
2022-02-08T19:37:39.000Z
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvBERT model."""
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
]
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
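# Shape sketch for the embedding sum above, assuming embedding_size = 768 and
# inputs of shape [batch_size, seq_length] = [2, 128]:
#   inputs_embeds     -> [2, 128, 768]  (gather rows of self.weight by input_ids)
#   position_embeds   -> [1, 128, 768]  (gather by position_ids, broadcast over the batch)
#   token_type_embeds -> [2, 128, 768]  (gather by token_type_ids, zeros by default)
#   final_embeddings  -> [2, 128, 768]  (element-wise sum -> LayerNorm -> dropout)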
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
unfold_conv_out_layer = tf.stack(
[
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
for i in range(self.conv_kernel_size)
],
axis=-1,
)
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TFBertModel call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
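# Worked equations for the self-attention branch above (standard scaled
# dot-product attention, with d_k = attention_head_size):
#   scores  = Q @ K^T / sqrt(d_k)
#   probs   = softmax(scores + attention_mask)   # the mask adds -10000 at padded positions
#   context = probs @ V
# The span-based dynamic convolution branch then contributes conv_out, and both
# are concatenated along the head dimension before the final reshape to
# (batch_size, seq_len_q, head_ratio * all_head_size).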
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
def call(self, hidden_states):
batch_size = shape_list(hidden_states)[0]
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [batch_size, -1, self.output_size])
x = tf.nn.bias_add(value=x, bias=self.bias)
return x
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
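# Worked example for the mask transform above: for one sequence of length 4
# with a single padded position, attention_mask = [[1, 1, 1, 0]] is reshaped to
# shape (1, 1, 1, 4) and mapped through (1.0 - mask) * -10000.0, giving
# [[[[0.0, 0.0, 0.0, -10000.0]]]]. Adding this to the raw attention scores
# before the softmax effectively removes attention to the padded token.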
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.).
    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
    first positional argument:
    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated with the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
            Whether or not to return the attention tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can only be used in eager mode; in graph mode the value in the
            config will be used instead.
output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can only be used in eager mode; in graph mode the value in the config will be
            used instead.
return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode; in graph mode the value will always be set to `True`.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
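# Editor's note (added comments; not part of the original file): the head above ties the
# output projection to the input embedding matrix. Hidden states of shape
# (batch, seq_len, embedding_size) are reshaped to (-1, embedding_size), multiplied by
# the transposed embedding matrix of shape (vocab_size, embedding_size) to obtain
# per-token vocabulary logits, offset by a learned bias, and reshaped back to
# (batch, seq_len, vocab_size).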
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
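# Editor's sketch (added; not part of the original module): masked language modeling with
# the labels convention documented above -- positions that should not contribute to the
# loss are set to -100. Assumes the public `transformers` API and that the masked and
# unmasked sentences tokenize to the same length; illustrative only.
def _example_convbert_masked_lm():
    import tensorflow as tf
    from transformers import ConvBertTokenizer, TFConvBertForMaskedLM
    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
    encoded = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
    # Supervise only the masked position; every other label is -100 (ignored by the loss).
    labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
    labels = tf.where(encoded["input_ids"] == tokenizer.mask_token_id, labels, -100)
    outputs = model(**encoded, labels=labels)
    return outputs.loss, outputs.logits  # logits: (batch, seq_len, vocab_size)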
class TFConvBertClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
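# Editor's sketch (added; not part of the original module): sequence classification with
# the labels convention documented above (`num_labels == 1` -> regression, otherwise
# cross-entropy classification). Assumes the public `transformers` API; illustrative only.
def _example_convbert_sequence_classification():
    import tensorflow as tf
    from transformers import ConvBertTokenizer, TFConvBertForSequenceClassification
    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base", num_labels=2)
    encoded = tokenizer("A thoroughly enjoyable read.", return_tensors="tf")
    labels = tf.constant([1])  # class index in [0, num_labels - 1]
    outputs = model(**encoded, labels=labels)
    return outputs.loss, outputs.logits  # logits: (batch, num_labels)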
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
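        # Editor's note (added comments; not part of the original file): shape flow for
        # multiple choice -- inputs arrive as (batch, num_choices, seq_len), are flattened
        # above to (batch * num_choices, seq_len) before running ConvBERT, the sequence
        # summary and the single-unit classifier produce one logit per flattened example,
        # and the reshape above restores (batch, num_choices) so a softmax over the last
        # axis scores the candidate choices.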
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
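# Editor's sketch (added; not part of the original module): token classification (e.g.
# NER) produces one logit vector per token. Assumes the public `transformers` API; the
# label count below is a hypothetical tag-set size, illustrative only.
def _example_convbert_token_classification():
    import tensorflow as tf
    from transformers import ConvBertTokenizer, TFConvBertForTokenClassification
    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForTokenClassification.from_pretrained("YituTech/conv-bert-base", num_labels=9)
    encoded = tokenizer("Angela Merkel visited Paris.", return_tensors="tf")
    outputs = model(**encoded)
    predicted_tag_ids = tf.argmax(outputs.logits, axis=-1)  # shape (batch, seq_len)
    return predicted_tag_ids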
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
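# Editor's sketch (added; not part of the original module): extractive question answering
# with the start/end logits produced above. Assumes the public `transformers` API;
# illustrative only.
def _example_convbert_question_answering():
    import tensorflow as tf
    from transformers import ConvBertTokenizer, TFConvBertForQuestionAnswering
    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")
    question = "What does ConvBERT replace?"
    context = "ConvBERT replaces some self-attention heads with span-based dynamic convolution."
    encoded = tokenizer(question, context, return_tensors="tf")
    outputs = model(**encoded)
    start_index = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end_index = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer_tokens = encoded["input_ids"][0, start_index : end_index + 1]
    return tokenizer.decode(answer_tokens)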
| 40.59154
| 132
| 0.663164
|
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
]
class TFConvBertEmbeddings(tf.keras.layers.Layer):
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
unfold_conv_out_layer = tf.stack(
[
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
for i in range(self.conv_kernel_size)
],
axis=-1,
)
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype)
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
attention_probs = self.dropout(attention_probs, training=training)
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:]
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
def call(self, hidden_states):
batch_size = shape_list(hidden_states)[0]
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [batch_size, -1, self.output_size])
x = tf.nn.bias_add(value=x, bias=self.bias)
return x
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:]
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.).
    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
    first positional argument:
    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated with the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
            Whether or not to return the attention tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can only be used in eager mode; in graph mode the value in the
            config will be used instead.
output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can only be used in eager mode; in graph mode the value in the config will be
            used instead.
return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode; in graph mode the value will always be set to `True`.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFConvBertClassificationHead(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
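# --- Illustrative usage sketch added by the editor; not part of the original file. ---
# All of the task heads above share the same call pattern. Minimal example for the
# sequence-classification head; the checkpoint name "YituTech/conv-bert-base" and the
# input sentence are assumptions, and the randomly initialised classifier weights mean
# the probabilities below are not meaningful until the head is fine-tuned.
import tensorflow as tf
from transformers import AutoTokenizer, TFConvBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base")
inputs = tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
outputs = model(inputs)                         # TFSequenceClassifierOutput
probs = tf.nn.softmax(outputs.logits, axis=-1)  # shape (1, num_labels)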
| true
| true
|
7903a053ea8b41b764eacc2341c116e970412aac
| 5,043
|
py
|
Python
|
tests/cli/commands/test_plugins_command.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | 8,092
|
2016-04-27T20:32:29.000Z
|
2019-01-05T07:39:33.000Z
|
tests/cli/commands/test_plugins_command.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | 2,961
|
2016-05-05T07:16:16.000Z
|
2019-01-05T08:47:59.000Z
|
tests/cli/commands/test_plugins_command.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | 3,546
|
2016-05-04T20:33:16.000Z
|
2019-01-05T05:14:26.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import textwrap
import unittest
from contextlib import redirect_stdout
from airflow.cli import cli_parser
from airflow.cli.commands import plugins_command
from airflow.hooks.base import BaseHook
from airflow.listeners.listener import get_listener_manager
from airflow.plugins_manager import AirflowPlugin
from tests.plugins.test_plugin import AirflowTestPlugin as ComplexAirflowPlugin
from tests.test_utils.mock_plugins import mock_plugin_manager
class PluginHook(BaseHook):
pass
class TestPlugin(AirflowPlugin):
name = "test-plugin-cli"
hooks = [PluginHook]
class TestPluginsCommand(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
@mock_plugin_manager(plugins=[])
def test_should_display_no_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
assert 'No plugins loaded' in stdout
@mock_plugin_manager(plugins=[ComplexAirflowPlugin])
def test_should_display_one_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
print(stdout)
info = json.loads(stdout)
assert info == [
{
'name': 'test_plugin',
'macros': ['tests.plugins.test_plugin.plugin_macro'],
'executors': ['tests.plugins.test_plugin.PluginExecutor'],
'flask_blueprints': [
"<flask.blueprints.Blueprint: name='test_plugin' import_name='tests.plugins.test_plugin'>"
],
'appbuilder_views': [
{
'name': 'Test View',
'category': 'Test Plugin',
'view': 'tests.plugins.test_plugin.PluginTestAppBuilderBaseView',
}
],
'global_operator_extra_links': [
'<tests.test_utils.mock_operators.AirflowLink object>',
'<tests.test_utils.mock_operators.GithubLink object>',
],
'timetables': ['tests.plugins.test_plugin.CustomCronDataIntervalTimetable'],
'operator_extra_links': [
'<tests.test_utils.mock_operators.GoogleLink object>',
'<tests.test_utils.mock_operators.AirflowLink2 object>',
'<tests.test_utils.mock_operators.CustomOpLink object>',
'<tests.test_utils.mock_operators.CustomBaseIndexOpLink object>',
],
'hooks': ['tests.plugins.test_plugin.PluginHook'],
'listeners': ['tests.listeners.empty_listener'],
'source': None,
'appbuilder_menu_items': [
{'name': 'Google', 'href': 'https://www.google.com', 'category': 'Search'},
{
'name': 'apache',
'href': 'https://www.apache.org/',
'label': 'The Apache Software Foundation',
},
],
'ti_deps': ['<TIDep(CustomTestTriggerRule)>'],
}
]
get_listener_manager().clear()
@mock_plugin_manager(plugins=[TestPlugin])
def test_should_display_one_plugins_as_table(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=table']))
stdout = temp_stdout.getvalue()
        # Remove trailing spaces
stdout = "\n".join(line.rstrip(" ") for line in stdout.splitlines())
# Assert that only columns with values are displayed
expected_output = textwrap.dedent(
"""\
name | hooks
================+===================================================
test-plugin-cli | tests.cli.commands.test_plugins_command.PluginHook
"""
)
self.assertEqual(stdout, expected_output)
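# --- Illustrative sketch added by the editor; not part of the test module above. ---
# The same code path can be driven outside unittest, assuming a working Airflow
# installation with at least one plugin registered:
from airflow.cli import cli_parser
from airflow.cli.commands import plugins_command

parser = cli_parser.get_parser()
plugins_command.dump_plugins(parser.parse_args(['plugins', '--output=table']))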
| 41.677686
| 110
| 0.614515
|
import io
import json
import textwrap
import unittest
from contextlib import redirect_stdout
from airflow.cli import cli_parser
from airflow.cli.commands import plugins_command
from airflow.hooks.base import BaseHook
from airflow.listeners.listener import get_listener_manager
from airflow.plugins_manager import AirflowPlugin
from tests.plugins.test_plugin import AirflowTestPlugin as ComplexAirflowPlugin
from tests.test_utils.mock_plugins import mock_plugin_manager
class PluginHook(BaseHook):
pass
class TestPlugin(AirflowPlugin):
name = "test-plugin-cli"
hooks = [PluginHook]
class TestPluginsCommand(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
@mock_plugin_manager(plugins=[])
def test_should_display_no_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
assert 'No plugins loaded' in stdout
@mock_plugin_manager(plugins=[ComplexAirflowPlugin])
def test_should_display_one_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
print(stdout)
info = json.loads(stdout)
assert info == [
{
'name': 'test_plugin',
'macros': ['tests.plugins.test_plugin.plugin_macro'],
'executors': ['tests.plugins.test_plugin.PluginExecutor'],
'flask_blueprints': [
"<flask.blueprints.Blueprint: name='test_plugin' import_name='tests.plugins.test_plugin'>"
],
'appbuilder_views': [
{
'name': 'Test View',
'category': 'Test Plugin',
'view': 'tests.plugins.test_plugin.PluginTestAppBuilderBaseView',
}
],
'global_operator_extra_links': [
'<tests.test_utils.mock_operators.AirflowLink object>',
'<tests.test_utils.mock_operators.GithubLink object>',
],
'timetables': ['tests.plugins.test_plugin.CustomCronDataIntervalTimetable'],
'operator_extra_links': [
'<tests.test_utils.mock_operators.GoogleLink object>',
'<tests.test_utils.mock_operators.AirflowLink2 object>',
'<tests.test_utils.mock_operators.CustomOpLink object>',
'<tests.test_utils.mock_operators.CustomBaseIndexOpLink object>',
],
'hooks': ['tests.plugins.test_plugin.PluginHook'],
'listeners': ['tests.listeners.empty_listener'],
'source': None,
'appbuilder_menu_items': [
{'name': 'Google', 'href': 'https://www.google.com', 'category': 'Search'},
{
'name': 'apache',
'href': 'https://www.apache.org/',
'label': 'The Apache Software Foundation',
},
],
'ti_deps': ['<TIDep(CustomTestTriggerRule)>'],
}
]
get_listener_manager().clear()
@mock_plugin_manager(plugins=[TestPlugin])
def test_should_display_one_plugins_as_table(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=table']))
stdout = temp_stdout.getvalue()
stdout = "\n".join(line.rstrip(" ") for line in stdout.splitlines())
expected_output = textwrap.dedent(
"""\
name | hooks
================+===================================================
test-plugin-cli | tests.cli.commands.test_plugins_command.PluginHook
"""
)
self.assertEqual(stdout, expected_output)
| true
| true
|
7903a066a37362e1d8bae1b7251ccd6490045f0b
| 1,399
|
py
|
Python
|
bot/handlers/packs/list.py
|
Bixshadow1/sticker-thief
|
bda28b2f28ed65e35ac62c165c2517412b5f6f8f
|
[
"MIT"
] | 44
|
2018-10-30T14:47:14.000Z
|
2022-03-26T15:17:52.000Z
|
bot/handlers/packs/list.py
|
Bixshadow1/sticker-thief
|
bda28b2f28ed65e35ac62c165c2517412b5f6f8f
|
[
"MIT"
] | 37
|
2018-11-09T11:51:15.000Z
|
2021-12-27T15:08:48.000Z
|
bot/handlers/packs/list.py
|
Bixshadow1/sticker-thief
|
bda28b2f28ed65e35ac62c165c2517412b5f6f8f
|
[
"MIT"
] | 38
|
2019-03-27T21:12:23.000Z
|
2022-01-08T07:57:39.000Z
|
import logging
# noinspection PyPackageRequirements
from telegram.ext import CommandHandler, ConversationHandler
# noinspection PyPackageRequirements
from telegram import ChatAction, Update
from bot import stickersbot
from bot.utils import decorators
from bot.utils import utils
from bot.database.base import session_scope
from bot.database.models.pack import Pack
from bot.strings import Strings
logger = logging.getLogger(__name__)
@decorators.action(ChatAction.TYPING)
@decorators.restricted
@decorators.failwithmessage
def on_list_command(update: Update, _):
logger.info('/list')
# packs = db.get_user_packs(update.effective_user.id, as_namedtuple=True)
with session_scope() as session:
packs = session.query(Pack).filter_by(user_id=update.effective_user.id).order_by(Pack.title).all()
packs = packs[:98] # can't include more than 100 entities
strings_list = ['<a href="{}">{}</a> ({})'.format(utils.name2link(pack.name), pack.title, 'a' if pack.is_animated else 's') for pack in packs]
if not strings_list:
update.message.reply_text(Strings.LIST_NO_PACKS)
return
update.message.reply_html('• {}'.format('\n• '.join(strings_list)) + Strings.LIST_FOOTER)
return ConversationHandler.END # /list should end whatever conversation the user was having
stickersbot.add_handler(CommandHandler(['list', 'l'], on_list_command))
| 34.975
| 150
| 0.754825
|
import logging
from telegram.ext import CommandHandler, ConversationHandler
from telegram import ChatAction, Update
from bot import stickersbot
from bot.utils import decorators
from bot.utils import utils
from bot.database.base import session_scope
from bot.database.models.pack import Pack
from bot.strings import Strings
logger = logging.getLogger(__name__)
@decorators.action(ChatAction.TYPING)
@decorators.restricted
@decorators.failwithmessage
def on_list_command(update: Update, _):
logger.info('/list')
with session_scope() as session:
packs = session.query(Pack).filter_by(user_id=update.effective_user.id).order_by(Pack.title).all()
packs = packs[:98]
strings_list = ['<a href="{}">{}</a> ({})'.format(utils.name2link(pack.name), pack.title, 'a' if pack.is_animated else 's') for pack in packs]
if not strings_list:
update.message.reply_text(Strings.LIST_NO_PACKS)
return
update.message.reply_html('• {}'.format('\n• '.join(strings_list)) + Strings.LIST_FOOTER)
return ConversationHandler.END # /list should end whatever conversation the user was having
stickersbot.add_handler(CommandHandler(['list', 'l'], on_list_command))
| true
| true
|
7903a0d0a7d350892d692d86b8bbd1dc00694d86
| 257
|
py
|
Python
|
settings/__init__.py
|
ppold/lambtastic
|
29d96f0f111a950a6ecd7af1cdc172addd64de04
|
[
"Unlicense"
] | null | null | null |
settings/__init__.py
|
ppold/lambtastic
|
29d96f0f111a950a6ecd7af1cdc172addd64de04
|
[
"Unlicense"
] | 1
|
2021-06-01T21:53:04.000Z
|
2021-06-01T21:53:04.000Z
|
settings/__init__.py
|
ppold/lambtastic
|
29d96f0f111a950a6ecd7af1cdc172addd64de04
|
[
"Unlicense"
] | null | null | null |
""" core app configuration """
import os
environment = os.getenv('LAMBTASTIC_ENV', 'development')
if environment == 'testing':
from .testing import *
elif environment == 'production':
from .production import *
else:
from .development import *
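# --- Illustrative sketch added by the editor; not part of the original module. ---
# From application code, the active settings module is selected purely by the
# LAMBTASTIC_ENV variable before this package is imported:
import os
os.environ['LAMBTASTIC_ENV'] = 'testing'   # or 'production'; anything else -> development
import settings                            # re-exports the names from settings.testing
print(settings.environment)                # 'testing'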
| 21.416667
| 56
| 0.696498
|
import os
environment = os.getenv('LAMBTASTIC_ENV', 'development')
if environment == 'testing':
from .testing import *
elif environment == 'production':
from .production import *
else:
from .development import *
| true
| true
|
7903a182889892983f3e0a32fa7fa89dda9d112b
| 1,291
|
py
|
Python
|
examples/plot_obstacle_avoidance_2d.py
|
maotto/movement_primitives
|
b79c78a5a0667cc24a26b7b6cc64a5762d8f4dd4
|
[
"BSD-3-Clause"
] | 17
|
2021-11-17T15:36:16.000Z
|
2022-03-26T08:49:25.000Z
|
examples/plot_obstacle_avoidance_2d.py
|
DavidYaonanZhu/movement_primitives
|
ce355837f06cb5fada24be7259cb0305e8ea5d91
|
[
"BSD-3-Clause"
] | 9
|
2021-12-01T10:33:04.000Z
|
2022-03-23T12:41:39.000Z
|
examples/plot_obstacle_avoidance_2d.py
|
DavidYaonanZhu/movement_primitives
|
ce355837f06cb5fada24be7259cb0305e8ea5d91
|
[
"BSD-3-Clause"
] | 8
|
2021-11-25T03:53:40.000Z
|
2022-03-31T03:19:25.000Z
|
"""
========================
Obstacle Avoidance in 2D
========================
Plots a 2D DMP that goes through a point obstacle when there is no coupling
term for obstacle avoidance and a 2D DMP that avoids the point obstacle with
a coupling term.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D
execution_time = 1.0
start_y = np.zeros(2)
goal_y = np.ones(2)
dmp = DMP(n_dims=2, execution_time=execution_time, n_weights_per_dim=3)
dmp.configure(start_y=start_y, goal_y=goal_y)
dmp.set_weights(np.array([-50.0, 100.0, 300.0, -200.0, -200.0, -200.0]))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("x")
ax.set_ylabel("y")
obstacle_position = np.array([0.92, 0.5])
T, Y = dmp.open_loop(run_t=execution_time)
ax.plot(Y[:, 0], Y[:, 1], label="Original")
coupling_term = CouplingTermObstacleAvoidance2D(obstacle_position)
T, Y = dmp.open_loop(run_t=execution_time, coupling_term=coupling_term)
ax.plot(Y[:, 0], Y[:, 1], label="Obstacle avoidance")
ax.scatter(start_y[0], start_y[1], c="r", label="Start")
ax.scatter(goal_y[0], goal_y[1], c="g", label="Goal")
ax.scatter(obstacle_position[0], obstacle_position[1], c="y", label="Obstacle")
ax.legend()
plt.tight_layout()
plt.show()
| 30.738095
| 79
| 0.711077
|
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D
execution_time = 1.0
start_y = np.zeros(2)
goal_y = np.ones(2)
dmp = DMP(n_dims=2, execution_time=execution_time, n_weights_per_dim=3)
dmp.configure(start_y=start_y, goal_y=goal_y)
dmp.set_weights(np.array([-50.0, 100.0, 300.0, -200.0, -200.0, -200.0]))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("x")
ax.set_ylabel("y")
obstacle_position = np.array([0.92, 0.5])
T, Y = dmp.open_loop(run_t=execution_time)
ax.plot(Y[:, 0], Y[:, 1], label="Original")
coupling_term = CouplingTermObstacleAvoidance2D(obstacle_position)
T, Y = dmp.open_loop(run_t=execution_time, coupling_term=coupling_term)
ax.plot(Y[:, 0], Y[:, 1], label="Obstacle avoidance")
ax.scatter(start_y[0], start_y[1], c="r", label="Start")
ax.scatter(goal_y[0], goal_y[1], c="g", label="Goal")
ax.scatter(obstacle_position[0], obstacle_position[1], c="y", label="Obstacle")
ax.legend()
plt.tight_layout()
plt.show()
| true
| true
|
7903a2b3166c68ad45b0be16d923c6908edcc39f
| 3,730
|
py
|
Python
|
exp_main.py
|
dongzhiming/cgp-cnn-PyTorch
|
be9d3ee63741ef59bac7cf3c905833d747267207
|
[
"MIT"
] | null | null | null |
exp_main.py
|
dongzhiming/cgp-cnn-PyTorch
|
be9d3ee63741ef59bac7cf3c905833d747267207
|
[
"MIT"
] | null | null | null |
exp_main.py
|
dongzhiming/cgp-cnn-PyTorch
|
be9d3ee63741ef59bac7cf3c905833d747267207
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import pickle
import pandas as pd
from cgp import *
from cgp_config import *
from cnn_train import CNN_train
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evolving CAE structures')
parser.add_argument('--gpu_num', '-g', type=int, default=1, help='Num. of GPUs')
parser.add_argument('--lam', '-l', type=int, default=2, help='Num. of offsprings')
parser.add_argument('--net_info_file', default='network_info.pickle', help='Network information file name')
parser.add_argument('--log_file', default='./log_cgp.txt', help='Log file name')
parser.add_argument('--mode', '-m', default='evolution', help='Mode (evolution / retrain / reevolution)')
parser.add_argument('--init', '-i', action='store_true')
args = parser.parse_args()
# --- Optimization of the CNN architecture ---
if args.mode == 'evolution':
# Create CGP configuration and save network information
network_info = CgpInfoConvSet(rows=5, cols=30, level_back=10, min_active_num=1, max_active_num=30)
with open(args.net_info_file, mode='wb') as f:
pickle.dump(network_info, f)
# Evaluation function for CGP (training CNN and return validation accuracy)
imgSize = 32
eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
imgSize=imgSize)
# Execute evolution
cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize, init=args.init)
cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file=args.log_file)
# --- Retraining evolved architecture ---
elif args.mode == 'retrain':
print('Retrain')
# In the case of existing log_cgp.txt
# Load CGP configuration
with open(args.net_info_file, mode='rb') as f:
network_info = pickle.load(f)
# Load network architecture
cgp = CGP(network_info, None)
data = pd.read_csv(args.log_file, header=None) # Load log file
cgp.load_log(list(data.tail(1).values.flatten().astype(int))) # Read the log at final generation
print(cgp._log_data(net_info_type='active_only', start_time=0))
# Retraining the network
temp = CNN_train('cifar10', validation=False, verbose=True, batchsize=128)
acc = temp(cgp.pop[0].active_net_list(), 0, epoch_num=500, out_model='retrained_net.model')
print(acc)
# # otherwise (in the case where we do not have a log file.)
# temp = CNN_train('haze1', validation=False, verbose=True, imgSize=128, batchsize=16)
# cgp = [['input', 0], ['S_SumConvBlock_64_3', 0], ['S_ConvBlock_64_5', 1], ['S_SumConvBlock_128_1', 2], ['S_SumConvBlock_64_1', 3], ['S_SumConvBlock_64_5', 4], ['S_DeConvBlock_3_3', 5]]
# acc = temp(cgp, 0, epoch_num=500, out_model='retrained_net.model')
elif args.mode == 'reevolution':
# restart evolution
print('Restart Evolution')
imgSize = 64
with open('network_info.pickle', mode='rb') as f:
network_info = pickle.load(f)
eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
imgSize=imgSize)
cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize)
data = pd.read_csv('./log_cgp.txt', header=None)
cgp.load_log(list(data.tail(1).values.flatten().astype(int)))
cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file='./log_restat.txt')
else:
print('Undefined mode. Please check the "-m evolution or retrain or reevolution" ')
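# --- Editor's note (not part of the original script): the three modes above map to
# command-line invocations such as the following; the GPU count and offspring size are
# placeholders taken from the argparse defaults.
#   python exp_main.py --mode evolution   --gpu_num 1 --lam 2
#   python exp_main.py --mode retrain
#   python exp_main.py --mode reevolution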
| 49.078947
| 194
| 0.657105
|
import argparse
import pickle
import pandas as pd
from cgp import *
from cgp_config import *
from cnn_train import CNN_train
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evolving CAE structures')
parser.add_argument('--gpu_num', '-g', type=int, default=1, help='Num. of GPUs')
parser.add_argument('--lam', '-l', type=int, default=2, help='Num. of offsprings')
parser.add_argument('--net_info_file', default='network_info.pickle', help='Network information file name')
parser.add_argument('--log_file', default='./log_cgp.txt', help='Log file name')
parser.add_argument('--mode', '-m', default='evolution', help='Mode (evolution / retrain / reevolution)')
parser.add_argument('--init', '-i', action='store_true')
args = parser.parse_args()
if args.mode == 'evolution':
network_info = CgpInfoConvSet(rows=5, cols=30, level_back=10, min_active_num=1, max_active_num=30)
with open(args.net_info_file, mode='wb') as f:
pickle.dump(network_info, f)
imgSize = 32
eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
imgSize=imgSize)
cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize, init=args.init)
cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file=args.log_file)
elif args.mode == 'retrain':
print('Retrain')
with open(args.net_info_file, mode='rb') as f:
network_info = pickle.load(f)
cgp = CGP(network_info, None)
data = pd.read_csv(args.log_file, header=None)
cgp.load_log(list(data.tail(1).values.flatten().astype(int)))
print(cgp._log_data(net_info_type='active_only', start_time=0))
temp = CNN_train('cifar10', validation=False, verbose=True, batchsize=128)
acc = temp(cgp.pop[0].active_net_list(), 0, epoch_num=500, out_model='retrained_net.model')
print(acc)
    elif args.mode == 'reevolution':
print('Restart Evolution')
imgSize = 64
with open('network_info.pickle', mode='rb') as f:
network_info = pickle.load(f)
eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
imgSize=imgSize)
cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize)
data = pd.read_csv('./log_cgp.txt', header=None)
cgp.load_log(list(data.tail(1).values.flatten().astype(int)))
cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file='./log_restat.txt')
else:
print('Undefined mode. Please check the "-m evolution or retrain or reevolution" ')
| true
| true
|
7903a33e3a53df70eddcd8b57369e1f35cdec02f
| 3,836
|
py
|
Python
|
tftrt/examples/object_detection/test.py
|
npanpaliya/tensorrt
|
74bbdaad7c0fa0a559cb98b8ba0f98059aca3329
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:05:13.000Z
|
2019-10-10T06:05:13.000Z
|
tftrt/examples/object_detection/test.py
|
npanpaliya/tensorrt
|
74bbdaad7c0fa0a559cb98b8ba0f98059aca3329
|
[
"Apache-2.0"
] | null | null | null |
tftrt/examples/object_detection/test.py
|
npanpaliya/tensorrt
|
74bbdaad7c0fa0a559cb98b8ba0f98059aca3329
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:05:15.000Z
|
2019-10-10T06:05:15.000Z
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
import json
from .object_detection import build_model, download_dataset, optimize_model, benchmark_model
def test(test_config_path):
"""Runs an object detection test configuration
This runs an object detection test configuration. This involves
1. Download and build a model architecture (or use cached).
    2. Optimize the model architecture
3. Benchmark the optimized model against a dataset
4. (optional) Run assertions to check the benchmark output
The input to this function is a JSON file which specifies the test
configuration.
example_test_config.json:
{
"model_config": { ... },
"optimization_config": { ... },
"benchmark_config": { ... },
"assertions": [ ... ]
}
model_config: A dictionary of arguments passed to build_model, which
        specifies the pre-optimized model architecture. The model will be passed
to optimize_model.
optimization_config: A dictionary of arguments passed to optimize_model.
Please see help(optimize_model) for more details.
benchmark_config: A dictionary of arguments passed to benchmark_model.
Please see help(benchmark_model) for more details.
assertions: A list of strings containing python code that will be
evaluated. If the code returns false, an error will be thrown. These
assertions can reference any variables local to this 'test' function.
Some useful values are
statistics['map']
statistics['avg_latency']
statistics['avg_throughput']
Args
----
test_config_path: A string corresponding to the test configuration
JSON file.
"""
    with open(test_config_path, 'r') as f:
test_config = json.load(f)
print(json.dumps(test_config, sort_keys=True, indent=4))
frozen_graph = build_model(
**test_config['model_config'])
# optimize model using source model
frozen_graph = optimize_model(
frozen_graph,
**test_config['optimization_config'])
# benchmark optimized model
statistics = benchmark_model(
frozen_graph=frozen_graph,
**test_config['benchmark_config'])
# print some statistics to command line
print_statistics = statistics
if 'runtimes_ms' in print_statistics:
print_statistics.pop('runtimes_ms')
print(json.dumps(print_statistics, sort_keys=True, indent=4))
# run assertions
if 'assertions' in test_config:
for a in test_config['assertions']:
if not eval(a):
raise AssertionError('ASSERTION FAILED: %s' % a)
else:
print('ASSERTION PASSED: %s' % a)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'test_config_path',
help='Path of JSON file containing test configuration. Please'
'see help(tftrt.examples.object_detection.test) for more information')
args=parser.parse_args()
test(args.test_config_path)
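# --- Illustrative test configuration added by the editor; not part of the original file. ---
# Only the four top-level keys come from the docstring of test(); every value and every
# argument name inside the sub-dicts is a placeholder and must match the actual signatures
# of build_model / optimize_model / benchmark_model in this package.
import json

example_config = {
    "model_config": {"model_name": "ssd_mobilenet_v1_coco"},
    "optimization_config": {"use_trt": True, "precision_mode": "FP16"},
    "benchmark_config": {"images_dir": "coco/val2017",
                         "annotation_path": "coco/annotations/instances_val2017.json"},
    "assertions": ["statistics['map'] > 0.27"],
}
with open("example_test_config.json", "w") as f:
    json.dump(example_config, f, indent=4)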
| 36.188679
| 92
| 0.67049
|
import argparse
import json
from .object_detection import build_model, download_dataset, optimize_model, benchmark_model
def test(test_config_path):
    with open(test_config_path, 'r') as f:
test_config = json.load(f)
print(json.dumps(test_config, sort_keys=True, indent=4))
frozen_graph = build_model(
**test_config['model_config'])
frozen_graph = optimize_model(
frozen_graph,
**test_config['optimization_config'])
statistics = benchmark_model(
frozen_graph=frozen_graph,
**test_config['benchmark_config'])
print_statistics = statistics
if 'runtimes_ms' in print_statistics:
print_statistics.pop('runtimes_ms')
print(json.dumps(print_statistics, sort_keys=True, indent=4))
if 'assertions' in test_config:
for a in test_config['assertions']:
if not eval(a):
raise AssertionError('ASSERTION FAILED: %s' % a)
else:
print('ASSERTION PASSED: %s' % a)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'test_config_path',
help='Path of JSON file containing test configuration. Please'
'see help(tftrt.examples.object_detection.test) for more information')
args=parser.parse_args()
test(args.test_config_path)
| true
| true
|
7903a3d3edd3e1433d6a5728a3155b0ca2d1b362
| 2,852
|
py
|
Python
|
oxlos/migrations/0001_initial.py
|
jtauber/oxlos2
|
5122a3d6407e233c0b4b0c001d66ef7c1fefd0d2
|
[
"MIT"
] | 1
|
2017-11-26T03:41:02.000Z
|
2017-11-26T03:41:02.000Z
|
oxlos/migrations/0001_initial.py
|
jtauber/oxlos2
|
5122a3d6407e233c0b4b0c001d66ef7c1fefd0d2
|
[
"MIT"
] | null | null | null |
oxlos/migrations/0001_initial.py
|
jtauber/oxlos2
|
5122a3d6407e233c0b4b0c001d66ef7c1fefd0d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-15 06:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('pinax_teams', '0002_add_simple_models'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', jsonfield.fields.JSONField()),
],
),
migrations.CreateModel(
name='ItemResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('answer', models.TextField()),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oxlos.Item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.TextField()),
('description_html', models.TextField(blank=True, editable=False)),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_teams.SimpleTeam')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.TextField()),
('description_html', models.TextField(blank=True, editable=False)),
('instructions', models.TextField()),
('instructions_html', models.TextField(blank=True, editable=False)),
('question_template', models.TextField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='oxlos.Project')),
],
),
migrations.AddField(
model_name='item',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oxlos.Task'),
),
]
| 41.941176
| 134
| 0.596073
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('pinax_teams', '0002_add_simple_models'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', jsonfield.fields.JSONField()),
],
),
migrations.CreateModel(
name='ItemResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('answer', models.TextField()),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oxlos.Item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.TextField()),
('description_html', models.TextField(blank=True, editable=False)),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_teams.SimpleTeam')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.TextField()),
('description_html', models.TextField(blank=True, editable=False)),
('instructions', models.TextField()),
('instructions_html', models.TextField(blank=True, editable=False)),
('question_template', models.TextField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='oxlos.Project')),
],
),
migrations.AddField(
model_name='item',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oxlos.Task'),
),
]
| true
| true
|
7903a3e213f7a0608dcf8761336c290a8584de29
| 578
|
py
|
Python
|
app/application.py
|
ihor-nahuliak/task-23-jul-2019
|
f32d3ef1df985f77998b5d296b524af99f82c3ef
|
[
"MIT"
] | null | null | null |
app/application.py
|
ihor-nahuliak/task-23-jul-2019
|
f32d3ef1df985f77998b5d296b524af99f82c3ef
|
[
"MIT"
] | null | null | null |
app/application.py
|
ihor-nahuliak/task-23-jul-2019
|
f32d3ef1df985f77998b5d296b524af99f82c3ef
|
[
"MIT"
] | null | null | null |
import uvloop
import asyncio
import jinja2
import aiohttp_jinja2
from aiohttp import web
from quicksets import settings
from app.middlewares import middlewares
from app.views import routes
async def create_app():
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = web.Application(middlewares=middlewares)
aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(settings.TEMPLATES_PATH))
app.add_routes(routes)
return app
if __name__ == '__main__':
app = create_app()
web.run_app(app, host=settings.HOST, port=settings.PORT)
| 24.083333
| 69
| 0.769896
|
import uvloop
import asyncio
import jinja2
import aiohttp_jinja2
from aiohttp import web
from quicksets import settings
from app.middlewares import middlewares
from app.views import routes
async def create_app():
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = web.Application(middlewares=middlewares)
aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(settings.TEMPLATES_PATH))
app.add_routes(routes)
return app
if __name__ == '__main__':
app = create_app()
web.run_app(app, host=settings.HOST, port=settings.PORT)
| true
| true
|
7903a427a62687795f2f212998a785a69152972f
| 962
|
py
|
Python
|
try.py
|
peterzheng98/Valentine-Gift
|
d4212c2e648682ccb173dfa39a0873fc0ad2b9c3
|
[
"MIT"
] | 2
|
2020-04-09T07:29:06.000Z
|
2020-10-04T02:19:21.000Z
|
try.py
|
peterzheng98/Valentine-Gift
|
d4212c2e648682ccb173dfa39a0873fc0ad2b9c3
|
[
"MIT"
] | null | null | null |
try.py
|
peterzheng98/Valentine-Gift
|
d4212c2e648682ccb173dfa39a0873fc0ad2b9c3
|
[
"MIT"
] | null | null | null |
"""
ECB mode has no initialization vector (offset)
"""
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
from utils import DES_decrypt, DES_encrypt
def add_to_16(text):
if len(text.encode('utf-8')) % 16:
add = 16 - (len(text.encode('utf-8')) % 16)
else:
add = 0
text = text + ('\0' * add)
return text.encode('utf-8')
# Encryption function
def encrypt(text):
key = '9999999999999999'.encode('utf-8')
mode = AES.MODE_ECB
text = add_to_16(text)
cryptos = AES.new(key, mode)
cipher_text = cryptos.encrypt(text)
return b2a_hex(cipher_text)
# After decrypting, strip the trailing '\0' padding that was added before encryption
def decrypt(text):
key = '9999999999999999'.encode('utf-8')
mode = AES.MODE_ECB
cryptor = AES.new(key, mode)
plain_text = cryptor.decrypt(a2b_hex(text))
return bytes.decode(plain_text).rstrip('\0')
if __name__ == '__main__':
    e = DES_encrypt("hello world")  # encrypt
    print(type(e))
    d = DES_decrypt(e)  # decrypt
    print("Encrypted:", e)
    print("Decrypted:", d)
| 22.904762
| 51
| 0.626819
|
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
from utils import DES_decrypt, DES_encrypt
def add_to_16(text):
if len(text.encode('utf-8')) % 16:
add = 16 - (len(text.encode('utf-8')) % 16)
else:
add = 0
text = text + ('\0' * add)
return text.encode('utf-8')
def encrypt(text):
key = '9999999999999999'.encode('utf-8')
mode = AES.MODE_ECB
text = add_to_16(text)
cryptos = AES.new(key, mode)
cipher_text = cryptos.encrypt(text)
return b2a_hex(cipher_text)
def decrypt(text):
key = '9999999999999999'.encode('utf-8')
mode = AES.MODE_ECB
cryptor = AES.new(key, mode)
plain_text = cryptor.decrypt(a2b_hex(text))
return bytes.decode(plain_text).rstrip('\0')
if __name__ == '__main__':
e = DES_encrypt("hello world")
print(type(e))
d = DES_decrypt(e)
print("加密:", e)
print("解密:", d)
| true
| true
|
7903a4f1c2fc12303c69a76acfe1a1e034df61a3
| 4,943
|
py
|
Python
|
esp_sdk/models/role.py
|
zimmermanc/esp-sdk-python
|
cdef13c0dc6c3996b6c444160c71b2f1e3910c97
|
[
"MIT"
] | 6
|
2017-06-05T20:37:19.000Z
|
2019-04-10T08:43:59.000Z
|
esp_sdk/models/role.py
|
zimmermanc/esp-sdk-python
|
cdef13c0dc6c3996b6c444160c71b2f1e3910c97
|
[
"MIT"
] | 18
|
2016-06-22T16:14:33.000Z
|
2018-10-29T21:53:15.000Z
|
esp_sdk/models/role.py
|
zimmermanc/esp-sdk-python
|
cdef13c0dc6c3996b6c444160c71b2f1e3910c97
|
[
"MIT"
] | 18
|
2016-07-27T19:20:01.000Z
|
2020-11-17T02:09:58.000Z
|
# coding: utf-8
"""
ESP Documentation
The Evident Security Platform API (version 2.0) is designed to allow users granular control over their Amazon Web Service security experience by allowing them to review alerts, monitor signatures, and create custom signatures.
OpenAPI spec version: v2_sdk
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
from ..extensions.base_object import BaseObject
import re
class Role(BaseObject):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, created_at=None, updated_at=None):
"""
Role - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'created_at': 'datetime',
'updated_at': 'datetime'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'created_at': 'created_at',
'updated_at': 'updated_at'
}
self._id = id
self._name = name
self._created_at = created_at
self._updated_at = updated_at
@property
def id(self):
"""
Gets the id of this Role.
Unique ID
:return: The id of this Role.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Role.
Unique ID
:param id: The id of this Role.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Role.
The name of the role
:return: The name of this Role.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Role.
The name of the role
:param name: The name of this Role.
:type: str
"""
self._name = name
@property
def created_at(self):
"""
Gets the created_at of this Role.
ISO 8601 timestamp when the resource was created
:return: The created_at of this Role.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this Role.
ISO 8601 timestamp when the resource was created
:param created_at: The created_at of this Role.
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""
Gets the updated_at of this Role.
ISO 8601 timestamp when the resource was updated
:return: The updated_at of this Role.
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""
Sets the updated_at of this Role.
ISO 8601 timestamp when the resource was updated
:param updated_at: The updated_at of this Role.
:type: datetime
"""
self._updated_at = updated_at
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Role):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
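# --- Illustrative usage added by the editor; not part of the generated module. ---
# Field values are placeholders.
role = Role(id=1, name='auditor')
print(role.to_dict())                       # {'id': 1, 'name': 'auditor', 'created_at': None, 'updated_at': None}
print(role == Role(id=1, name='auditor'))   # True: __eq__ compares the instances' __dict__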
| 25.091371
| 230
| 0.539146
|
from pprint import pformat
from six import iteritems
from ..extensions.base_object import BaseObject
import re
class Role(BaseObject):
def __init__(self, id=None, name=None, created_at=None, updated_at=None):
self.swagger_types = {
'id': 'int',
'name': 'str',
'created_at': 'datetime',
'updated_at': 'datetime'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'created_at': 'created_at',
'updated_at': 'updated_at'
}
self._id = id
self._name = name
self._created_at = created_at
self._updated_at = updated_at
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def created_at(self):
return self._created_at
@created_at.setter
def created_at(self, created_at):
self._created_at = created_at
@property
def updated_at(self):
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
self._updated_at = updated_at
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Role):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
7903a6300ecfd9746ee2562acb15c2944a87f353
| 25,065
|
py
|
Python
|
scanpy/_utils.py
|
jwrth/scanpy
|
9fa01020d1f0712166b3591e67d0c766c765eca0
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/_utils.py
|
jwrth/scanpy
|
9fa01020d1f0712166b3591e67d0c766c765eca0
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/_utils.py
|
jwrth/scanpy
|
9fa01020d1f0712166b3591e67d0c766c765eca0
|
[
"BSD-3-Clause"
] | null | null | null |
"""Utility functions and classes
"""
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
# e.g. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
AnyRandom = Union[None, int, random.RandomState] # maybe in the future random.Generator
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
# make this a warning, not an error
# it might be useful for people to still be able to run it
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
"""
    Decorator which marks a function's keyword arguments as deprecated. It will
result in a warning being emitted when the deprecated keyword argument is
used, and the function being called with the new argument.
Parameters
----------
arg_mapping
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
# reset filter
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
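# Illustrative usage sketch, not part of the original scanpy code: a minimal,
# hypothetical application of `deprecated_arg_names`. The function `_scale_example`
# and its argument names are made up purely for illustration.
def _deprecated_arg_names_example():
    @deprecated_arg_names({'max_value': 'max_val'})
    def _scale_example(x, max_val=10):
        return min(x, max_val)
    # Passing the old keyword emits a DeprecationWarning and forwards it to `max_val`.
    return _scale_example(42, max_value=5)  # -> 5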
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
# Python’s import mechanism seems to add this to `scanpy`’s attributes
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
def _check_array_function_arguments(**kwargs):
"""Checks for invalid arguments when an array is passed.
Helper for functions that work on either AnnData objects or array-likes.
"""
# TODO: Figure out a better solution for documenting dispatched functions
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
"""
Normalize checking `use_raw`.
    My intention here is to also provide a single place to throw a deprecation warning from in the future.
"""
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
# --------------------------------------------------------------------------------
# Graph stuff
# --------------------------------------------------------------------------------
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except:
pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
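# Illustrative sketch, not part of the original scanpy code (assumes python-igraph is
# installed): round-trips a tiny directed adjacency matrix through the two helpers
# above. With `directed=True` and no duplicate edges the reconstruction equals the input.
def _igraph_roundtrip_example():
    adj = sparse.csr_matrix(np.array([[0.0, 1.0], [1.0, 0.0]]))
    g = get_igraph_from_adjacency(adj, directed=True)
    return get_sparse_from_igraph(g, weight_attr='weight').toarray()  # [[0., 1.], [1., 0.]]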
# --------------------------------------------------------------------------------
# Group stuff
# --------------------------------------------------------------------------------
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata
prediction
Field name of adata.obs.
reference
Field name of adata.obs.
normalization
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold
Do not consider associations whose overlap is below this fraction.
max_n_names
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
asso_names
List of associated reference names
(`max_n_names` for each predicted name).
asso_matrix
Matrix where rows correspond to the predicted labels and columns to the
reference labels, entries are proportional to degree of association.
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
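# Illustrative usage sketch, not part of the original scanpy code: calling the
# association-matrix helper on a hypothetical AnnData whose `.obs` holds a clustering
# in 'leiden' and annotations in 'cell_type'. Both column names are assumptions.
def _association_matrix_example(adata):
    names, matrix = compute_association_matrix_of_groups(
        adata, prediction='leiden', reference='cell_type', normalization='prediction'
    )
    # matrix[i, j] is the fraction of predicted group i contained in reference group j.
    return names, matrix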
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
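# Illustrative sketch with toy labels, not part of the original scanpy code: reference
# group 'a' is mostly covered by predicted group 0, and 'b' by predicted group 1, so
# those predicted labels come first in the result for each reference label.
def _identify_groups_example():
    ref = np.array(['a', 'a', 'a', 'b', 'b'])
    pred = np.array([0, 0, 1, 1, 1])
    best = identify_groups(ref, pred)
    return best['a'][0], best['b'][0]  # -> (0, 1)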
# --------------------------------------------------------------------------------
# Other stuff
# --------------------------------------------------------------------------------
# backwards compat... remove this in the future
def sanitize_anndata(adata):
"""Transform string annotations to categoricals."""
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
def moving_average(a: np.ndarray, n: int):
"""Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
        Number of entries to average over. n=2 means averaging over the current
        and the previous entry.
Returns
-------
An array view storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
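# Illustrative sketch, not part of the original scanpy code: for a = [1, 2, 3, 4] and
# n = 2, each output entry averages the current and previous values, giving [1.5, 2.5, 3.5].
def _moving_average_example():
    return moving_average(np.array([1.0, 2.0, 3.0, 4.0]), n=2)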
# --------------------------------------------------------------------------------
# Deal with tool parameters
# --------------------------------------------------------------------------------
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
"""\
Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params
new_params
check
Returns
-------
updated_params
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
raise ValueError(
'\''
+ key
+ '\' is not a valid parameter key, '
+ 'consider one of \n'
+ str(list(old_params.keys()))
)
if val is not None:
updated_params[key] = val
return updated_params
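# Illustrative sketch, not part of the original scanpy code: with check=True only keys
# already present in the old parameters may be overridden; unknown keys raise ValueError.
def _update_params_example():
    old = {'n_neighbors': 15, 'metric': 'euclidean'}
    return update_params(old, {'metric': 'cosine'}, check=True)  # metric becomes 'cosine'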
# --------------------------------------------------------------------------------
# Others
# --------------------------------------------------------------------------------
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
"""Checks values of X to ensure it is count data"""
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
# Check no negatives
if np.signbit(data).any():
return False
# Check all are integers
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
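# Illustrative sketch, not part of the original scanpy code: integer count data passes
# the check, while fractional or negative values do not.
def _check_nonnegative_integers_example():
    assert check_nonnegative_integers(np.array([0, 1, 2, 3]))
    assert not check_nonnegative_integers(np.array([0.5, 1.0]))
    assert not check_nonnegative_integers(np.array([-1, 2]))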
def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key]."""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""\
Subsample a fraction of 1/subsample samples from the rows of X.
Parameters
----------
X
Data array.
subsample
1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
# this sequence is defined simply by skipping rows
# is faster than sampling
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Subsample n samples from rows of array.
Parameters
----------
X
Data array.
n
Sample size.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if n < 0:
        raise ValueError('n must be greater than or equal to 0')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
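# Illustrative sketch, not part of the original scanpy code: with seed == 0 `subsample`
# keeps every `subsample`-th row deterministically, while `subsample_n` draws a random
# subset of the requested size.
def _subsample_example():
    X = np.arange(20, dtype=float).reshape(10, 2)
    Xs, rows = subsample(X, subsample=2, seed=0)  # rows 0, 2, 4, 6, 8
    Xn, rows_n = subsample_n(X, n=3, seed=1)      # 3 random rows
    return Xs.shape, Xn.shape                     # -> ((5, 2), (3, 2))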
def check_presence_download(filename: Path, backup_url):
"""Check if file is present otherwise download."""
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
"""Imports a module in a way that it’s only executed on member access"""
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
# Make module with proper locking and get it inserted into sys.modules.
loader.exec_module(module)
return module
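# Illustrative sketch, not part of the original scanpy code: the returned module object
# defers executing the target module until an attribute is first accessed (or is
# returned directly if it was already imported).
def _lazy_import_example():
    json = lazy_import('json')
    return json.dumps({'lazy': True})  # attribute access triggers the actual import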
# --------------------------------------------------------------------------------
# Neighbors
# --------------------------------------------------------------------------------
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
"""Convenience class for accessing neighbors graph representations.
    Allows access to neighbors distances, connectivities and settings
dictionary in a uniform manner.
Parameters
----------
adata
AnnData object.
key
This defines where to look for neighbors dictionary,
connectivities, distances.
neigh = NeighborsView(adata, key)
neigh['distances']
neigh['connectivities']
neigh['params']
'connectivities' in neigh
'params' in neigh
is the same as
adata.obsp[adata.uns[key]['distances_key']]
adata.obsp[adata.uns[key]['connectivities_key']]
adata.uns[key]['params']
adata.uns[key]['connectivities_key'] in adata.obsp
'params' in adata.uns[key]
"""
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
# fallback to uns
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
"""Choose connectivities from neighbbors or another obsp column"""
if obsp is not None and neighbors_key is not None:
raise ValueError(
'You can\'t specify both obsp, neighbors_key. ' 'Please select only one.'
)
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities']
| 32.636719
| 105
| 0.596848
|
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
AnyRandom = Union[None, int, random.RandomState]
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
def _check_array_function_arguments(**kwargs):
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
def get_igraph_from_adjacency(adjacency, directed=None):
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except:
pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
if normalization == 'prediction':
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
def sanitize_anndata(adata):
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
def moving_average(a: np.ndarray, n: int):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
updated_params = dict(old_params)
if new_params:
for key, val in new_params.items():
if key not in old_params and check:
raise ValueError(
'\''
+ key
+ '\' is not a valid parameter key, '
+ 'consider one of \n'
+ str(list(old_params.keys()))
)
if val is not None:
updated_params[key] = val
return updated_params
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
if np.signbit(data).any():
return False
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
def select_groups(adata, groups_order_subset='all', key='groups'):
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
if n < 0:
        raise ValueError('n must be greater than or equal to 0')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
def check_presence_download(filename: Path, backup_url):
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
loader.exec_module(module)
return module
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
if obsp is not None and neighbors_key is not None:
raise ValueError(
'You can\'t specify both obsp, neighbors_key. ' 'Please select only one.'
)
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities']
| true
| true
|
7903a6723125475069758ef729d05f17c07e573c
| 19,928
|
py
|
Python
|
roundoff.py
|
garrettkatz/rnn-fxpts
|
0e4ea0fe89c51764f000610957d0382917fe227c
|
[
"MIT"
] | 2
|
2019-11-19T07:40:44.000Z
|
2021-11-13T09:55:07.000Z
|
roundoff.py
|
garrettkatz/rnn-fxpts
|
0e4ea0fe89c51764f000610957d0382917fe227c
|
[
"MIT"
] | 1
|
2016-12-09T18:04:08.000Z
|
2016-12-09T18:04:19.000Z
|
roundoff.py
|
garrettkatz/rnn-fxpts
|
0e4ea0fe89c51764f000610957d0382917fe227c
|
[
"MIT"
] | 2
|
2017-07-21T01:19:10.000Z
|
2019-06-26T05:37:05.000Z
|
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
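# Illustrative sketch, not part of the original experiments code: the same relative
# error computation as above, applied to a random network instead of a saved results
# file. It reuses the rnn_fxpts helpers with the call signatures used in this module;
# the network size and scaling are arbitrary choices for illustration.
def _relative_error_sketch(N=5):
    W = 0.5 * np.random.randn(N, N)
    V = np.random.randn(N, 1)
    fxV, converged = rfx.refine_fxpts_capped(W, V)
    margin = rfx.estimate_forward_error(W, fxV)
    re = np.fabs((np.tanh(W.dot(fxV)) - fxV) / margin)
    return re.max(axis=0), converged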
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
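# Illustrative usage sketch, not part of the original experiments code: 'my_test_data',
# N=10 and samp=0 are placeholder values; the call expects the corresponding
# results/baseline_re_*.npz and results/TvB_*.pkl files to already exist on disk.
def _baseline_re_example():
    return baseline_re_single_analysis('my_test_data', N=10, samp=0, cap=10)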
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
    Runs baseline_re_single_analysis on all networks in test_data_id whose size is in Ns.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Compute and save relative distances between pairs of points found by fiber traversal.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
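# Illustrative sketch, not part of the original experiments code, of driving the
# relative-distance analyses. The test-data id and network sizes are placeholders;
# num_procs=0 runs serially, num_procs>0 uses a multiprocessing pool of that size.
def _run_rd_example():
    run_baseline_rd('my_test_data', Ns=[10, 20], num_procs=0)
    run_traverse_rd('my_test_data', Ns=[10, 20], num_procs=4)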
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Use simple unique test: if max absolute coordinate-wise difference < 2**-32
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
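# Illustrative sketch, not part of the original experiments code, of the bucketing used
# above: pairwise max coordinate-wise distances are clipped onto one histogram bucket
# per integer power of 2, with exact zeros mapped to the smallest bucket.
def _log2_bucket_sketch():
    bins = np.arange(-1025, 3)
    dists = np.array([0.0, 1e-9, 0.25, 1.5])
    dists[dists == 0] = 2.0 ** bins[0]
    logdists = np.clip(np.log2(dists), bins[0], bins[-1])
    hist, _ = np.histogram(logdists, bins=bins)
    return hist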
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
    test_data_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
    plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$')  # 'Max Coordinate-wise Distance'
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
| 44.088496
| 163
| 0.616118
|
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1:
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
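# --- Illustrative sketch (not part of the original module) ---------------------
# run_baseline_rd, run_traverse_rd and run_simple_rd all share one driver
# pattern: build one argument tuple per (network size, sample index), then either
# run the worker serially (num_procs < 1) or fan the tuples out with mp.Pool.
# The stand-alone worker/driver pair below shows only that pattern; the names are
# hypothetical, nothing here is called by the original code, and it reuses the
# module's existing mp and time imports.
def _demo_worker(args):
    name, n = args                    # unpack one argument tuple, as pool_get_*_rd does
    return '%s_%d' % (name, n)        # stand-in for the real per-sample computation
def _demo_driver(num_procs=2):
    pool_args = [('sample', i) for i in range(8)]
    start_time = time.time()
    if num_procs < 1:                 # don't multiprocess
        results = [_demo_worker(a) for a in pool_args]
    else:
        pool = mp.Pool(processes=num_procs)
        results = pool.map(_demo_worker, pool_args)
        pool.close()
        pool.join()
    print('demo total time: %f'%(time.time()-start_time))
    return results
# --------------------------------------------------------------------------------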
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
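# --- Illustrative sketch (not part of the original module) ---------------------
# get_simple_rd histograms log2 of the max coordinate-wise distance between each
# converged fixed point and every other one, mapping exact-zero distances to the
# smallest bin so log2 is defined and clipping everything into the bin range.
# The helper below reproduces just that bucketing step on an arbitrary point set;
# the name is hypothetical and it is not called by the original code.
def _log2_distance_buckets(fxV, bins):
    buckets = np.zeros(len(bins)-1)
    for j in range(fxV.shape[1]):
        dists = np.fabs(fxV - fxV[:,[j]]).max(axis=0)   # max coordinate-wise distance
        dists[dists == 0] = 2.0**bins[0]                 # avoid log2(0)
        logdists = np.clip(np.log2(dists), bins[0], bins[-1])
        hist,_ = np.histogram(logdists, bins=bins)
        buckets += hist
    return buckets
# Example (synthetic data): every ordered pair lands in exactly one bucket, so
# _log2_distance_buckets(np.random.randn(5,100), np.arange(-1025,3)).sum() == 100*100
# --------------------------------------------------------------------------------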
def pool_get_simple_rd(args):
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1:
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$')  # 'Max Coordinate-wise Distance'
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
| true
| true
|
7903a8fb9ca76323128827464afde2bac737afe6
| 1,384
|
py
|
Python
|
package/diana/utils/dicom/strings.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 15
|
2019-02-12T23:26:09.000Z
|
2021-12-21T08:53:58.000Z
|
package/diana/utils/dicom/strings.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 2
|
2019-01-23T21:13:12.000Z
|
2019-06-28T15:45:51.000Z
|
package/diana/utils/dicom/strings.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 6
|
2019-01-23T20:22:50.000Z
|
2022-02-03T03:27:04.000Z
|
import logging
from datetime import datetime
from dateutil import parser as DatetimeParser
def dicom_name(names: list) -> str:
s = "^".join(names).upper()
return s
def dicom_date(dt: datetime) -> str:
s = dt.strftime("%Y%m%d")
return s
def dicom_time(dt: datetime) -> str:
s = dt.strftime("%H%M%S")
return s
def dicom_datetime(dt: datetime) -> (str, str):
d = dicom_date(dt)
t = dicom_time(dt)
return d, t
def parse_dicom_datetime(dts: str, tms: str = None) -> datetime:
if tms:
dts = dts + tms
# GE Scanner dt format
try:
ts = datetime.strptime( dts, "%Y%m%d%H%M%S")
return ts
except ValueError:
# Wrong format
pass
# Siemens scanners use fractional seconds
try:
ts = datetime.strptime( dts, "%Y%m%d%H%M%S.%f")
return ts
except ValueError:
# Wrong format
pass
# Unknown format, fall back on guessing
try:
# Parser does _not_ like fractional seconds
dts = dts.split(".")[0]
ts = DatetimeParser.parse(dts)
return ts
except ValueError:
# Wrong format
pass
logger = logging.getLogger("DcmStrings")
logger.error(f"Failed to parse date time string: {dts}")
def date_str_to_dicom(dstr):
dt = DatetimeParser.parse(dstr)
dcm_dt = dicom_date(dt)
return dcm_dt
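# --- Illustrative usage (not part of the original module) -----------------------
# parse_dicom_datetime tries the GE scanner format first, then the Siemens format
# with fractional seconds, and finally strips the fraction and falls back to
# dateutil; the values below are made-up examples.
if __name__ == "__main__":
    print(parse_dicom_datetime("20190212", "231500"))        # GE style: separate date and time
    print(parse_dicom_datetime("20190212231500.123456"))     # Siemens style: fractional seconds
    print(dicom_datetime(datetime(2019, 2, 12, 23, 15, 0)))  # -> ('20190212', '231500')
# ---------------------------------------------------------------------------------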
| 20.969697
| 64
| 0.604769
|
import logging
from datetime import datetime
from dateutil import parser as DatetimeParser
def dicom_name(names: list) -> str:
s = "^".join(names).upper()
return s
def dicom_date(dt: datetime) -> str:
s = dt.strftime("%Y%m%d")
return s
def dicom_time(dt: datetime) -> str:
s = dt.strftime("%H%M%S")
return s
def dicom_datetime(dt: datetime) -> (str, str):
d = dicom_date(dt)
t = dicom_time(dt)
return d, t
def parse_dicom_datetime(dts: str, tms: str = None) -> datetime:
if tms:
dts = dts + tms
try:
ts = datetime.strptime( dts, "%Y%m%d%H%M%S")
return ts
except ValueError:
pass
try:
ts = datetime.strptime( dts, "%Y%m%d%H%M%S.%f")
return ts
except ValueError:
pass
try:
dts = dts.split(".")[0]
ts = DatetimeParser.parse(dts)
return ts
except ValueError:
pass
logger = logging.getLogger("DcmStrings")
logger.error(f"Failed to parse date time string: {dts}")
def date_str_to_dicom(dstr):
dt = DatetimeParser.parse(dstr)
dcm_dt = dicom_date(dt)
return dcm_dt
| true
| true
|
7903a974548bdce76744db90cef9c70bcc677625
| 489
|
py
|
Python
|
python/daisyHat/Tools.py
|
recursinging/daisyHat
|
94a3a2f8da13ee4df372027058f2741c84493a0e
|
[
"MIT"
] | null | null | null |
python/daisyHat/Tools.py
|
recursinging/daisyHat
|
94a3a2f8da13ee4df372027058f2741c84493a0e
|
[
"MIT"
] | null | null | null |
python/daisyHat/Tools.py
|
recursinging/daisyHat
|
94a3a2f8da13ee4df372027058f2741c84493a0e
|
[
"MIT"
] | null | null | null |
def printBigHeadline(text):
print("")
print("#######################################################################")
print(text)
print("#######################################################################")
print("")
def printSmallHeadline(text):
print("")
print("-----------------------------------------------------------------------")
print(text)
print("-----------------------------------------------------------------------")
print("")
| 30.5625
| 84
| 0.216769
|
def printBigHeadline(text):
print("")
print("#######################################################################")
print(text)
print("#######################################################################")
print("")
def printSmallHeadline(text):
print("")
print("-----------------------------------------------------------------------")
print(text)
print("-----------------------------------------------------------------------")
print("")
| true
| true
|
7903a97e804c4a39716d89d35ecf9953e1065a81
| 1,812
|
py
|
Python
|
tests/SearchTest.py
|
cuongbm/microblog
|
16b47b11b1f2b2877462c86873eb435beb10b545
|
[
"MIT"
] | null | null | null |
tests/SearchTest.py
|
cuongbm/microblog
|
16b47b11b1f2b2877462c86873eb435beb10b545
|
[
"MIT"
] | null | null | null |
tests/SearchTest.py
|
cuongbm/microblog
|
16b47b11b1f2b2877462c86873eb435beb10b545
|
[
"MIT"
] | null | null | null |
import datetime
from datetime import datetime, timedelta
from time import sleep
from app.search import add_to_index, delete_index, create_index, query_index
from app import db
from app.models import Post, User
from tests.BaseDbTest import BaseDbTest
class SearchTest(BaseDbTest):
index_name = "test_index"
def setUp(self):
super(SearchTest, self).setUp()
create_index(SearchTest.index_name)
def tearDown(self):
super(SearchTest, self).tearDown()
delete_index(SearchTest.index_name)
def test_index_posts(self):
# create two users
u1 = User(username='john', email='john@example.com')
u2 = User(username='susan', email='susan@example.com')
db.session.add_all([u1, u2])
# create four posts
now = datetime.utcnow()
p1 = Post(body="post post1 from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post post2 from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post post3 from john", author=u1,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post post4 from john", author=u1,
timestamp=now + timedelta(seconds=2))
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
add_to_index(SearchTest.index_name, p1)
add_to_index(SearchTest.index_name, p2)
add_to_index(SearchTest.index_name, p3)
add_to_index(SearchTest.index_name, p4)
sleep(1)
ids, total = query_index(SearchTest.index_name, "post1", 1, 20)
self.assertEqual(1, total)
self.assertEqual(p1.id, ids[0])
ids, total = query_index(SearchTest.index_name, "post", 1, 20)
self.assertEqual(4, total)
| 32.357143
| 76
| 0.640177
|
import datetime
from datetime import datetime, timedelta
from time import sleep
from app.search import add_to_index, delete_index, create_index, query_index
from app import db
from app.models import Post, User
from tests.BaseDbTest import BaseDbTest
class SearchTest(BaseDbTest):
index_name = "test_index"
def setUp(self):
super(SearchTest, self).setUp()
create_index(SearchTest.index_name)
def tearDown(self):
super(SearchTest, self).tearDown()
delete_index(SearchTest.index_name)
def test_index_posts(self):
u1 = User(username='john', email='john@example.com')
u2 = User(username='susan', email='susan@example.com')
db.session.add_all([u1, u2])
now = datetime.utcnow()
p1 = Post(body="post post1 from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post post2 from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post post3 from john", author=u1,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post post4 from john", author=u1,
timestamp=now + timedelta(seconds=2))
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
add_to_index(SearchTest.index_name, p1)
add_to_index(SearchTest.index_name, p2)
add_to_index(SearchTest.index_name, p3)
add_to_index(SearchTest.index_name, p4)
sleep(1)
ids, total = query_index(SearchTest.index_name, "post1", 1, 20)
self.assertEqual(1, total)
self.assertEqual(p1.id, ids[0])
ids, total = query_index(SearchTest.index_name, "post", 1, 20)
self.assertEqual(4, total)
| true
| true
|
7903aa2f3529e574a160e845cf50e4d4ec2f563c
| 265
|
py
|
Python
|
el_galleria/urls.py
|
kennjr/mi-galleria
|
3103873e4cfcd2f1c6389362bd6de3bf08f7cf24
|
[
"MIT"
] | null | null | null |
el_galleria/urls.py
|
kennjr/mi-galleria
|
3103873e4cfcd2f1c6389362bd6de3bf08f7cf24
|
[
"MIT"
] | null | null | null |
el_galleria/urls.py
|
kennjr/mi-galleria
|
3103873e4cfcd2f1c6389362bd6de3bf08f7cf24
|
[
"MIT"
] | null | null | null |
from django.urls import path
from el_galleria import views
urlpatterns = [
path('', views.index, name="home"),
path('category/<str:selected_category>/', views.category, name="category"),
path('search/<str:search_str>/', views.search, name="search")
]
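# Illustrative note only: with Django path converters each '<str:...>' segment is
# passed to the view as a keyword argument, so the views referenced above (defined
# in el_galleria/views.py, which is not shown here) would have signatures along
# the lines of:
#
#   def index(request): ...
#   def category(request, selected_category): ...
#   def search(request, search_str): ...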
| 26.5
| 79
| 0.69434
|
from django.urls import path
from el_galleria import views
urlpatterns = [
path('', views.index, name="home"),
path('category/<str:selected_category>/', views.category, name="category"),
path('search/<str:search_str>/', views.search, name="search")
]
| true
| true
|
7903aa4fefc0d2e42140065c21a984ee0e62943c
| 7,222
|
py
|
Python
|
encode.py
|
deut-erium/BASEic-steganography
|
370291442423f866ba5c4976d5e8766ae2d249ba
|
[
"MIT"
] | 1
|
2020-08-26T03:52:18.000Z
|
2020-08-26T03:52:18.000Z
|
encode.py
|
deut-erium/BASEic-steganography
|
370291442423f866ba5c4976d5e8766ae2d249ba
|
[
"MIT"
] | null | null | null |
encode.py
|
deut-erium/BASEic-steganography
|
370291442423f866ba5c4976d5e8766ae2d249ba
|
[
"MIT"
] | null | null | null |
"""inter-base steganography
producing base32 and base64 decodable strings"""
from base64 import b64encode, b64decode
import string
from itertools import product
from argparse import ArgumentParser
CHARSET = string.printable.encode()
B32_CHARSET = (string.ascii_uppercase + '234567').encode()
B64_CHARSET = (
string.ascii_lowercase +
string.ascii_uppercase +
string.digits +
'+/').encode()
ASCII_LOWER = string.ascii_lowercase.encode()
WHITESPACE = string.whitespace.encode()
ALPHA_SPACE = (
string.ascii_uppercase +
string.ascii_lowercase +
string.whitespace).encode()
ASCII_SUBS = {"a": ["a", "A", "4", "@"],
"b": ["b", "B", "8", "6"],
"c": ["c", "C", "("],
"d": ["d", "D"],
"e": ["e", "E", "3"],
"f": ["f", "F"],
"g": ["g", "G", "6", "9"],
"h": ["h", "H", "#"],
"i": ["i", "I", "1", "|", "!"],
"j": ["j", "J", "]", ";"],
"k": ["k", "K"],
"l": ["l", "L", "1", "|"],
"m": ["m", "M"],
"n": ["n", "N"],
"o": ["o", "O", "0"],
"p": ["p", "P"],
"q": ["q", "Q", "9"],
"r": ["r", "R", "2"],
"s": ["s", "S", "5", "$"],
"t": ["t", "T", "7", "+"],
"u": ["u", "U"],
"v": ["v", "V"],
"w": ["w", "W"],
"x": ["x", "X"],
"y": ["y", "Y"],
"z": ["z", "Z", "2", "%"],
"0": ["0"],
"1": ["1"],
"2": ["2"],
"3": ["3"],
"4": ["4"],
"5": ["5"],
"6": ["6"],
"7": ["7"],
"8": ["8"],
"9": ["9"],
" ": [" ", "\t", "_"]
}
def all_variations(word: str) -> list:
"""
Produce every leet variation of a string (all combinations of per-character substitutions)
"""
ans = [""]
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [x + y for x in ans for y in leet_letter]
return ans
def variation_gen(word: str):
"""
Lazily generate every leet variation of a string, one tuple of characters at a time
Args:
word: a 3 character string to generate all variations
Returns:
generator: generator for all possible leet variations
"""
return product(*(ASCII_SUBS[i] for i in word))
def all_valid_variations(word: str) -> list:
"""
Returns all leet variations of a triplet which result in a
Base32 only charset words on base64 encoding
Args:
word: An english triplet
Returns:
list: of all valid variations
"""
result = []
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
result.append("".join(variation))
return result
def valid_variation(word: str) -> str:
"""
Generates a single valid variation
Args:
word: the triplet to generate a variation from
Returns:
str: A valid variation of `word` or None otherwise
"""
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
return "".join(variation)
return None
# List to precompute the triplets for which there doesn't exist a valid
# variation
NON_LEET = []
for perm in product(string.ascii_lowercase + ' ' + string.digits, repeat=3):
if not valid_variation(''.join(perm)):
NON_LEET.append(''.join(perm))
def transform(strng: str) -> str:
"""
Transform the string to only lower alpha and numerics and spaces
Converts uppercase to lower case and strips all other characters except
space
"""
for char in string.punctuation + string.whitespace[1:]:
strng = strng.replace(char, '')
return strng.lower() + ' ' * (8 - len(strng) % 8)
def master_encode(strng: str) -> bytes:
"""
Encodes a string to its leet equivalent (sans punctuation) which when
base64 encoded contains only base32 characters
"""
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while i < len(strng):
try:
current = strng[i:i + 3]
if current in NON_LEET:
if current[:2] + ' ' not in NON_LEET:
result += valid_variation(current[:2] + ' ')
i += 2
elif current[0] + ' ' not in NON_LEET:
result += valid_variation(current[0] + ' ')
i += 1
elif ' {} '.format(current[0]) not in NON_LEET:
result += valid_variation(' {} '.format(current[0]))
i += 1
elif ' {}'.format(current[0]) not in NON_LEET:
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode())
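# --- Illustrative check (not part of the original script) -----------------------
# The point of master_encode is that its base64 output only uses characters from
# the base32 alphabet, so the same bytes can also be handed to a base32 decoder.
# The small demo below just verifies the charset property and shows the round
# trip; the function name is hypothetical and nothing calls it.
def _demo(plaintext="attack at dawn"):
    encoded = master_encode(plaintext)
    print(encoded)
    print(all(c in B32_CHARSET for c in encoded))   # True: only A-Z and 2-7 appear
    print(b64decode(encoded))                       # the leet-transformed cover text
# ---------------------------------------------------------------------------------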
if __name__ == "__main__":
PARSER = ArgumentParser(description="")
PARSER.add_argument(
'--input',
help='read a single line directly from input',
action="store_true")
PARSER.add_argument(
'--show',
help='shows the transformed input which results in correct encoding',
action="store_true")
PARSER.add_argument(
'--file',
help='reading text from file for conversion',
action="append")
ARGS = PARSER.parse_args()
TEST_STRING = """Steganography is the practice of concealing a file,
message, image, or video within another file, message, image, or video.
The word steganography comes from Greek steganographia, which combines
the words steganos meaning "covered or concealed", and graphia meaning
"writing". The first recorded use of the term was by Johannes Trithemius
in his Steganographia, a treatise on cryptography and steganography,
disguised as a book on magic. Generally, the hidden messages appear to
be (or to be part of) something else: images, articles, shopping lists,
or some other cover text. For example, the hidden message may be in
invisible ink between the visible lines of a private letter. Some
implementations of steganography that lack a shared secret are forms
of security through obscurity, and key-dependent steganographic schemes
adhere to Kerckhoffs's principle."""
if ARGS.file:
with open(ARGS.file[0], 'rb') as inp_file:
TEST_STRING = inp_file.read()
else:
TEST_STRING = input("input the line to encode:\n")
ENCODED_STRING = master_encode(TEST_STRING)
print("ENCODED STRING: {}".format(ENCODED_STRING))
if ARGS.show:
print("Transformed string: {}".format(b64decode(ENCODED_STRING)))
# WTBVICAJV2VSZSBFWHBFY3RJIG4JOSBGTGFHNSBCVXQJYTFMICAJWTBVIDZFVCBJNSB3ZTFS\
# ZCBCYXNFNSBCYSAJTWJPMDJMZSAJTWVOVCBET25UICAJICB3T3JSWSBJVHMJIGYJVW4JIG4JZXZ\
# FIHIJVCNFTGVTNSAJ
| 33.747664
| 82
| 0.542924
|
from base64 import b64encode, b64decode
import string
from itertools import product
from argparse import ArgumentParser
CHARSET = string.printable.encode()
B32_CHARSET = (string.ascii_uppercase + '234567').encode()
B64_CHARSET = (
string.ascii_lowercase +
string.ascii_uppercase +
string.digits +
'+/').encode()
ASCII_LOWER = string.ascii_lowercase.encode()
WHITESPACE = string.whitespace.encode()
ALPHA_SPACE = (
string.ascii_uppercase +
string.ascii_lowercase +
string.whitespace).encode()
ASCII_SUBS = {"a": ["a", "A", "4", "@"],
"b": ["b", "B", "8", "6"],
"c": ["c", "C", "("],
"d": ["d", "D"],
"e": ["e", "E", "3"],
"f": ["f", "F"],
"g": ["g", "G", "6", "9"],
"h": ["h", "H", "#"],
"i": ["i", "I", "1", "|", "!"],
"j": ["j", "J", "]", ";"],
"k": ["k", "K"],
"l": ["l", "L", "1", "|"],
"m": ["m", "M"],
"n": ["n", "N"],
"o": ["o", "O", "0"],
"p": ["p", "P"],
"q": ["q", "Q", "9"],
"r": ["r", "R", "2"],
"s": ["s", "S", "5", "$"],
"t": ["t", "T", "7", "+"],
"u": ["u", "U"],
"v": ["v", "V"],
"w": ["w", "W"],
"x": ["x", "X"],
"y": ["y", "Y"],
"z": ["z", "Z", "2", "%"],
"0": ["0"],
"1": ["1"],
"2": ["2"],
"3": ["3"],
"4": ["4"],
"5": ["5"],
"6": ["6"],
"7": ["7"],
"8": ["8"],
"9": ["9"],
" ": [" ", "\t", "_"]
}
def all_variations(word: str) -> list:
ans = [""]
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [x + y for x in ans for y in leet_letter]
return ans
def variation_gen(word: str):
return product(*(ASCII_SUBS[i] for i in word))
def all_valid_variations(word: str) -> list:
result = []
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
result.append("".join(variation))
return result
def valid_variation(word: str) -> str:
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
return "".join(variation)
return None
NON_LEET = []
for perm in product(string.ascii_lowercase + ' ' + string.digits, repeat=3):
if not valid_variation(''.join(perm)):
NON_LEET.append(''.join(perm))
def transform(strng: str) -> str:
for char in string.punctuation + string.whitespace[1:]:
strng = strng.replace(char, '')
return strng.lower() + ' ' * (8 - len(strng) % 8)
def master_encode(strng: str) -> bytes:
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while i < len(strng):
try:
current = strng[i:i + 3]
if current in NON_LEET:
if current[:2] + ' ' not in NON_LEET:
result += valid_variation(current[:2] + ' ')
i += 2
elif current[0] + ' ' not in NON_LEET:
result += valid_variation(current[0] + ' ')
i += 1
elif ' {} '.format(current[0]) not in NON_LEET:
result += valid_variation(' {} '.format(current[0]))
i += 1
elif ' {}'.format(current[0]) not in NON_LEET:
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode())
if __name__ == "__main__":
PARSER = ArgumentParser(description="")
PARSER.add_argument(
'--input',
help='read a single line directly from input',
action="store_true")
PARSER.add_argument(
'--show',
help='shows the transformed input which results in correct encoding',
action="store_true")
PARSER.add_argument(
'--file',
help='reading text from file for conversion',
action="append")
ARGS = PARSER.parse_args()
TEST_STRING = """Steganography is the practice of concealing a file,
message, image, or video within another file, message, image, or video.
The word steganography comes from Greek steganographia, which combines
the words steganos meaning "covered or concealed", and graphia meaning
"writing". The first recorded use of the term was by Johannes Trithemius
in his Steganographia, a treatise on cryptography and steganography,
disguised as a book on magic. Generally, the hidden messages appear to
be (or to be part of) something else: images, articles, shopping lists,
or some other cover text. For example, the hidden message may be in
invisible ink between the visible lines of a private letter. Some
implementations of steganography that lack a shared secret are forms
of security through obscurity, and key-dependent steganographic schemes
adhere to Kerckhoffs's principle."""
if ARGS.file:
with open(ARGS.file[0], 'rb') as inp_file:
TEST_STRING = inp_file.read()
else:
TEST_STRING = input("input the line to encode:\n")
ENCODED_STRING = master_encode(TEST_STRING)
print("ENCODED STRING: {}".format(ENCODED_STRING))
if ARGS.show:
print("Transformed string: {}".format(b64decode(ENCODED_STRING)))
# WTBVICAJV2VSZSBFWHBFY3RJIG4JOSBGTGFHNSBCVXQJYTFMICAJWTBVIDZFVCBJNSB3ZTFS\
# ZCBCYXNFNSBCYSAJTWJPMDJMZSAJTWVOVCBET25UICAJICB3T3JSWSBJVHMJIGYJVW4JIG4JZXZ\
# FIHIJVCNFTGVTNSAJ
| true
| true
|